/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef COMPILER2
#include "opto/intrinsicnode.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

// Convert the raw encoding form into the form expected by the
// constructor for Address.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  assert(scale == 0, "not supported");
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }

  Register rindex = as_Register(index);
  if (rindex != G0) {
    Address madr(as_Register(base), rindex);
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), disp);
    madr._rspec = rspec;
    return madr;
  }
}

Address Argument::address_in_frame() const {
  // Warning: In LP64 mode disp will occupy more than 10 bits, but
  // op codes such as ld or ldx only access disp() to get their
  // simm13 argument.
  int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
  if (is_in())
    return Address(FP, disp); // In argument.
  else
    return Address(SP, disp); // Out argument.
}
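// For illustration (comment added, not in the original source): for the first
// stack-passed argument, _number == Argument::n_register_parameters, so the
// displacement above reduces to
//   frame::memory_parameter_word_sp_offset * BytesPerWord + STACK_BIAS
// i.e. the first memory-parameter word just above the register-save area,
// addressed off FP for incoming arguments and off SP for outgoing ones.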
static const char* argumentNames[][2] = {
  {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
  {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
  {"A(n>9)","P(n>9)"}
};

const char* Argument::name() const {
  int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
  int num = number();
  if (num >= nofArgs)  num = nofArgs - 1;
  return argumentNames[num][is_in() ? 1 : 0];
}

#ifdef ASSERT
// On RISC, there's no benefit to verifying instruction boundaries.
bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

// Patch instruction inst at offset inst_pos to refer to dest_pos
// and return the resulting instruction.
// We should have pcs, not offsets, but since all is relative, it will
// work out OK.
int MacroAssembler::patched_branch(int dest_pos, int inst, int inst_pos) {
  int m; // mask for displacement field
  int v; // new value for displacement field
  const int word_aligned_ones = -4;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:    m = wdisp(word_aligned_ones, 0, 30); v = wdisp(dest_pos, inst_pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:  m = wdisp(word_aligned_ones, 0, 19); v = wdisp(dest_pos, inst_pos, 19); break;
      case bp_op2:   m = wdisp(word_aligned_ones, 0, 19); v = wdisp(dest_pos, inst_pos, 19); break;
      case fb_op2:   m = wdisp(word_aligned_ones, 0, 22); v = wdisp(dest_pos, inst_pos, 22); break;
      case br_op2:   m = wdisp(word_aligned_ones, 0, 22); v = wdisp(dest_pos, inst_pos, 22); break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          m = wdisp10(word_aligned_ones, 0);
          v = wdisp10(dest_pos, inst_pos);
        } else {
          m = wdisp16(word_aligned_ones, 0);
          v = wdisp16(dest_pos, inst_pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return (inst & ~m) | v;
}
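// A minimal worked example (illustrative values, added for clarity): patching
// a BPcc instruction (bp_op2, 19-bit displacement) at inst_pos 0x100 to
// branch to dest_pos 0x180:
//   m = wdisp(-4, 0, 19);         // word_aligned_ones selects the whole 19-bit disp field
//   v = wdisp(0x180, 0x100, 19);  // (0x180 - 0x100) >> 2 == 0x20
//   patched = (inst & ~m) | v;    // old displacement replaced by 0x20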
// Return the offset of the branch destination of instruction inst
// at offset pos.
// Should have pcs, but since all is relative, it works out.
int MacroAssembler::branch_destination(int inst, int pos) {
  int r;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:      r = inv_wdisp(inst, pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:  r = inv_wdisp(inst, pos, 19); break;
      case bp_op2:   r = inv_wdisp(inst, pos, 19); break;
      case fb_op2:   r = inv_wdisp(inst, pos, 22); break;
      case br_op2:   r = inv_wdisp(inst, pos, 22); break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          r = inv_wdisp10(inst, pos);
        } else {
          r = inv_wdisp16(inst, pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return r;
}

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check((intptr_t)offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any registers
    ld_ptr(reg, 0, G0);
  }
  else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}

// Ring buffer jumps


void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line) {
  assert_not_delayed();
  jmpl(r1, r2, G0);
}
void MacroAssembler::jmp(Register r1, int offset, const char* file, int line) {
  assert_not_delayed();
  jmp(r1, offset);
}

// This code sequence is relocatable to any address, even on LP64.
void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  Address a(temp, addrlit.low10() + offset);  // Add the offset to the displacement.
  jmpl(a.base(), a.disp(), d);
}

void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
  jumpl(addrlit, temp, G0, offset, file, line);
}
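// For illustration (comment added, not in the original source): the sequence
// emitted by jumpl() above always has the fixed shape
//   sethi/or/sllx...  addrlit, temp    // patchable_sethi, padded to full length
//   jmpl  temp + (low10(addrlit) + offset), d
// The fixed-length sethi is what lets NativeJump and NativeFarCall patch the
// target address in place later.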
// Conditional breakpoint (for assertion checks in assembly code)
void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
  trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
}

// We want to use ST_BREAKPOINT here, but the debugger is confused by it.
void MacroAssembler::breakpoint_trap() {
  trap(ST_RESERVED_FOR_USER_0);
}

// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  srl(thread, os::get_serialize_page_shift_count(), tmp2);
  if (Assembler::is_simm13(os::vm_page_size())) {
    and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
  }
  else {
    set((os::vm_page_size() - sizeof(int)), tmp1);
    and3(tmp2, tmp1, tmp2);
  }
  set(os::get_memory_serialize_page(), tmp1);
  st(G0, tmp1, tmp2);
}
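// A worked example (assumed numbers, for illustration only): with an 8K page
// and a shift count of, say, 12, a thread pointer T picks the slot
//   offset = (T >> 12) & (8192 - sizeof(int))
// so distinct threads tend to land on distinct, int-aligned words of the
// serialization page and their stores rarely share a cache line.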


void MacroAssembler::enter() {
  Unimplemented();
}

void MacroAssembler::leave() {
  Unimplemented();
}

// Calls to C land

#ifdef ASSERT
// a hook for debugging
static Thread* reinitialize_thread() {
  return Thread::current();
}
#else
#define reinitialize_thread Thread::current
#endif

#ifdef ASSERT
address last_get_thread = NULL;
#endif

// call this when G2_thread is not known to be valid
void MacroAssembler::get_thread() {
  save_frame(0);                // to avoid clobbering O0
  mov(G1, L0);                  // avoid clobbering G1
  mov(G5_method, L1);           // avoid clobbering G5
  mov(G3, L2);                  // avoid clobbering G3 also
  mov(G4, L5);                  // avoid clobbering G4
#ifdef ASSERT
  AddressLiteral last_get_thread_addrlit(&last_get_thread);
  set(last_get_thread_addrlit, L3);
  rdpc(L4);
  inc(L4, 3 * BytesPerInstWord); // skip rdpc + inc + st_ptr to point L4 at call
  st_ptr(L4, L3, 0);
#endif
  call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
  delayed()->nop();
  mov(L0, G1);
  mov(L1, G5_method);
  mov(L2, G3);
  mov(L5, G4);
  restore(O0, 0, G2_thread);
}

static Thread* verify_thread_subroutine(Thread* gthread_value) {
  Thread* correct_value = Thread::current();
  guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
  return correct_value;
}

void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    // NOTE: this chops off the heads of the 64-bit O registers.
    // make sure G2_thread contains the right value
    save_frame_and_mov(0, Lmethod, Lmethod);   // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
    mov(G1, L1);                // avoid clobbering G1
    // G2 saved below
    mov(G3, L3);                // avoid clobbering G3
    mov(G4, L4);                // avoid clobbering G4
    mov(G5_method, L5);         // avoid clobbering G5_method
    call(CAST_FROM_FN_PTR(address, verify_thread_subroutine), relocInfo::runtime_call_type);
    delayed()->mov(G2_thread, O0);

    mov(L1, G1);                // Restore G1
    // G2 restored below
    mov(L3, G3);                // restore G3
    mov(L4, G4);                // restore G4
    mov(L5, G5_method);         // restore G5_method
    restore(O0, 0, G2_thread);
  }
}


void MacroAssembler::save_thread(const Register thread_cache) {
  verify_thread();
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(G2_thread, thread_cache);
  }
  if (VerifyThread) {
    // smash G2_thread, as if the VM were about to anyway
    set(0x67676767, G2_thread);
  }
}


void MacroAssembler::restore_thread(const Register thread_cache) {
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(thread_cache, G2_thread);
    verify_thread();
  } else {
    // do it the slow way
    get_thread();
  }
}


// %%% maybe get rid of [re]set_last_Java_frame
void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
  assert_not_delayed();
  Address flags(G2_thread, JavaThread::frame_anchor_offset() +
                           JavaFrameAnchor::flags_offset());
  Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset());

  // Always set last_Java_pc and flags first because once last_Java_sp
  // is visible has_last_Java_frame is true and users will look at the
  // rest of the fields. (Note: flags should always be zero before we
  // get here so doesn't need to be set.)

#ifdef ASSERT
  // Verify that last_Java_pc was zeroed on return to Java
  Label PcOk;
  save_frame(0);                // to avoid clobbering O0
  ld_ptr(pc_addr, L0);
  br_null_short(L0, Assembler::pt, PcOk);
  STOP("last_Java_pc not zeroed before leaving Java");
  bind(PcOk);

  // Verify that flags was zeroed on return to Java
  Label FlagsOk;
  ld(flags, L0);
  tst(L0);
  br(Assembler::zero, false, Assembler::pt, FlagsOk);
  delayed()->restore();
  STOP("flags not zeroed before leaving Java");
  bind(FlagsOk);
#endif /* ASSERT */
  //
  // When returning from calling out from Java mode the frame anchor's
  // last_Java_pc will always be set to NULL. It is set here so that if
  // we are doing a call to native (not VM) that we capture the known pc
  // and don't have to rely on the native call having a standard frame
  // linkage where we can find the pc.

  if (last_Java_pc->is_valid()) {
    st_ptr(last_Java_pc, pc_addr);
  }

#ifdef ASSERT
  // Make sure that we have an odd stack
  Label StackOk;
  andcc(last_java_sp, 0x01, G0);
  br(Assembler::notZero, false, Assembler::pt, StackOk);
  delayed()->nop();
  STOP("Stack Not Biased in set_last_Java_frame");
  bind(StackOk);
#endif // ASSERT
  assert(last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
  add(last_java_sp, STACK_BIAS, G4_scratch);
  st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
}

void MacroAssembler::reset_last_Java_frame(void) {
  assert_not_delayed();

  Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset());
  Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
  Address flags  (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

#ifdef ASSERT
  // check that it WAS previously set
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod to helper frame for -Xprof
  ld_ptr(sp_addr, L0);
  tst(L0);
  breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
  restore();
#endif // ASSERT

  st_ptr(G0, sp_addr);
  // Always return last_Java_pc to zero
  st_ptr(G0, pc_addr);
  // Always null flags after return to Java
  st(G0, flags);
}

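// For orientation (comment added for clarity, not in the original source):
// call_VM_base below follows the usual VM-call protocol: publish a last Java
// frame, optionally cache G2_thread in a window-saved register, make the
// runtime call with the thread as the first (O0) argument, then restore the
// thread register and retract the frame anchor before checking for pending
// exceptions.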
void MacroAssembler::call_VM_base(
  Register        oop_result,
  Register        thread_cache,
  Register        last_java_sp,
  address         entry_point,
  int             number_of_arguments,
  bool            check_exceptions)
{
  assert_not_delayed();

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");

  // 64-bit last_java_sp is biased!
  set_last_Java_frame(last_java_sp, noreg);
  if (VerifyThread)  mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread)
    delayed()->mov(G2_thread, O0);  // pass thread as first argument
  else
    delayed()->nop();               // (thread already passed)
  restore_thread(thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions. use Gtemp as scratch register.
  if (check_exceptions) {
    check_and_forward_exception(Gtemp);
  }

#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }
}

void MacroAssembler::check_and_forward_exception(Register scratch_reg)
{
  Label L;

  check_and_handle_popframe(scratch_reg);
  check_and_handle_earlyret(scratch_reg);

  Address exception_addr(G2_thread, Thread::pending_exception_offset());
  ld_ptr(exception_addr, scratch_reg);
  br_null_short(scratch_reg, pt, L);
  // we use O7 linkage so that forward_exception_entry has the issuing PC
  call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  delayed()->nop();
  bind(L);
}


void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
}


void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, entry_point, 3, check_exceptions);
}



// Note: The following call_VM overloadings are useful when a "save"
// has already been performed by a stub, and the last Java frame is
// the previous one. In that case, last_java_sp must be passed as FP
// instead of SP.
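// A minimal usage sketch (illustrative only; SomeRuntime::entry is a made-up
// entry point): from stub code that has already done its own 'save', the
// caller's frame is the last Java frame, so pass FP:
//
//   __ mov(Rarg, O1);
//   __ call_VM(noreg, FP, CAST_FROM_FN_PTR(address, SomeRuntime::entry), O1, true);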


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}



void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
  assert_not_delayed();
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  delayed()->nop();
  restore_thread(thread_cache);
#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
  call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
  mov(arg_1, O0);
  call_VM_leaf(thread_cache, entry_point, 1);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 2);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 3);
}


void MacroAssembler::get_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  ld_ptr(vm_result_addr, oop_result);
  st_ptr(G0, vm_result_addr);
  verify_oop(oop_result);
}


void MacroAssembler::get_vm_result_2(Register metadata_result) {
  verify_thread();
  Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
  ld_ptr(vm_result_addr_2, metadata_result);
  st_ptr(G0, vm_result_addr_2);
}


// We require that C code which does not return a value in vm_result will
// leave it undisturbed.
void MacroAssembler::set_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  verify_oop(oop_result);

# ifdef ASSERT
  // Check that we are not overwriting any other oop.
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod for -Xprof
  ld_ptr(vm_result_addr, L0);
  tst(L0);
  restore();
  breakpoint_trap(notZero, Assembler::ptr_cc);
  // }
# endif

  st_ptr(oop_result, vm_result_addr);
}


void MacroAssembler::ic_call(address entry, bool emit_delay, jint method_index) {
  RelocationHolder rspec = virtual_call_Relocation::spec(pc(), method_index);
  patchable_set((intptr_t)Universe::non_oop_word(), G5_inline_cache_reg);
  relocate(rspec);
  call(entry, relocInfo::none);
  if (emit_delay) {
    delayed()->nop();
  }
}

void MacroAssembler::card_table_write(jbyte* byte_map_base,
                                      Register tmp, Register obj) {
  srlx(obj, CardTableModRefBS::card_shift, obj);
  assert(tmp != obj, "need separate temp reg");
  set((address) byte_map_base, tmp);
  stb(G0, tmp, obj);
}


void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  address save_pc;
  int shiftcnt;
# ifdef CHECK_DELAY
  assert_not_delayed((char*) "cannot put two instructions in delay slot");
# endif
  v9_dep();
  save_pc = pc();

  int msb32 = (int) (addrlit.value() >> 32);
  int lsb32 = (int) (addrlit.value());

  if (msb32 == 0 && lsb32 >= 0) {
    Assembler::sethi(lsb32, d, addrlit.rspec());
  }
  else if (msb32 == -1) {
    Assembler::sethi(~lsb32, d, addrlit.rspec());
    xor3(d, ~low10(~0), d);
  }
  else {
    Assembler::sethi(msb32, d, addrlit.rspec());  // msb 22-bits
    if (msb32 & 0x3ff)                            // Any bits?
      or3(d, msb32 & 0x3ff, d);                   // msb 32-bits are now in lsb 32
    if (lsb32 & 0xFFFFFC00) {                     // done?
      if ((lsb32 >> 20) & 0xfff) {                // Any bits set?
        sllx(d, 12, d);                           // Make room for next 12 bits
        or3(d, (lsb32 >> 20) & 0xfff, d);         // Or in next 12
        shiftcnt = 0;                             // We already shifted
      }
      else
        shiftcnt = 12;
      if ((lsb32 >> 10) & 0x3ff) {
        sllx(d, shiftcnt + 10, d);                // Make room for last 10 bits
        or3(d, (lsb32 >> 10) & 0x3ff, d);         // Or in next 10
        shiftcnt = 0;
      }
      else
        shiftcnt = 10;
      sllx(d, shiftcnt + 10, d);                  // Shift leaving disp field 0'd
    }
    else
      sllx(d, 32, d);
  }
  // Pad out the instruction sequence so it can be patched later.
  if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
                           addrlit.rtype() != relocInfo::runtime_call_type)) {
    while (pc() < (save_pc + (7 * BytesPerInstWord)))
      nop();
  }
}


void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, false);
}


void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, true);
}


int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
  if (worst_case)  return 7;
  intptr_t iaddr = (intptr_t) a;
  int msb32 = (int) (iaddr >> 32);
  int lsb32 = (int) (iaddr);
  int count;
  if (msb32 == 0 && lsb32 >= 0)
    count = 1;
  else if (msb32 == -1)
    count = 2;
  else {
    count = 2;
    if (msb32 & 0x3ff)
      count++;
    if (lsb32 & 0xFFFFFC00) {
      if ((lsb32 >> 20) & 0xfff)  count += 2;
      if ((lsb32 >> 10) & 0x3ff)  count += 2;
    }
  }
  return count;
}

int MacroAssembler::worst_case_insts_for_set() {
  return insts_for_sethi(NULL, true) + 1;
}


// Keep in sync with MacroAssembler::insts_for_internal_set
void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  intptr_t value = addrlit.value();

  if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) {
    // can optimize
    if (-4096 <= value && value <= 4095) {
      or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
      return;
    }
    if (inv_hi22(hi22(value)) == value) {
      sethi(addrlit, d);
      return;
    }
  }
  assert_not_delayed((char*) "cannot put two instructions in delay slot");
  internal_sethi(addrlit, d, ForceRelocatable);
  if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
    add(d, addrlit.low10(), d, addrlit.rspec());
  }
}

// Keep in sync with MacroAssembler::internal_set
int MacroAssembler::insts_for_internal_set(intptr_t value) {
  // can optimize
  if (-4096 <= value && value <= 4095) {
    return 1;
  }
  if (inv_hi22(hi22(value)) == value) {
    return insts_for_sethi((address) value);
  }
  int count = insts_for_sethi((address) value);
  AddressLiteral al(value);
  if (al.low10() != 0) {
    count++;
  }
  return count;
}

void MacroAssembler::set(const AddressLiteral& al, Register d) {
  internal_set(al, d, false);
}

void MacroAssembler::set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, false);
}

void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) {
  AddressLiteral al(addr, rspec);
  internal_set(al, d, false);
}

void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) {
  internal_set(al, d, true);
}

void MacroAssembler::patchable_set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, true);
}
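// Worst-case shape of the sequences above (comment added for illustration):
// a fully general 64-bit constant costs 7 instructions from internal_sethi,
//   sethi msb22; or3 msb-low10; sllx 12; or3 next12; sllx 10; or3 next10; sllx 10
// plus one final 'add d, low10, d' from internal_set -- which is why
// worst_case_insts_for_set() returns insts_for_sethi(NULL, true) + 1 == 8.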


void MacroAssembler::set64(jlong value, Register d, Register tmp) {
  assert_not_delayed();
  v9_dep();

  int hi = (int)(value >> 32);
  int lo = (int)(value & ~0);
  int bits_33to2 = (int)((value >> 2) & ~0);
  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    or3(G0, lo, d);
  } else if (hi == 0) {
    Assembler::sethi(lo, d);          // hardware version zero-extends to upper 32
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
  }
  else if ((hi >> 2) == 0) {
    Assembler::sethi(bits_33to2, d);  // hardware version zero-extends to upper 32
    sllx(d, 2, d);
    if (low12(lo) != 0)
      or3(d, low12(lo), d);
  }
  else if (hi == -1) {
    Assembler::sethi(~lo, d);         // hardware version zero-extends to upper 32
    xor3(d, low10(lo) ^ ~low10(~0), d);
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      or3(G0, hi, d);
    } else {
      Assembler::sethi(hi, d);        // hardware version zero-extends to upper 32
      if (low10(hi) != 0)
        or3(d, low10(hi), d);
    }
    sllx(d, 32, d);
  }
  else {
    Assembler::sethi(hi, tmp);
    Assembler::sethi(lo, d);          // macro assembler version sign-extends
    if (low10(hi) != 0)
      or3(tmp, low10(hi), tmp);
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
    sllx(tmp, 32, tmp);
    or3(d, tmp, d);
  }
}

int MacroAssembler::insts_for_set64(jlong value) {
  v9_dep();

  int hi = (int) (value >> 32);
  int lo = (int) (value & ~0);
  int count = 0;

  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    count++;
  } else if (hi == 0) {
    count++;
    if (low10(lo) != 0)
      count++;
  }
  else if (hi == -1) {
    count += 2;
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      count++;
    } else {
      count++;
      if (low10(hi) != 0)
        count++;
    }
    count++;
  }
  else {
    count += 2;
    if (low10(hi) != 0)
      count++;
    if (low10(lo) != 0)
      count++;
    count += 2;
  }
  return count;
}

// compute size in bytes of sparc frame, given
// number of extraWords
int MacroAssembler::total_frame_size_in_bytes(int extraWords) {

  int nWords = frame::memory_parameter_word_sp_offset;

  nWords += extraWords;

  if (nWords & 1) ++nWords; // round up to double-word

  return nWords * BytesPerWord;
}


// save_frame: given number of "extra" words in frame,
// issue the appropriate save instruction (p. 200, V8 manual)

void MacroAssembler::save_frame(int extraWords) {
  int delta = -total_frame_size_in_bytes(extraWords);
  if (is_simm13(delta)) {
    save(SP, delta, SP);
  } else {
    set(delta, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_c1(int size_in_bytes) {
  if (is_simm13(-size_in_bytes)) {
    save(SP, -size_in_bytes, SP);
  } else {
    set(-size_in_bytes, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}
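// Note (added for clarity): total_frame_size_in_bytes rounds the word count
// up to an even number, so SP stays doubleword aligned; and a frame too
// large for a simm13 immediate falls back to materializing the delta in
// G3_scratch, so G3 is implicitly clobbered on that path.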


void MacroAssembler::save_frame_and_mov(int extraWords,
                                        Register s1, Register d1,
                                        Register s2, Register d2) {
  assert_not_delayed();

  // The trick here is to use precisely the same memory word
  // that trap handlers also use to save the register.
  // This word cannot be used for any other purpose, but
  // it works fine to save the register's value, whether or not
  // an interrupt flushes register windows at any given moment!
  Address s1_addr;
  if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
    s1_addr = s1->address_in_saved_window();
    st_ptr(s1, s1_addr);
  }

  Address s2_addr;
  if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
    s2_addr = s2->address_in_saved_window();
    st_ptr(s2, s2_addr);
  }

  save_frame(extraWords);

  if (s1_addr.base() == SP) {
    ld_ptr(s1_addr.after_save(), d1);
  } else if (s1->is_valid()) {
    mov(s1->after_save(), d1);
  }

  if (s2_addr.base() == SP) {
    ld_ptr(s2_addr.after_save(), d2);
  } else if (s2->is_valid()) {
    mov(s2->after_save(), d2);
  }
}


AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}


AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(obj, oop_Relocation::spec(oop_index));
}

void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(0x3fffff, d);
  emit_int32( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff) );
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, 0x3ff, d);
}
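// For illustration (comment added, not in the original source): the pair
// emitted above is a placeholder
//   sethi %hi(0x3fffff), d
//   add   d, 0x3ff, d
// i.e. all ones in the 22-bit and 10-bit halves; relocation processing later
// rewrites both halves with the real compressed oop, which is why only the
// sethi carries a relocation entry.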

void MacroAssembler::set_narrow_klass(Klass* k, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  narrowOop encoded_k = Klass::encode_klass(k);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(encoded_k, d);
  emit_int32( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(encoded_k) );
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, low10(encoded_k), d);
}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) nop();
}

void RegistersForDebugging::print(outputStream* s) {
  FlagSetting fs(Debugging, true);
  int j;
  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("i%d = ", j); os::print_location(s, i[j]); }
    else        { s->print("fp = ");     os::print_location(s, i[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("l%d = ", j); os::print_location(s, l[j]);
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("o%d = ", j); os::print_location(s, o[j]); }
    else        { s->print("sp = ");     os::print_location(s, o[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("g%d = ", j); os::print_location(s, g[j]);
  }
  s->cr();

  // print out floats with compression
  for (j = 0; j < 32; ) {
    jfloat val = f[j];
    int last = j;
    for ( ; last + 1 < 32; ++last) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", f[last+1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("f%d", j);
    if (j != last)  s->print(" - f%d", last);
    s->print(" = %f", val);
    s->fill_to(25);
    s->print_cr(" (0x%x)", *(int*)&val);
    j = last + 1;
  }
  s->cr();

  // and doubles (evens only)
  for (j = 0; j < 32; ) {
    jdouble val = d[j];
    int last = j;
    for ( ; last + 1 < 32; ++last) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", d[last+1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("d%d", 2 * j);
    if (j != last)  s->print(" - d%d", last);
    s->print(" = %f", val);
    s->fill_to(30);
    s->print("(0x%x)", *(int*)&val);
    s->fill_to(42);
    s->print_cr("(0x%x)", *(1 + (int*)&val));
    j = last + 1;
  }
  s->cr();
}

void RegistersForDebugging::save_registers(MacroAssembler* a) {
  a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
  a->flushw();
  int i;
  for (i = 0; i < 8; ++i) {
    a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr(L1, O0, i_offset(i));
    a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr(L1, O0, l_offset(i));
    a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
    a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
  }
  for (i = 0; i < 32; ++i) {
    a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
  }
  for (i = 0; i < 64; i += 2) {
    a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
  }
}

void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
  for (int i = 1; i < 8; ++i) {
    a->ld_ptr(r, g_offset(i), as_gRegister(i));
  }
  for (int j = 0; j < 32; ++j) {
    a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
  }
  for (int k = 0; k < 64; k += 2) {
    a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
  }
}


// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
void MacroAssembler::push_fTOS() {
  // %%%%%% need to implement this
}

// pops double TOS element from CPU stack and pushes on FPU stack
void MacroAssembler::pop_fTOS() {
  // %%%%%% need to implement this
}

void MacroAssembler::empty_FPU_stack() {
  // %%%%%% need to implement this
}

void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  if (reg == G0)  return;       // always NULL, which is always an oop

  BLOCK_COMMENT("verify_oop {");
  char buffer[64];
#ifdef COMPILER1
  if (CommentedAssembly) {
    snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
    block_comment(buffer);
  }
#endif

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at offset %d (%s:%d)", msg, offset(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP, -8*8, SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0, SP, frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1, SP, frame::register_save_words*wordSize+STACK_BIAS+1*8);
  mov(reg, O0); // Move arg into O0; arg might be in O7 which is about to be crushed
  stx(O7, SP, frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8, SP);
  BLOCK_COMMENT("} verify_oop");
}

void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char * file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP, -8*8, SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0, SP, frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1, SP, frame::register_save_words*wordSize+STACK_BIAS+1*8);
  ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
  stx(O7, SP, frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8, SP);
}

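// Layout note (comment added for illustration): both routines above spill
// O0, O1 and O7 into the 8*8-byte scratch area they just carved out, at
//   SP + frame::register_save_words*wordSize + STACK_BIAS + n*8
// using slots 0, 1 and 7; verify_oop_subroutine below reuses slots 2-5 for
// O2-O5 and restores everything from the same offsets before returning.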
// side-door communication with signalHandler in os_solaris.cpp
address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };

// This macro is expanded just once; it creates shared code. Contract:
// receives an oop in O0. Must restore O0 & O7 from TLS. Must not smash ANY
// registers, including flags. May not use a register 'save', as this blows
// the high bits of the O-regs if they contain Long values. Acts as a 'leaf'
// call.
void MacroAssembler::verify_oop_subroutine() {
  // Leaf call; no frame.
  Label succeed, fail, null_or_fail;

  // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home).
  // O0 is now the oop to be checked. O7 is the return address.
  Register O0_obj = O0;

  // Save some more registers for temps.
  stx(O2, SP, frame::register_save_words*wordSize+STACK_BIAS+2*8);
  stx(O3, SP, frame::register_save_words*wordSize+STACK_BIAS+3*8);
  stx(O4, SP, frame::register_save_words*wordSize+STACK_BIAS+4*8);
  stx(O5, SP, frame::register_save_words*wordSize+STACK_BIAS+5*8);

  // Save flags
  Register O5_save_flags = O5;
  rdccr(O5_save_flags);

  { // count number of verifies
    Register O2_adr   = O2;
    Register O3_accum = O3;
    inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum);
  }

  Register O2_mask = O2;
  Register O3_bits = O3;
  Register O4_temp = O4;

  // mark lower end of faulting range
  assert(_verify_oop_implicit_branch[0] == NULL, "set once");
  _verify_oop_implicit_branch[0] = pc();

  // We can't check the mark oop because it could be in the process of
  // locking or unlocking while this is running.
  set(Universe::verify_oop_mask(), O2_mask);
  set(Universe::verify_oop_bits(), O3_bits);

  // assert((obj & oop_mask) == oop_bits);
  and3(O0_obj, O2_mask, O4_temp);
  cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, null_or_fail);

  if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) {
    // the null_or_fail case is useless; must test for null separately
    br_null_short(O0_obj, pn, succeed);
  }

  // Check the Klass* of this object for being in the right area of memory.
  // Cannot do the load in the delay slot above in case O0 is null
  load_klass(O0_obj, O0_obj);
  // assert((klass != NULL)
  br_null_short(O0_obj, pn, fail);

  wrccr(O5_save_flags); // Restore CCR's

  // mark upper end of faulting range
  _verify_oop_implicit_branch[1] = pc();

  //-----------------------
  // all tests pass
  bind(succeed);

  // Restore prior 64-bit registers
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+0*8, O0);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+1*8, O1);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+2*8, O2);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+3*8, O3);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+4*8, O4);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+5*8, O5);

  retl(); // Leaf return; restore prior O7 in delay slot
  delayed()->ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+7*8, O7);

  //-----------------------
  bind(null_or_fail); // nulls are less common but OK
  br_null(O0_obj, false, pt, succeed);
  delayed()->wrccr(O5_save_flags); // Restore CCR's

  //-----------------------
  // report failure:
  bind(fail);
  _verify_oop_implicit_branch[2] = pc();

  wrccr(O5_save_flags); // Restore CCR's

  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  mov(I1, O1);

  // Restore prior 64-bit registers
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+0*8, I0);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+1*8, I1);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+2*8, I2);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+3*8, I3);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+4*8, I4);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+5*8, I5);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(al, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();
}


void MacroAssembler::stop(const char* msg) {
  // save frame first to get O7 for return address
  // add one word to size in case struct is odd number of words long
  // It must be doubleword-aligned for storing doubles into it.

  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O1);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(a, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();

  breakpoint_trap();   // make stop actually stop rather than writing
                       // unnoticeable results in the output files.

  // restore(); done in callee to save space!
}


void MacroAssembler::warn(const char* msg) {
  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
  RegistersForDebugging::save_registers(this);
  mov(O0, L0);
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O0);
  call(CAST_FROM_FN_PTR(address, warning));
  delayed()->nop();
  // ret();
  // delayed()->restore();
  RegistersForDebugging::restore_registers(this, L0);
  restore();
}


void MacroAssembler::untested(const char* what) {
  // We must be able to turn interactive prompting off
  // in order to run automated test scripts on the VM.
  // Use the flag ShowMessageBoxOnError.

  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("untested: %s", what);
    b = code_string(ss.as_string());
  }
  if (ShowMessageBoxOnError) { STOP(b); }
  else                       { warn(b); }
}


void MacroAssembler::stop_subroutine() {
  RegistersForDebugging::save_registers(this);

  // for the sake of the debugger, stick a PC on the current frame
  // (this assumes that the caller has performed an extra "save")
  mov(I7, L7);
  add(O7, -7 * BytesPerInt, I7);

  save_frame();     // one more save to free up another O7 register
  mov(I0, O1);      // addr of reg save area
  // We expect pointer to message in I1. Caller must set it up in O1
  mov(I1, O0);      // get msg
  call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
  delayed()->nop();

  restore();

  RegistersForDebugging::restore_registers(this, O0);

  save_frame(0);
  call(CAST_FROM_FN_PTR(address, breakpoint));
  delayed()->nop();
  restore();

  mov(L7, I7);
  retl();
  delayed()->restore(); // see stop above
}


void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
    {
      // In order to get locks to work, we need to fake an in_VM state
      ttyLocker ttyl;
      ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
      if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
        BytecodeCounter::print();
      }
      if (os::message_box(msg, "Execution stopped, print registers?"))
        regs->print(::tty);
    }
    BREAKPOINT;
    ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
  }
  else {
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
  }
  assert(false, "DEBUG MESSAGE: %s", msg);
}


void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) {
  subcc(Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
  Label no_extras;
  br(negative, true, pt, no_extras);  // if neg, clear reg
  delayed()->set(0, Rresult);         // annulled, so only if taken
  bind(no_extras);
}


void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
  add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
  bclr(1, Rresult);
  sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes
}


void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) {
  calc_frame_size(Rextra_words, Rresult);
  neg(Rresult);
  save(SP, Rresult, SP);
}


// ---------------------------------------------------------
Assembler::RCondition cond2rcond(Assembler::Condition c) {
  switch (c) {
    /*case zero: */
    case Assembler::equal:        return Assembler::rc_z;
    case Assembler::lessEqual:    return Assembler::rc_lez;
    case Assembler::less:         return Assembler::rc_lz;
    /*case notZero:*/
    case Assembler::notEqual:     return Assembler::rc_nz;
    case Assembler::greater:      return Assembler::rc_gz;
    case Assembler::greaterEqual: return Assembler::rc_gez;
  }
  ShouldNotReachHere();
  return Assembler::rc_z;
}

// compares (32 bit) register with zero and branches. NOT FOR USE WITH 64-bit POINTERS
void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a, Predict p) {
  tst(s1);
  br(c, a, p, L);
}

// Compares a pointer register with zero and branches on null.
// Does a test & branch on 32-bit systems and a register-branch on 64-bit.
void MacroAssembler::br_null(Register s1, bool a, Predict p, Label& L) {
  assert_not_delayed();
  bpr(rc_z, a, p, s1, L);
}

void MacroAssembler::br_notnull(Register s1, bool a, Predict p, Label& L) {
  assert_not_delayed();
  bpr(rc_nz, a, p, s1, L);
}

// Compare registers and branch with nop in delay slot or cbcond without delay slot.

// Compare integer (32 bit) values (icc only).
void MacroAssembler::cmp_and_br_short(Register s1, Register s2, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, s2, L);
  } else {
    cmp(s1, s2);
    br(c, false, p, L);
    delayed()->nop();
  }
}

// Compare integer (32 bit) values (icc only).
void MacroAssembler::cmp_and_br_short(Register s1, int simm13a, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (is_simm(simm13a, 5) && use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, simm13a, L);
  } else {
    cmp(s1, simm13a);
    br(c, false, p, L);
    delayed()->nop();
  }
}

// Branch that tests xcc in LP64 and icc in !LP64
void MacroAssembler::cmp_and_brx_short(Register s1, Register s2, Condition c,
                                       Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, ptr_cc, s1, s2, L);
  } else {
    cmp(s1, s2);
    brx(c, false, p, L);
    delayed()->nop();
  }
}

// Branch that tests xcc in LP64 and icc in !LP64
void MacroAssembler::cmp_and_brx_short(Register s1, int simm13a, Condition c,
                                       Predict p, Label& L) {
  assert_not_delayed();
  if (is_simm(simm13a, 5) && use_cbcond(L)) {
    Assembler::cbcond(c, ptr_cc, s1, simm13a, L);
  } else {
    cmp(s1, simm13a);
    brx(c, false, p, L);
    delayed()->nop();
  }
}

// Short branch versions for comparing a pointer with zero.

void MacroAssembler::br_null_short(Register s1, Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(zero, ptr_cc, s1, 0, L);
    return;
  }
  br_null(s1, false, p, L);
  delayed()->nop();
}

void MacroAssembler::br_notnull_short(Register s1, Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(notZero, ptr_cc, s1, 0, L);
    return;
  }
  br_notnull(s1, false, p, L);
  delayed()->nop();
}

// Unconditional short branch
void MacroAssembler::ba_short(Label& L) {
  if (use_cbcond(L)) {
    Assembler::cbcond(equal, icc, G0, G0, L);
    return;
  }
  br(always, false, pt, L);
  delayed()->nop();
}
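// A minimal usage sketch (illustrative only; Rcount and the label are
// hypothetical): the short forms hide the cbcond-vs-branch+nop choice, so
// callers just write
//
//   Label done;
//   cmp_and_br_short(Rcount, 0, Assembler::equal, Assembler::pn, done);
//   ...                       // body skipped when Rcount == 0
//   bind(done);
//
// and never manage the delay slot themselves.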

// instruction sequences factored across compiler & interpreter


void MacroAssembler::lcmp(Register Ra_hi, Register Ra_low,
                          Register Rb_hi, Register Rb_low,
                          Register Rresult) {

  Label check_low_parts, done;

  cmp(Ra_hi, Rb_hi);              // compare hi parts
  br(equal, true, pt, check_low_parts);
  delayed()->cmp(Ra_low, Rb_low); // test low parts

  // And, with an unsigned comparison, it does not matter if the numbers
  // are negative or not.
  // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff.
  // The second one is bigger (unsignedly).

  // Other notes: The first move in each triplet can be unconditional
  // (and therefore probably prefetchable).
  // And the equals case for the high part does not need testing,
  // since that triplet is reached only after finding the high halves differ.

  mov(-1, Rresult);
  ba(done);
  delayed()->movcc(greater, false, icc, 1, Rresult);

  bind(check_low_parts);

  mov(-1, Rresult);
  movcc(equal,           false, icc, 0, Rresult);
  movcc(greaterUnsigned, false, icc, 1, Rresult);

  bind(done);
}

void MacroAssembler::lneg(Register Rhi, Register Rlow) {
  subcc(G0, Rlow, Rlow);
  subc (G0, Rhi,  Rhi);
}

void MacroAssembler::lshl(Register Rin_high, Register Rin_low,
                          Register Rcount,
                          Register Rout_high, Register Rout_low,
                          Register Rtemp) {


  Register Ralt_count = Rtemp;
  Register Rxfer_bits = Rtemp;

  assert(Ralt_count != Rin_high
      && Ralt_count != Rin_low
      && Ralt_count != Rcount
      && Rxfer_bits != Rin_low
      && Rxfer_bits != Rin_high
      && Rxfer_bits != Rcount
      && Rxfer_bits != Rout_low
      && Rout_low   != Rin_high,
         "register alias checks");

  Label big_shift, done;

  // This code can be optimized to use the 64 bit shifts in V9.
  // Here we use the 32 bit shifts.

  and3(Rcount, 0x3f, Rcount);   // take least significant 6 bits
  subcc(Rcount, 31, Ralt_count);
  br(greater, true, pn, big_shift);
  delayed()->dec(Ralt_count);

  // shift < 32 bits, Ralt_count = Rcount-31

  // We get the transfer bits by shifting right by 32-count the low
  // register. This is done by shifting right by 31-count and then by one
  // more to take care of the special (rare) case where count is zero
  // (shifting by 32 would not work).

  neg(Ralt_count);

  // The order of the next two instructions is critical in the case where
  // Rin and Rout are the same and should not be reversed.

  srl(Rin_low, Ralt_count, Rxfer_bits); // shift right by 31-count
  if (Rcount != Rout_low) {
    sll(Rin_low, Rcount, Rout_low);     // low half
  }
  sll(Rin_high, Rcount, Rout_high);
  if (Rcount == Rout_low) {
    sll(Rin_low, Rcount, Rout_low);     // low half
  }
  srl(Rxfer_bits, 1, Rxfer_bits);       // shift right by one more
  ba(done);
  delayed()->or3(Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low

  // shift >= 32 bits, Ralt_count = Rcount-32
  bind(big_shift);
  sll(Rin_low, Ralt_count, Rout_high);
  clr(Rout_low);

  bind(done);
}
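// A worked example (comment added for illustration): for count == 8 the
// small-shift path computes Ralt_count = 31 - 8 = 23, so
//   Rxfer_bits = (Rin_low >> 23) >> 1 == Rin_low >> 24
//   Rout_low   =  Rin_low  << 8
//   Rout_high  = (Rin_high << 8) | (Rin_low >> 24)
// which is exactly a 64-bit left shift by 8 built from 32-bit shifts.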
1719 1720 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits 1721 subcc(Rcount, 31, Ralt_count); 1722 br(greater, true, pn, big_shift); 1723 delayed()->dec(Ralt_count); 1724 1725 // shift < 32 bits, Ralt_count = Rcount-31 1726 1727 // We get the transfer bits by shifting left by 32-count the high 1728 // register. This is done by shifting left by 31-count and then by one 1729 // more to take care of the special (rare) case where count is zero 1730 // (shifting by 32 would not work). 1731 1732 neg(Ralt_count); 1733 if (Rcount != Rout_low) { 1734 srl(Rin_low, Rcount, Rout_low); 1735 } 1736 1737 // The order of the next two instructions is critical in the case where 1738 // Rin and Rout are the same and should not be reversed. 1739 1740 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count 1741 sra(Rin_high, Rcount, Rout_high ); // high half 1742 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more 1743 if (Rcount == Rout_low) { 1744 srl(Rin_low, Rcount, Rout_low); 1745 } 1746 ba(done); 1747 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high 1748 1749 // shift >= 32 bits, Ralt_count = Rcount-32 1750 bind(big_shift); 1751 1752 sra(Rin_high, Ralt_count, Rout_low); 1753 sra(Rin_high, 31, Rout_high); // sign into hi 1754 1755 bind( done ); 1756 } 1757 1758 1759 1760 void MacroAssembler::lushr( Register Rin_high, Register Rin_low, 1761 Register Rcount, 1762 Register Rout_high, Register Rout_low, 1763 Register Rtemp ) { 1764 1765 Register Ralt_count = Rtemp; 1766 Register Rxfer_bits = Rtemp; 1767 1768 assert( Ralt_count != Rin_high 1769 && Ralt_count != Rin_low 1770 && Ralt_count != Rcount 1771 && Rxfer_bits != Rin_low 1772 && Rxfer_bits != Rin_high 1773 && Rxfer_bits != Rcount 1774 && Rxfer_bits != Rout_high 1775 && Rout_high != Rin_low, 1776 "register alias checks"); 1777 1778 Label big_shift, done; 1779 1780 // This code can be optimized to use the 64 bit shifts in V9. 1781 // Here we use the 32 bit shifts. 1782 1783 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits 1784 subcc(Rcount, 31, Ralt_count); 1785 br(greater, true, pn, big_shift); 1786 delayed()->dec(Ralt_count); 1787 1788 // shift < 32 bits, Ralt_count = Rcount-31 1789 1790 // We get the transfer bits by shifting left by 32-count the high 1791 // register. This is done by shifting left by 31-count and then by one 1792 // more to take care of the special (rare) case where count is zero 1793 // (shifting by 32 would not work). 1794 1795 neg(Ralt_count); 1796 if (Rcount != Rout_low) { 1797 srl(Rin_low, Rcount, Rout_low); 1798 } 1799 1800 // The order of the next two instructions is critical in the case where 1801 // Rin and Rout are the same and should not be reversed. 
1802 1803 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count 1804 srl(Rin_high, Rcount, Rout_high ); // high half 1805 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more 1806 if (Rcount == Rout_low) { 1807 srl(Rin_low, Rcount, Rout_low); 1808 } 1809 ba(done); 1810 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high 1811 1812 // shift >= 32 bits, Ralt_count = Rcount-32 1813 bind(big_shift); 1814 1815 srl(Rin_high, Ralt_count, Rout_low); 1816 clr(Rout_high); 1817 1818 bind( done ); 1819 } 1820 1821 void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) { 1822 cmp(Ra, Rb); 1823 mov(-1, Rresult); 1824 movcc(equal, false, xcc, 0, Rresult); 1825 movcc(greater, false, xcc, 1, Rresult); 1826 } 1827 1828 1829 void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) { 1830 switch (size_in_bytes) { 1831 case 8: ld_long(src, dst); break; 1832 case 4: ld( src, dst); break; 1833 case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break; 1834 case 1: is_signed ? ldsb(src, dst) : ldub(src, dst); break; 1835 default: ShouldNotReachHere(); 1836 } 1837 } 1838 1839 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) { 1840 switch (size_in_bytes) { 1841 case 8: st_long(src, dst); break; 1842 case 4: st( src, dst); break; 1843 case 2: sth( src, dst); break; 1844 case 1: stb( src, dst); break; 1845 default: ShouldNotReachHere(); 1846 } 1847 } 1848 1849 1850 void MacroAssembler::float_cmp( bool is_float, int unordered_result, 1851 FloatRegister Fa, FloatRegister Fb, 1852 Register Rresult) { 1853 if (is_float) { 1854 fcmp(FloatRegisterImpl::S, fcc0, Fa, Fb); 1855 } else { 1856 fcmp(FloatRegisterImpl::D, fcc0, Fa, Fb); 1857 } 1858 1859 if (unordered_result == 1) { 1860 mov( -1, Rresult); 1861 movcc(f_equal, true, fcc0, 0, Rresult); 1862 movcc(f_unorderedOrGreater, true, fcc0, 1, Rresult); 1863 } else { 1864 mov( -1, Rresult); 1865 movcc(f_equal, true, fcc0, 0, Rresult); 1866 movcc(f_greater, true, fcc0, 1, Rresult); 1867 } 1868 } 1869 1870 1871 void MacroAssembler::save_all_globals_into_locals() { 1872 mov(G1,L1); 1873 mov(G2,L2); 1874 mov(G3,L3); 1875 mov(G4,L4); 1876 mov(G5,L5); 1877 mov(G6,L6); 1878 mov(G7,L7); 1879 } 1880 1881 void MacroAssembler::restore_globals_from_locals() { 1882 mov(L1,G1); 1883 mov(L2,G2); 1884 mov(L3,G3); 1885 mov(L4,G4); 1886 mov(L5,G5); 1887 mov(L6,G6); 1888 mov(L7,G7); 1889 } 1890 1891 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, 1892 Register tmp, 1893 int offset) { 1894 intptr_t value = *delayed_value_addr; 1895 if (value != 0) 1896 return RegisterOrConstant(value + offset); 1897 1898 // load indirectly to solve generation ordering problem 1899 AddressLiteral a(delayed_value_addr); 1900 load_ptr_contents(a, tmp); 1901 1902 #ifdef ASSERT 1903 tst(tmp); 1904 breakpoint_trap(zero, xcc); 1905 #endif 1906 1907 if (offset != 0) 1908 add(tmp, offset, tmp); 1909 1910 return RegisterOrConstant(tmp); 1911 } 1912 1913 1914 RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 1915 assert(d.register_or_noreg() != G0, "lost side effect"); 1916 if ((s2.is_constant() && s2.as_constant() == 0) || 1917 (s2.is_register() && s2.as_register() == G0)) { 1918 // Do nothing, just move value. 
1919 if (s1.is_register()) { 1920 if (d.is_constant()) d = temp; 1921 mov(s1.as_register(), d.as_register()); 1922 return d; 1923 } else { 1924 return s1; 1925 } 1926 } 1927 1928 if (s1.is_register()) { 1929 assert_different_registers(s1.as_register(), temp); 1930 if (d.is_constant()) d = temp; 1931 andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 1932 return d; 1933 } else { 1934 if (s2.is_register()) { 1935 assert_different_registers(s2.as_register(), temp); 1936 if (d.is_constant()) d = temp; 1937 set(s1.as_constant(), temp); 1938 andn(temp, s2.as_register(), d.as_register()); 1939 return d; 1940 } else { 1941 intptr_t res = s1.as_constant() & ~s2.as_constant(); 1942 return res; 1943 } 1944 } 1945 } 1946 1947 RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 1948 assert(d.register_or_noreg() != G0, "lost side effect"); 1949 if ((s2.is_constant() && s2.as_constant() == 0) || 1950 (s2.is_register() && s2.as_register() == G0)) { 1951 // Do nothing, just move value. 1952 if (s1.is_register()) { 1953 if (d.is_constant()) d = temp; 1954 mov(s1.as_register(), d.as_register()); 1955 return d; 1956 } else { 1957 return s1; 1958 } 1959 } 1960 1961 if (s1.is_register()) { 1962 assert_different_registers(s1.as_register(), temp); 1963 if (d.is_constant()) d = temp; 1964 add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 1965 return d; 1966 } else { 1967 if (s2.is_register()) { 1968 assert_different_registers(s2.as_register(), temp); 1969 if (d.is_constant()) d = temp; 1970 add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register()); 1971 return d; 1972 } else { 1973 intptr_t res = s1.as_constant() + s2.as_constant(); 1974 return res; 1975 } 1976 } 1977 } 1978 1979 RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 1980 assert(d.register_or_noreg() != G0, "lost side effect"); 1981 if (!is_simm13(s2.constant_or_zero())) 1982 s2 = (s2.as_constant() & 0xFF); 1983 if ((s2.is_constant() && s2.as_constant() == 0) || 1984 (s2.is_register() && s2.as_register() == G0)) { 1985 // Do nothing, just move value. 1986 if (s1.is_register()) { 1987 if (d.is_constant()) d = temp; 1988 mov(s1.as_register(), d.as_register()); 1989 return d; 1990 } else { 1991 return s1; 1992 } 1993 } 1994 1995 if (s1.is_register()) { 1996 assert_different_registers(s1.as_register(), temp); 1997 if (d.is_constant()) d = temp; 1998 sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 1999 return d; 2000 } else { 2001 if (s2.is_register()) { 2002 assert_different_registers(s2.as_register(), temp); 2003 if (d.is_constant()) d = temp; 2004 set(s1.as_constant(), temp); 2005 sll_ptr(temp, s2.as_register(), d.as_register()); 2006 return d; 2007 } else { 2008 intptr_t res = s1.as_constant() << s2.as_constant(); 2009 return res; 2010 } 2011 } 2012 } 2013 2014 2015 // Look up the method for a megamorphic invokeinterface call. 2016 // The target method is determined by <intf_klass, itable_index>. 2017 // The receiver klass is in recv_klass. 2018 // On success, the result will be in method_result, and execution falls through. 2019 // On failure, execution transfers to the given label. 
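// In rough pseudo-code (a sketch of the emitted scan, not a literal
// transcription; recv_klass is pre-advanced by the scaled itable_index
// so the final load needs no further index arithmetic):
//   for (scan = itable_start(recv_klass); ; scan += scan_step) {
//     if (scan->interface == NULL)       goto L_no_such_interface;
//     if (scan->interface == intf_klass) break;
//   }
//   method_result = *(Method**)((address)recv_klass + scan->offset);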
2020 void MacroAssembler::lookup_interface_method(Register recv_klass, 2021 Register intf_klass, 2022 RegisterOrConstant itable_index, 2023 Register method_result, 2024 Register scan_temp, 2025 Register sethi_temp, 2026 Label& L_no_such_interface) { 2027 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp); 2028 assert(itable_index.is_constant() || itable_index.as_register() == method_result, 2029 "caller must use same register for non-constant itable index as for method"); 2030 2031 Label L_no_such_interface_restore; 2032 bool did_save = false; 2033 if (scan_temp == noreg || sethi_temp == noreg) { 2034 Register recv_2 = recv_klass->is_global() ? recv_klass : L0; 2035 Register intf_2 = intf_klass->is_global() ? intf_klass : L1; 2036 assert(method_result->is_global(), "must be able to return value"); 2037 scan_temp = L2; 2038 sethi_temp = L3; 2039 save_frame_and_mov(0, recv_klass, recv_2, intf_klass, intf_2); 2040 recv_klass = recv_2; 2041 intf_klass = intf_2; 2042 did_save = true; 2043 } 2044 2045 // Compute start of first itableOffsetEntry (which is at the end of the vtable) 2046 int vtable_base = in_bytes(Klass::vtable_start_offset()); 2047 int scan_step = itableOffsetEntry::size() * wordSize; 2048 int vte_size = vtableEntry::size_in_bytes(); 2049 2050 lduw(recv_klass, in_bytes(Klass::vtable_length_offset()), scan_temp); 2051 // %%% We should store the aligned, prescaled offset in the klassoop. 2052 // Then the next several instructions would fold away. 2053 2054 int itb_offset = vtable_base; 2055 int itb_scale = exact_log2(vtableEntry::size_in_bytes()); 2056 sll(scan_temp, itb_scale, scan_temp); 2057 add(scan_temp, itb_offset, scan_temp); 2058 add(recv_klass, scan_temp, scan_temp); 2059 2060 // Adjust recv_klass by scaled itable_index, so we can free itable_index. 2061 RegisterOrConstant itable_offset = itable_index; 2062 itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset); 2063 itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset); 2064 add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass); 2065 2066 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) { 2067 // if (scan->interface() == intf) { 2068 // result = (klass + scan->offset() + itable_index); 2069 // } 2070 // } 2071 Label L_search, L_found_method; 2072 2073 for (int peel = 1; peel >= 0; peel--) { 2074 // %%%% Could load both offset and interface in one ldx, if they were 2075 // in the opposite order. This would save a load. 2076 ld_ptr(scan_temp, itableOffsetEntry::interface_offset_in_bytes(), method_result); 2077 2078 // Check that this entry is non-null. A null entry means that 2079 // the receiver class doesn't implement the interface, and wasn't the 2080 // same as when the caller was compiled. 2081 bpr(Assembler::rc_z, false, Assembler::pn, method_result, did_save ? L_no_such_interface_restore : L_no_such_interface); 2082 delayed()->cmp(method_result, intf_klass); 2083 2084 if (peel) { 2085 brx(Assembler::equal, false, Assembler::pt, L_found_method); 2086 } else { 2087 brx(Assembler::notEqual, false, Assembler::pn, L_search); 2088 // (invert the test to fall through to found_method...) 2089 } 2090 delayed()->add(scan_temp, scan_step, scan_temp); 2091 2092 if (!peel) break; 2093 2094 bind(L_search); 2095 } 2096 2097 bind(L_found_method); 2098 2099 // Got a hit. 
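  // Epilog sketch: the delay slot above already advanced scan_temp past
  // the matching entry, so the interface's vtable offset is re-read at
  // scan_temp[-scan_step] and added to the pre-adjusted recv_klass to
  // reach the itable method slot.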
2100 int ito_offset = itableOffsetEntry::offset_offset_in_bytes(); 2101 // scan_temp[-scan_step] points to the vtable offset we need 2102 ito_offset -= scan_step; 2103 lduw(scan_temp, ito_offset, scan_temp); 2104 ld_ptr(recv_klass, scan_temp, method_result); 2105 2106 if (did_save) { 2107 Label L_done; 2108 ba(L_done); 2109 delayed()->restore(); 2110 2111 bind(L_no_such_interface_restore); 2112 ba(L_no_such_interface); 2113 delayed()->restore(); 2114 2115 bind(L_done); 2116 } 2117 } 2118 2119 2120 // virtual method calling 2121 void MacroAssembler::lookup_virtual_method(Register recv_klass, 2122 RegisterOrConstant vtable_index, 2123 Register method_result) { 2124 assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg()); 2125 Register sethi_temp = method_result; 2126 const int base = in_bytes(Klass::vtable_start_offset()) + 2127 // method pointer offset within the vtable entry: 2128 vtableEntry::method_offset_in_bytes(); 2129 RegisterOrConstant vtable_offset = vtable_index; 2130 // Each of the following three lines potentially generates an instruction. 2131 // But the total number of address formation instructions will always be 2132 // at most two, and will often be zero. In any case, it will be optimal. 2133 // If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x). 2134 // If vtable_index is a constant, we will have at most (set B+X<<N,t; ld_ptr k,t). 2135 vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size_in_bytes()), vtable_offset); 2136 vtable_offset = regcon_inc_ptr(vtable_offset, base, vtable_offset, sethi_temp); 2137 Address vtable_entry_addr(recv_klass, ensure_simm13_or_reg(vtable_offset, sethi_temp)); 2138 ld_ptr(vtable_entry_addr, method_result); 2139 } 2140 2141 2142 void MacroAssembler::check_klass_subtype(Register sub_klass, 2143 Register super_klass, 2144 Register temp_reg, 2145 Register temp2_reg, 2146 Label& L_success) { 2147 Register sub_2 = sub_klass; 2148 Register sup_2 = super_klass; 2149 if (!sub_2->is_global()) sub_2 = L0; 2150 if (!sup_2->is_global()) sup_2 = L1; 2151 bool did_save = false; 2152 if (temp_reg == noreg || temp2_reg == noreg) { 2153 temp_reg = L2; 2154 temp2_reg = L3; 2155 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 2156 sub_klass = sub_2; 2157 super_klass = sup_2; 2158 did_save = true; 2159 } 2160 Label L_failure, L_pop_to_failure, L_pop_to_success; 2161 check_klass_subtype_fast_path(sub_klass, super_klass, 2162 temp_reg, temp2_reg, 2163 (did_save ? &L_pop_to_success : &L_success), 2164 (did_save ? 
&L_pop_to_failure : &L_failure), NULL); 2165 2166 if (!did_save) 2167 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 2168 check_klass_subtype_slow_path(sub_2, sup_2, 2169 L2, L3, L4, L5, 2170 NULL, &L_pop_to_failure); 2171 2172 // on success: 2173 bind(L_pop_to_success); 2174 restore(); 2175 ba_short(L_success); 2176 2177 // on failure: 2178 bind(L_pop_to_failure); 2179 restore(); 2180 bind(L_failure); 2181 } 2182 2183 2184 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 2185 Register super_klass, 2186 Register temp_reg, 2187 Register temp2_reg, 2188 Label* L_success, 2189 Label* L_failure, 2190 Label* L_slow_path, 2191 RegisterOrConstant super_check_offset) { 2192 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 2193 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2194 2195 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 2196 bool need_slow_path = (must_load_sco || 2197 super_check_offset.constant_or_zero() == sco_offset); 2198 2199 assert_different_registers(sub_klass, super_klass, temp_reg); 2200 if (super_check_offset.is_register()) { 2201 assert_different_registers(sub_klass, super_klass, temp_reg, 2202 super_check_offset.as_register()); 2203 } else if (must_load_sco) { 2204 assert(temp2_reg != noreg, "supply either a temp or a register offset"); 2205 } 2206 2207 Label L_fallthrough; 2208 int label_nulls = 0; 2209 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 2210 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 2211 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } 2212 assert(label_nulls <= 1 || 2213 (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path), 2214 "at most one NULL in the batch, usually"); 2215 2216 // If the pointers are equal, we are done (e.g., String[] elements). 2217 // This self-check enables sharing of secondary supertype arrays among 2218 // non-primary types such as array-of-interface. Otherwise, each such 2219 // type would need its own customized SSA. 2220 // We move this check to the front of the fast path because many 2221 // type checks are in fact trivially successful in this manner, 2222 // so we get a nicely predicted branch right at the start of the check. 2223 cmp(super_klass, sub_klass); 2224 brx(Assembler::equal, false, Assembler::pn, *L_success); 2225 delayed()->nop(); 2226 2227 // Check the supertype display: 2228 if (must_load_sco) { 2229 // The super check offset is always positive... 2230 lduw(super_klass, sco_offset, temp2_reg); 2231 super_check_offset = RegisterOrConstant(temp2_reg); 2232 // super_check_offset is register. 2233 assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register()); 2234 } 2235 ld_ptr(sub_klass, super_check_offset, temp_reg); 2236 cmp(super_klass, temp_reg); 2237 2238 // This check has worked decisively for primary supers. 2239 // Secondary supers are sought in the super_cache ('super_cache_addr'). 2240 // (Secondary supers are interfaces and very deeply nested subtypes.) 2241 // This works in the same check above because of a tricky aliasing 2242 // between the super_cache and the primary super display elements. 2243 // (The 'super_check_addr' can address either, as the case requires.) 2244 // Note that the cache is updated below if it does not help us find 2245 // what we need immediately. 2246 // So if it was a primary super, we can just fail immediately. 
2247 // Otherwise, it's the slow path for us (no success at this point). 2248 2249 // Hacked ba(), which may only be used just before L_fallthrough. 2250 #define FINAL_JUMP(label) \ 2251 if (&(label) != &L_fallthrough) { \ 2252 ba(label); delayed()->nop(); \ 2253 } 2254 2255 if (super_check_offset.is_register()) { 2256 brx(Assembler::equal, false, Assembler::pn, *L_success); 2257 delayed()->cmp(super_check_offset.as_register(), sc_offset); 2258 2259 if (L_failure == &L_fallthrough) { 2260 brx(Assembler::equal, false, Assembler::pt, *L_slow_path); 2261 delayed()->nop(); 2262 } else { 2263 brx(Assembler::notEqual, false, Assembler::pn, *L_failure); 2264 delayed()->nop(); 2265 FINAL_JUMP(*L_slow_path); 2266 } 2267 } else if (super_check_offset.as_constant() == sc_offset) { 2268 // Need a slow path; fast failure is impossible. 2269 if (L_slow_path == &L_fallthrough) { 2270 brx(Assembler::equal, false, Assembler::pt, *L_success); 2271 delayed()->nop(); 2272 } else { 2273 brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path); 2274 delayed()->nop(); 2275 FINAL_JUMP(*L_success); 2276 } 2277 } else { 2278 // No slow path; it's a fast decision. 2279 if (L_failure == &L_fallthrough) { 2280 brx(Assembler::equal, false, Assembler::pt, *L_success); 2281 delayed()->nop(); 2282 } else { 2283 brx(Assembler::notEqual, false, Assembler::pn, *L_failure); 2284 delayed()->nop(); 2285 FINAL_JUMP(*L_success); 2286 } 2287 } 2288 2289 bind(L_fallthrough); 2290 2291 #undef FINAL_JUMP 2292 } 2293 2294 2295 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 2296 Register super_klass, 2297 Register count_temp, 2298 Register scan_temp, 2299 Register scratch_reg, 2300 Register coop_reg, 2301 Label* L_success, 2302 Label* L_failure) { 2303 assert_different_registers(sub_klass, super_klass, 2304 count_temp, scan_temp, scratch_reg, coop_reg); 2305 2306 Label L_fallthrough, L_loop; 2307 int label_nulls = 0; 2308 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 2309 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 2310 assert(label_nulls <= 1, "at most one NULL in the batch"); 2311 2312 // a couple of useful fields in sub_klass: 2313 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 2314 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 2315 2316 // Do a linear scan of the secondary super-klass chain. 2317 // This code is rarely used, so simplicity is a virtue here. 2318 2319 #ifndef PRODUCT 2320 int* pst_counter = &SharedRuntime::_partial_subtype_ctr; 2321 inc_counter((address) pst_counter, count_temp, scan_temp); 2322 #endif 2323 2324 // We will consult the secondary-super array. 2325 ld_ptr(sub_klass, ss_offset, scan_temp); 2326 2327 Register search_key = super_klass; 2328 2329 // Load the array length. (Positive movl does right thing on LP64.) 2330 lduw(scan_temp, Array<Klass*>::length_offset_in_bytes(), count_temp); 2331 2332 // Check for empty secondary super list 2333 tst(count_temp); 2334 2335 // In the array of super classes elements are pointer sized. 2336 int element_size = wordSize; 2337 2338 // Top of search loop 2339 bind(L_loop); 2340 br(Assembler::equal, false, Assembler::pn, *L_failure); 2341 delayed()->add(scan_temp, element_size, scan_temp); 2342 2343 // Skip the array header in all array accesses. 
2344 int elem_offset = Array<Klass*>::base_offset_in_bytes(); 2345 elem_offset -= element_size; // the scan pointer was pre-incremented also 2346 2347 // Load next super to check 2348 ld_ptr( scan_temp, elem_offset, scratch_reg ); 2349 2350 // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list 2351 cmp(scratch_reg, search_key); 2352 2353 // A miss means we are NOT a subtype and need to keep looping 2354 brx(Assembler::notEqual, false, Assembler::pn, L_loop); 2355 delayed()->deccc(count_temp); // decrement trip counter in delay slot 2356 2357 // Success. Cache the super we found and proceed in triumph. 2358 st_ptr(super_klass, sub_klass, sc_offset); 2359 2360 if (L_success != &L_fallthrough) { 2361 ba(*L_success); 2362 delayed()->nop(); 2363 } 2364 2365 bind(L_fallthrough); 2366 } 2367 2368 2369 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot, 2370 Register temp_reg, 2371 int extra_slot_offset) { 2372 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 2373 int stackElementSize = Interpreter::stackElementSize; 2374 int offset = extra_slot_offset * stackElementSize; 2375 if (arg_slot.is_constant()) { 2376 offset += arg_slot.as_constant() * stackElementSize; 2377 return offset; 2378 } else { 2379 assert(temp_reg != noreg, "must specify"); 2380 sll_ptr(arg_slot.as_register(), exact_log2(stackElementSize), temp_reg); 2381 if (offset != 0) 2382 add(temp_reg, offset, temp_reg); 2383 return temp_reg; 2384 } 2385 } 2386 2387 2388 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 2389 Register temp_reg, 2390 int extra_slot_offset) { 2391 return Address(Gargs, argument_offset(arg_slot, temp_reg, extra_slot_offset)); 2392 } 2393 2394 2395 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, 2396 Register temp_reg, 2397 Label& done, Label* slow_case, 2398 BiasedLockingCounters* counters) { 2399 assert(UseBiasedLocking, "why call this otherwise?"); 2400 2401 if (PrintBiasedLockingStatistics) { 2402 assert_different_registers(obj_reg, mark_reg, temp_reg, O7); 2403 if (counters == NULL) 2404 counters = BiasedLocking::counters(); 2405 } 2406 2407 Label cas_label; 2408 2409 // Biased locking 2410 // See whether the lock is currently biased toward our thread and 2411 // whether the epoch is still valid 2412 // Note that the runtime guarantees sufficient alignment of JavaThread 2413 // pointers to allow age to be placed into low bits 2414 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout"); 2415 and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); 2416 cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label); 2417 2418 load_klass(obj_reg, temp_reg); 2419 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2420 or3(G2_thread, temp_reg, temp_reg); 2421 xor3(mark_reg, temp_reg, temp_reg); 2422 andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg); 2423 if (counters != NULL) { 2424 cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg); 2425 // Reload mark_reg as we may need it later 2426 ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg); 2427 } 2428 brx(Assembler::equal, true, Assembler::pt, done); 2429 delayed()->nop(); 2430 2431 Label try_revoke_bias; 2432 Label try_rebias; 2433 Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes()); 2434 
assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2435 2436 // At this point we know that the header has the bias pattern and 2437 // that we are not the bias owner in the current epoch. We need to 2438 // figure out more details about the state of the header in order to 2439 // know what operations can be legally performed on the object's 2440 // header. 2441 2442 // If the low three bits in the xor result aren't clear, that means 2443 // the prototype header is no longer biased and we have to revoke 2444 // the bias on this object. 2445 btst(markOopDesc::biased_lock_mask_in_place, temp_reg); 2446 brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias); 2447 2448 // Biasing is still enabled for this data type. See whether the 2449 // epoch of the current bias is still valid, meaning that the epoch 2450 // bits of the mark word are equal to the epoch bits of the 2451 // prototype header. (Note that the prototype header's epoch bits 2452 // only change at a safepoint.) If not, attempt to rebias the object 2453 // toward the current thread. Note that we must be absolutely sure 2454 // that the current epoch is invalid in order to do this because 2455 // otherwise the manipulations it performs on the mark word are 2456 // illegal. 2457 delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg); 2458 brx(Assembler::notZero, false, Assembler::pn, try_rebias); 2459 2460 // The epoch of the current bias is still valid but we know nothing 2461 // about the owner; it might be set or it might be clear. Try to 2462 // acquire the bias of the object using an atomic operation. If this 2463 // fails we will go in to the runtime to revoke the object's bias. 2464 // Note that we first construct the presumed unbiased header so we 2465 // don't accidentally blow away another thread's valid bias. 2466 delayed()->and3(mark_reg, 2467 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place, 2468 mark_reg); 2469 or3(G2_thread, mark_reg, temp_reg); 2470 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2471 // If the biasing toward our thread failed, this means that 2472 // another thread succeeded in biasing it toward itself and we 2473 // need to revoke that bias. The revocation will occur in the 2474 // interpreter runtime in the slow case. 2475 cmp(mark_reg, temp_reg); 2476 if (counters != NULL) { 2477 cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg); 2478 } 2479 if (slow_case != NULL) { 2480 brx(Assembler::notEqual, true, Assembler::pn, *slow_case); 2481 delayed()->nop(); 2482 } 2483 ba_short(done); 2484 2485 bind(try_rebias); 2486 // At this point we know the epoch has expired, meaning that the 2487 // current "bias owner", if any, is actually invalid. Under these 2488 // circumstances _only_, we are allowed to use the current header's 2489 // value as the comparison value when doing the cas to acquire the 2490 // bias in the current epoch. In other words, we allow transfer of 2491 // the bias from one thread to another directly in this situation. 2492 // 2493 // FIXME: due to a lack of registers we currently blow away the age 2494 // bits in this situation. Should attempt to preserve them. 
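  // In rough C-like terms the rebias attempt below is (a sketch, not
  // emitted code; "proto" abbreviates the klass prototype header):
  //   new_mark = proto | (intptr_t) G2_thread;
  //   old      = CAS(&obj->mark, mark_reg /* expected */, new_mark);
  //   if (old != mark_reg) goto slow_case;  // lost the race: revoke in the runtime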
2495 load_klass(obj_reg, temp_reg); 2496 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2497 or3(G2_thread, temp_reg, temp_reg); 2498 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2499 // If the biasing toward our thread failed, this means that 2500 // another thread succeeded in biasing it toward itself and we 2501 // need to revoke that bias. The revocation will occur in the 2502 // interpreter runtime in the slow case. 2503 cmp(mark_reg, temp_reg); 2504 if (counters != NULL) { 2505 cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg); 2506 } 2507 if (slow_case != NULL) { 2508 brx(Assembler::notEqual, true, Assembler::pn, *slow_case); 2509 delayed()->nop(); 2510 } 2511 ba_short(done); 2512 2513 bind(try_revoke_bias); 2514 // The prototype mark in the klass doesn't have the bias bit set any 2515 // more, indicating that objects of this data type are not supposed 2516 // to be biased any more. We are going to try to reset the mark of 2517 // this object to the prototype value and fall through to the 2518 // CAS-based locking scheme. Note that if our CAS fails, it means 2519 // that another thread raced us for the privilege of revoking the 2520 // bias of this particular object, so it's okay to continue in the 2521 // normal locking code. 2522 // 2523 // FIXME: due to a lack of registers we currently blow away the age 2524 // bits in this situation. Should attempt to preserve them. 2525 load_klass(obj_reg, temp_reg); 2526 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2527 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2528 // Fall through to the normal CAS-based lock, because no matter what 2529 // the result of the above CAS, some thread must have succeeded in 2530 // removing the bias bit from the object's header. 2531 if (counters != NULL) { 2532 cmp(mark_reg, temp_reg); 2533 cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg); 2534 } 2535 2536 bind(cas_label); 2537 } 2538 2539 void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done, 2540 bool allow_delay_slot_filling) { 2541 // Check for biased locking unlock case, which is a no-op 2542 // Note: we do not have to check the thread ID for two reasons. 2543 // First, the interpreter checks for IllegalMonitorStateException at 2544 // a higher level. Second, if the bias was revoked while we held the 2545 // lock, the object could not be rebiased toward another thread, so 2546 // the bias bit would be clear. 2547 ld_ptr(mark_addr, temp_reg); 2548 and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); 2549 cmp(temp_reg, markOopDesc::biased_lock_pattern); 2550 brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done); 2551 delayed(); 2552 if (!allow_delay_slot_filling) { 2553 nop(); 2554 } 2555 } 2556 2557 2558 // compiler_lock_object() and compiler_unlock_object() are direct transliterations 2559 // of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments. 2560 // The code could be tightened up considerably. 2561 // 2562 // box->dhw disposition - post-conditions at DONE_LABEL. 2563 // - Successful inflated lock: box->dhw != 0. 2564 // Any non-zero value suffices. 2565 // Consider G2_thread, rsp, boxReg, or markOopDesc::unused_mark() 2566 // - Successful Stack-lock: box->dhw == mark. 2567 // box->dhw must contain the displaced mark word value 2568 // - Failure -- icc.ZFlag == 0 and box->dhw is undefined. 
2569 //       The slow-path fast_enter() and slow_enter() operators
2570 //       are responsible for setting box->dhw = NonZero (typically markOopDesc::unused_mark()).
2571 //   -- Biased: box->dhw is undefined
2572 //
2573 // SPARC refworkload performance - specifically jetstream and scimark - is
2574 // extremely sensitive to the size of the code emitted by compiler_lock_object
2575 // and compiler_unlock_object.  Critically, the key factor is code size, not path
2576 // length.  (Simple experiments to pad CLO with unexecuted NOPs demonstrate the
2577 // effect).
2578
2579
2580 void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
2581                                           Register Rbox, Register Rscratch,
2582                                           BiasedLockingCounters* counters,
2583                                           bool try_bias) {
2584   Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
2585
2586   verify_oop(Roop);
2587   Label done ;
2588
2589   if (counters != NULL) {
2590     inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch);
2591   }
2592
2593   if (EmitSync & 1) {
2594     mov(3, Rscratch);
2595     st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2596     cmp(SP, G0);
2597     return ;
2598   }
2599
2600   if (EmitSync & 2) {
2601
2602     // Fetch object's markword
2603     ld_ptr(mark_addr, Rmark);
2604
2605     if (try_bias) {
2606       biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
2607     }
2608
2609     // Save Rbox in Rscratch to be used for the cas operation
2610     mov(Rbox, Rscratch);
2611
2612     // set Rmark to markOop | markOopDesc::unlocked_value
2613     or3(Rmark, markOopDesc::unlocked_value, Rmark);
2614
2615     // Initialize the box.  (Must happen before we update the object mark!)
2616     st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
2617
2618     // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
2619     assert(mark_addr.disp() == 0, "cas must take a zero displacement");
2620     cas_ptr(mark_addr.base(), Rmark, Rscratch);
2621
2622     // if compare/exchange succeeded we found an unlocked object and we now have locked it
2623     // hence we are done
2624     cmp(Rmark, Rscratch);
2625     sub(Rscratch, STACK_BIAS, Rscratch);
2626     brx(Assembler::equal, false, Assembler::pt, done);
2627     delayed()->sub(Rscratch, SP, Rscratch);  // pull next instruction into delay slot
2628
2629     // we did not find an unlocked object so see if this is a recursive case
2630     // sub(Rscratch, SP, Rscratch);
2631     assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
2632     andcc(Rscratch, 0xfffff003, Rscratch);
2633     st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2634     bind (done);
2635     return ;
2636   }
2637
2638   Label Egress ;
2639
2640   if (EmitSync & 256) {
2641     Label IsInflated ;
2642
2643     ld_ptr(mark_addr, Rmark);           // fetch obj->mark
2644     // Triage: biased, stack-locked, neutral, inflated
2645     if (try_bias) {
2646       biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
2647       // Invariant: if control reaches this point in the emitted stream
2648       // then Rmark has not been modified.
2649     }
2650
2651     // Store mark into displaced mark field in the on-stack basic-lock "box"
2652     // Critically, this must happen before the CAS
2653     // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty.
2654     st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
2655     andcc(Rmark, 2, G0);
2656     brx(Assembler::notZero, false, Assembler::pn, IsInflated);
2657     delayed()->
2658
2659     // Try stack-lock acquisition.
2660 // Beware: the 1st instruction is in a delay slot 2661 mov(Rbox, Rscratch); 2662 or3(Rmark, markOopDesc::unlocked_value, Rmark); 2663 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2664 cas_ptr(mark_addr.base(), Rmark, Rscratch); 2665 cmp(Rmark, Rscratch); 2666 brx(Assembler::equal, false, Assembler::pt, done); 2667 delayed()->sub(Rscratch, SP, Rscratch); 2668 2669 // Stack-lock attempt failed - check for recursive stack-lock. 2670 // See the comments below about how we might remove this case. 2671 sub(Rscratch, STACK_BIAS, Rscratch); 2672 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); 2673 andcc(Rscratch, 0xfffff003, Rscratch); 2674 br(Assembler::always, false, Assembler::pt, done); 2675 delayed()-> st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2676 2677 bind(IsInflated); 2678 if (EmitSync & 64) { 2679 // If m->owner != null goto IsLocked 2680 // Pessimistic form: Test-and-CAS vs CAS 2681 // The optimistic form avoids RTS->RTO cache line upgrades. 2682 ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rscratch); 2683 andcc(Rscratch, Rscratch, G0); 2684 brx(Assembler::notZero, false, Assembler::pn, done); 2685 delayed()->nop(); 2686 // m->owner == null : it's unlocked. 2687 } 2688 2689 // Try to CAS m->owner from null to Self 2690 // Invariant: if we acquire the lock then _recursions should be 0. 2691 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark); 2692 mov(G2_thread, Rscratch); 2693 cas_ptr(Rmark, G0, Rscratch); 2694 cmp(Rscratch, G0); 2695 // Intentional fall-through into done 2696 } else { 2697 // Aggressively avoid the Store-before-CAS penalty 2698 // Defer the store into box->dhw until after the CAS 2699 Label IsInflated, Recursive ; 2700 2701 // Anticipate CAS -- Avoid RTS->RTO upgrade 2702 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); 2703 2704 ld_ptr(mark_addr, Rmark); // fetch obj->mark 2705 // Triage: biased, stack-locked, neutral, inflated 2706 2707 if (try_bias) { 2708 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters); 2709 // Invariant: if control reaches this point in the emitted stream 2710 // then Rmark has not been modified. 2711 } 2712 andcc(Rmark, 2, G0); 2713 brx(Assembler::notZero, false, Assembler::pn, IsInflated); 2714 delayed()-> // Beware - dangling delay-slot 2715 2716 // Try stack-lock acquisition. 2717 // Transiently install BUSY (0) encoding in the mark word. 2718 // if the CAS of 0 into the mark was successful then we execute: 2719 // ST box->dhw = mark -- save fetched mark in on-stack basiclock box 2720 // ST obj->mark = box -- overwrite transient 0 value 2721 // This presumes TSO, of course. 2722 2723 mov(0, Rscratch); 2724 or3(Rmark, markOopDesc::unlocked_value, Rmark); 2725 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2726 cas_ptr(mark_addr.base(), Rmark, Rscratch); 2727 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); 2728 cmp(Rscratch, Rmark); 2729 brx(Assembler::notZero, false, Assembler::pn, Recursive); 2730 delayed()->st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2731 if (counters != NULL) { 2732 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch); 2733 } 2734 ba(done); 2735 delayed()->st_ptr(Rbox, mark_addr); 2736 2737 bind(Recursive); 2738 // Stack-lock attempt failed - check for recursive stack-lock. 2739 // Tests show that we can remove the recursive case with no impact 2740 // on refworkload 0.83. 
If we need to reduce the size of the code
2741 // emitted by compiler_lock_object() the recursive case is a perfect
2742 // candidate.
2743 //
2744 // A more extreme idea is to always inflate on stack-lock recursion.
2745 // This lets us eliminate the recursive checks in compiler_lock_object
2746 // and compiler_unlock_object and the (box->dhw == 0) encoding.
2747 // A brief experiment - requiring changes to synchronizer.cpp and the
2748 // interpreter - showed a performance *increase*.  In the same experiment I eliminated
2749 // the fast-path stack-lock code from the interpreter and always passed
2750 // control to the "slow" operators in synchronizer.cpp.
2751
2752     // RScratch contains the fetched obj->mark value from the failed CAS.
2753     sub(Rscratch, STACK_BIAS, Rscratch);
2754     sub(Rscratch, SP, Rscratch);
2755     assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
2756     andcc(Rscratch, 0xfffff003, Rscratch);
2757     if (counters != NULL) {
2758       // Accounting needs the Rscratch register
2759       st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2760       cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
2761       ba_short(done);
2762     } else {
2763       ba(done);
2764       delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2765     }
2766
2767     bind (IsInflated);
2768
2769     // Try to CAS m->owner from null to Self
2770     // Invariant: if we acquire the lock then _recursions should be 0.
2771     add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
2772     mov(G2_thread, Rscratch);
2773     cas_ptr(Rmark, G0, Rscratch);
2774     andcc(Rscratch, Rscratch, G0);             // set ICCs for done: icc.zf iff success
2775     // set icc.zf : 1=success 0=failure
2776     // ST box->displaced_header = NonZero.
2777     // Any non-zero value suffices:
2778     //    markOopDesc::unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
2779     st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
2780     // Intentional fall-through into done
2781   }
2782
2783   bind (done);
2784 }
2785
2786 void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
2787                                             Register Rbox, Register Rscratch,
2788                                             bool try_bias) {
2789   Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
2790
2791   Label done ;
2792
2793   if (EmitSync & 4) {
2794     cmp(SP, G0);
2795     return ;
2796   }
2797
2798   if (EmitSync & 8) {
2799     if (try_bias) {
2800       biased_locking_exit(mark_addr, Rscratch, done);
2801     }
2802
2803     // Test first if it is a fast recursive unlock
2804     ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
2805     br_null_short(Rmark, Assembler::pt, done);
2806
2807     // Check if it is still a lightweight lock; this is true if we see
2808     // the stack address of the basicLock in the markOop of the object
2809     assert(mark_addr.disp() == 0, "cas must take a zero displacement");
2810     cas_ptr(mark_addr.base(), Rbox, Rmark);
2811     ba(done);
2812     delayed()->cmp(Rbox, Rmark);
2813     bind(done);
2814     return ;
2815   }
2816
2817   // Beware ... If the aggregate size of the code emitted by CLO and CUO
2818   // is too large, performance rolls abruptly off a cliff.
2819   // This could be related to inlining policies, code cache management, or
2820   // I$ effects.
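  // Triage sketch for the code below (comments only; "dhw" is the
  // displaced header word stored in the on-stack BasicLock box):
  //   if (box->dhw == 0)  goto done;      // recursive stack-lock: nothing to undo
  //   if (obj->mark & 2)  ...             // inflated: 1-0 monitor exit, below
  //   else                goto LStacked;  // plain stack-lock: CAS the mark back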
2821 Label LStacked ; 2822 2823 if (try_bias) { 2824 // TODO: eliminate redundant LDs of obj->mark 2825 biased_locking_exit(mark_addr, Rscratch, done); 2826 } 2827 2828 ld_ptr(Roop, oopDesc::mark_offset_in_bytes(), Rmark); 2829 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch); 2830 andcc(Rscratch, Rscratch, G0); 2831 brx(Assembler::zero, false, Assembler::pn, done); 2832 delayed()->nop(); // consider: relocate fetch of mark, above, into this DS 2833 andcc(Rmark, 2, G0); 2834 brx(Assembler::zero, false, Assembler::pt, LStacked); 2835 delayed()->nop(); 2836 2837 // It's inflated 2838 // Conceptually we need a #loadstore|#storestore "release" MEMBAR before 2839 // the ST of 0 into _owner which releases the lock. This prevents loads 2840 // and stores within the critical section from reordering (floating) 2841 // past the store that releases the lock. But TSO is a strong memory model 2842 // and that particular flavor of barrier is a noop, so we can safely elide it. 2843 // Note that we use 1-0 locking by default for the inflated case. We 2844 // close the resultant (and rare) race by having contended threads in 2845 // monitorenter periodically poll _owner. 2846 2847 if (EmitSync & 1024) { 2848 // Emit code to check that _owner == Self 2849 // We could fold the _owner test into subsequent code more efficiently 2850 // than using a stand-alone check, but since _owner checking is off by 2851 // default we don't bother. We also might consider predicating the 2852 // _owner==Self check on Xcheck:jni or running on a debug build. 2853 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), Rscratch); 2854 orcc(Rscratch, G0, G0); 2855 brx(Assembler::notZero, false, Assembler::pn, done); 2856 delayed()->nop(); 2857 } 2858 2859 if (EmitSync & 512) { 2860 // classic lock release code absent 1-0 locking 2861 // m->Owner = null; 2862 // membar #storeload 2863 // if (m->cxq|m->EntryList) == null goto Success 2864 // if (m->succ != null) goto Success 2865 // if CAS (&m->Owner,0,Self) != 0 goto Success 2866 // goto SlowPath 2867 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), Rbox); 2868 orcc(Rbox, G0, G0); 2869 brx(Assembler::notZero, false, Assembler::pn, done); 2870 delayed()->nop(); 2871 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 2872 if (os::is_MP()) { membar(StoreLoad); } 2873 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)), Rscratch); 2874 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)), Rbox); 2875 orcc(Rbox, Rscratch, G0); 2876 brx(Assembler::zero, false, Assembler::pt, done); 2877 delayed()-> 2878 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch); 2879 andcc(Rscratch, Rscratch, G0); 2880 brx(Assembler::notZero, false, Assembler::pt, done); 2881 delayed()->andcc(G0, G0, G0); 2882 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark); 2883 mov(G2_thread, Rscratch); 2884 cas_ptr(Rmark, G0, Rscratch); 2885 cmp(Rscratch, G0); 2886 // invert icc.zf and goto done 2887 brx(Assembler::notZero, false, Assembler::pt, done); 2888 delayed()->cmp(G0, G0); 2889 br(Assembler::always, false, Assembler::pt, done); 2890 delayed()->cmp(G0, 1); 2891 } else { 2892 // 1-0 form : avoids CAS and MEMBAR in the common case 2893 // Do not bother to ratify that m->Owner == Self. 
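    // Sketch of the default 1-0 exit emitted below, in the same style as
    // the variant above (zf=1 at done means success):
    //   if (m->recursions != 0) goto done with zf=0   // slow path
    //   m->Owner = null
    //   if ((m->cxq|m->EntryList) == 0) goto done with zf=1
    //   membar #storeload
    //   if (m->succ != null) goto done with zf=1
    //   if (CAS(&m->Owner, null, Self) != null) goto done with zf=1
    //   goto done with zf=0   // we reacquired the lock; slow path must wake a successor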
2894     ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), Rbox);
2895     orcc(Rbox, G0, G0);
2896     brx(Assembler::notZero, false, Assembler::pn, done);
2897     delayed()->
2898     ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)), Rscratch);
2899     ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)), Rbox);
2900     orcc(Rbox, Rscratch, G0);
2901     if (EmitSync & 16384) {
2902       // As an optional optimization, if (EntryList|cxq) != null and _succ is null then
2903       // we should transfer control directly to the slow-path.
2904       // This test makes the reacquire operation below very infrequent.
2905       // The logic is equivalent to :
2906       //   if (cxq|EntryList) == null : Owner=null; goto Success
2907       //   if succ == null : goto SlowPath
2908       //   Owner=null; membar #storeload
2909       //   if succ != null : goto Success
2910       //   if CAS(&Owner,null,Self) != null goto Success
2911       //   goto SlowPath
2912       brx(Assembler::zero, true, Assembler::pt, done);
2913       delayed()->
2914       st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2915       ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch);
2916       andcc(Rscratch, Rscratch, G0) ;
2917       brx(Assembler::zero, false, Assembler::pt, done);
2918       delayed()->orcc(G0, 1, G0);
2919       st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2920     } else {
2921       brx(Assembler::zero, false, Assembler::pt, done);
2922       delayed()->
2923       st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2924     }
2925     if (os::is_MP()) { membar(StoreLoad); }
2926     // Check that _succ is (or remains) non-zero
2927     ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch);
2928     andcc(Rscratch, Rscratch, G0);
2929     brx(Assembler::notZero, false, Assembler::pt, done);
2930     delayed()->andcc(G0, G0, G0);
2931     add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
2932     mov(G2_thread, Rscratch);
2933     cas_ptr(Rmark, G0, Rscratch);
2934     cmp(Rscratch, G0);
2935     // invert icc.zf and goto done
2936     // A slightly better v8+/v9 idiom would be the following:
2937     //   movrnz Rscratch,1,Rscratch
2938     //   ba done
2939     //   xorcc Rscratch,1,G0
2940     // In v8+ mode the idiom would be valid IFF Rscratch was a G or O register
2941     brx(Assembler::notZero, false, Assembler::pt, done);
2942     delayed()->cmp(G0, G0);
2943     br(Assembler::always, false, Assembler::pt, done);
2944     delayed()->cmp(G0, 1);
2945   }
2946
2947   bind (LStacked);
2948   // Consider: we could replace the expensive CAS in the exit
2949   // path with a simple ST of the displaced mark value fetched from
2950   // the on-stack basiclock box.  That admits a race where a thread T2
2951   // in the slow lock path -- inflating with monitor M -- could race a
2952   // thread T1 in the fast unlock path, resulting in a missed wakeup for T2.
2953   // More precisely T1 in the stack-lock unlock path could "stomp" the
2954   // inflated mark value M installed by T2, resulting in an orphan
2955   // object monitor M and T2 becoming stranded.  We can remedy that situation
2956   // by having T2 periodically poll the object's mark word using timed wait
2957   // operations.  If T2 discovers that a stomp has occurred it vacates
2958   // the monitor M and wakes any other threads stranded on the now-orphan M.
2959   // In addition the monitor scavenger, which performs deflation,
2960   // would also need to check for orphan monitors and stranded threads.
2961   //
2962   // Finally, inflation is also used when T2 needs to assign a hashCode
2963   // to O and O is stack-locked by T1.
The "stomp" race could cause 2964 // an assigned hashCode value to be lost. We can avoid that condition 2965 // and provide the necessary hashCode stability invariants by ensuring 2966 // that hashCode generation is idempotent between copying GCs. 2967 // For example we could compute the hashCode of an object O as 2968 // O's heap address XOR some high quality RNG value that is refreshed 2969 // at GC-time. The monitor scavenger would install the hashCode 2970 // found in any orphan monitors. Again, the mechanism admits a 2971 // lost-update "stomp" WAW race but detects and recovers as needed. 2972 // 2973 // A prototype implementation showed excellent results, although 2974 // the scavenger and timeout code was rather involved. 2975 2976 cas_ptr(mark_addr.base(), Rbox, Rscratch); 2977 cmp(Rbox, Rscratch); 2978 // Intentional fall through into done ... 2979 2980 bind(done); 2981 } 2982 2983 2984 2985 void MacroAssembler::print_CPU_state() { 2986 // %%%%% need to implement this 2987 } 2988 2989 void MacroAssembler::verify_FPU(int stack_depth, const char* s) { 2990 // %%%%% need to implement this 2991 } 2992 2993 void MacroAssembler::push_IU_state() { 2994 // %%%%% need to implement this 2995 } 2996 2997 2998 void MacroAssembler::pop_IU_state() { 2999 // %%%%% need to implement this 3000 } 3001 3002 3003 void MacroAssembler::push_FPU_state() { 3004 // %%%%% need to implement this 3005 } 3006 3007 3008 void MacroAssembler::pop_FPU_state() { 3009 // %%%%% need to implement this 3010 } 3011 3012 3013 void MacroAssembler::push_CPU_state() { 3014 // %%%%% need to implement this 3015 } 3016 3017 3018 void MacroAssembler::pop_CPU_state() { 3019 // %%%%% need to implement this 3020 } 3021 3022 3023 3024 void MacroAssembler::verify_tlab() { 3025 #ifdef ASSERT 3026 if (UseTLAB && VerifyOops) { 3027 Label next, next2, ok; 3028 Register t1 = L0; 3029 Register t2 = L1; 3030 Register t3 = L2; 3031 3032 save_frame(0); 3033 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 3034 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2); 3035 or3(t1, t2, t3); 3036 cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next); 3037 STOP("assert(top >= start)"); 3038 should_not_reach_here(); 3039 3040 bind(next); 3041 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 3042 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2); 3043 or3(t3, t2, t3); 3044 cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2); 3045 STOP("assert(top <= end)"); 3046 should_not_reach_here(); 3047 3048 bind(next2); 3049 and3(t3, MinObjAlignmentInBytesMask, t3); 3050 cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok); 3051 STOP("assert(aligned)"); 3052 should_not_reach_here(); 3053 3054 bind(ok); 3055 restore(); 3056 } 3057 #endif 3058 } 3059 3060 3061 void MacroAssembler::eden_allocate( 3062 Register obj, // result: pointer to object after successful allocation 3063 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 3064 int con_size_in_bytes, // object size in bytes if known at compile time 3065 Register t1, // temp register 3066 Register t2, // temp register 3067 Label& slow_case // continuation point if fast allocation fails 3068 ){ 3069 // make sure arguments make sense 3070 assert_different_registers(obj, var_size_in_bytes, t1, t2); 3071 assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size"); 3072 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, 
"object size is not multiple of alignment"); 3073 3074 if (!Universe::heap()->supports_inline_contig_alloc()) { 3075 // No allocation in the shared eden. 3076 ba(slow_case); 3077 delayed()->nop(); 3078 } else { 3079 // get eden boundaries 3080 // note: we need both top & top_addr! 3081 const Register top_addr = t1; 3082 const Register end = t2; 3083 3084 CollectedHeap* ch = Universe::heap(); 3085 set((intx)ch->top_addr(), top_addr); 3086 intx delta = (intx)ch->end_addr() - (intx)ch->top_addr(); 3087 ld_ptr(top_addr, delta, end); 3088 ld_ptr(top_addr, 0, obj); 3089 3090 // try to allocate 3091 Label retry; 3092 bind(retry); 3093 #ifdef ASSERT 3094 // make sure eden top is properly aligned 3095 { 3096 Label L; 3097 btst(MinObjAlignmentInBytesMask, obj); 3098 br(Assembler::zero, false, Assembler::pt, L); 3099 delayed()->nop(); 3100 STOP("eden top is not properly aligned"); 3101 bind(L); 3102 } 3103 #endif // ASSERT 3104 const Register free = end; 3105 sub(end, obj, free); // compute amount of free space 3106 if (var_size_in_bytes->is_valid()) { 3107 // size is unknown at compile time 3108 cmp(free, var_size_in_bytes); 3109 brx(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case 3110 delayed()->add(obj, var_size_in_bytes, end); 3111 } else { 3112 // size is known at compile time 3113 cmp(free, con_size_in_bytes); 3114 brx(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case 3115 delayed()->add(obj, con_size_in_bytes, end); 3116 } 3117 // Compare obj with the value at top_addr; if still equal, swap the value of 3118 // end with the value at top_addr. If not equal, read the value at top_addr 3119 // into end. 3120 cas_ptr(top_addr, obj, end); 3121 // if someone beat us on the allocation, try again, otherwise continue 3122 cmp(obj, end); 3123 brx(Assembler::notEqual, false, Assembler::pn, retry); 3124 delayed()->mov(end, obj); // nop if successfull since obj == end 3125 3126 #ifdef ASSERT 3127 // make sure eden top is properly aligned 3128 { 3129 Label L; 3130 const Register top_addr = t1; 3131 3132 set((intx)ch->top_addr(), top_addr); 3133 ld_ptr(top_addr, 0, top_addr); 3134 btst(MinObjAlignmentInBytesMask, top_addr); 3135 br(Assembler::zero, false, Assembler::pt, L); 3136 delayed()->nop(); 3137 STOP("eden top is not properly aligned"); 3138 bind(L); 3139 } 3140 #endif // ASSERT 3141 } 3142 } 3143 3144 3145 void MacroAssembler::tlab_allocate( 3146 Register obj, // result: pointer to object after successful allocation 3147 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 3148 int con_size_in_bytes, // object size in bytes if known at compile time 3149 Register t1, // temp register 3150 Label& slow_case // continuation point if fast allocation fails 3151 ){ 3152 // make sure arguments make sense 3153 assert_different_registers(obj, var_size_in_bytes, t1); 3154 assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size"); 3155 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment"); 3156 3157 const Register free = t1; 3158 3159 verify_tlab(); 3160 3161 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj); 3162 3163 // calculate amount of free space 3164 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free); 3165 sub(free, obj, free); 3166 3167 Label done; 3168 if (var_size_in_bytes == noreg) { 3169 cmp(free, con_size_in_bytes); 3170 } else { 3171 
    cmp(free, var_size_in_bytes);
3172   }
3173   br(Assembler::less, false, Assembler::pn, slow_case);
3174   // calculate the new top pointer
3175   if (var_size_in_bytes == noreg) {
3176     delayed()->add(obj, con_size_in_bytes, free);
3177   } else {
3178     delayed()->add(obj, var_size_in_bytes, free);
3179   }
3180
3181   bind(done);
3182
3183 #ifdef ASSERT
3184   // make sure new free pointer is properly aligned
3185   {
3186     Label L;
3187     btst(MinObjAlignmentInBytesMask, free);
3188     br(Assembler::zero, false, Assembler::pt, L);
3189     delayed()->nop();
3190     STOP("updated TLAB free is not properly aligned");
3191     bind(L);
3192   }
3193 #endif // ASSERT
3194
3195   // update the tlab top pointer
3196   st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
3197   verify_tlab();
3198 }
3199
3200
3201 void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) {
3202   Register top = O0;
3203   Register t1 = G1;
3204   Register t2 = G3;
3205   Register t3 = O1;
3206   assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */);
3207   Label do_refill, discard_tlab;
3208
3209   if (!Universe::heap()->supports_inline_contig_alloc()) {
3210     // No allocation in the shared eden.
3211     ba(slow_case);
3212     delayed()->nop();
3213   }
3214
3215   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top);
3216   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1);
3217   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2);
3218
3219   // calculate amount of free space
3220   sub(t1, top, t1);
3221   srl_ptr(t1, LogHeapWordSize, t1);
3222
3223   // Retain tlab and allocate object in shared space if
3224   // the amount free in the tlab is too large to discard.
3225   cmp(t1, t2);
3226
3227   brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab);
3228   // increment waste limit to prevent getting stuck on this slow path
3229   if (Assembler::is_simm13(ThreadLocalAllocBuffer::refill_waste_limit_increment())) {
3230     delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2);
3231   } else {
3232     delayed()->nop();
3233     // set64 does not use the temp register if the given constant fits in 32 bits,
3234     // so we can pass any register here; if G0 were actually used, the upper
3235     // 32 bits of that value would simply be discarded.
3236     set64(ThreadLocalAllocBuffer::refill_waste_limit_increment(), t3, G0);
3237     add(t2, t3, t2);
3238   }
3239
3240   st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
3241   if (TLABStats) {
3242     // increment number of slow_allocations
3243     ld(G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()), t2);
3244     add(t2, 1, t2);
3245     stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()));
3246   }
3247   ba(try_eden);
3248   delayed()->nop();
3249
3250   bind(discard_tlab);
3251   if (TLABStats) {
3252     // increment number of refills
3253     ld(G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()), t2);
3254     add(t2, 1, t2);
3255     stw(t2, G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()));
3256     // accumulate wastage
3257     ld(G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()), t2);
3258     add(t2, t1, t2);
3259     stw(t2, G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()));
3260   }
3261
3262   // if tlab is currently allocated (top or end != null) then
3263   // fill [top, end + alignment_reserve) with an array object
3264   br_null_short(top, Assembler::pn, do_refill);
3265
3266   set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2);
3267   st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word
3268   // set klass to intArrayKlass
3269   sub(t1, typeArrayOopDesc::header_size(T_INT), t1);
3270   add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1);
3271   sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1);
3272   st(t1, top, arrayOopDesc::length_offset_in_bytes());
3273   set((intptr_t)Universe::intArrayKlassObj_addr(), t2);
3274   ld_ptr(t2, 0, t2);
3275   // Store the klass last: concurrent GCs assume the length is valid if the
3276   // klass field is not null.
3277   store_klass(t2, top);
3278   verify_oop(top);
3279
3280   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t1);
3281   sub(top, t1, t1); // size of tlab's allocated portion
3282   incr_allocated_bytes(t1, t2, t3);
3283
3284   // refill the tlab with an eden allocation
3285   bind(do_refill);
3286   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1);
3287   sll_ptr(t1, LogHeapWordSize, t1);
3288   // allocate new tlab, address returned in top
3289   eden_allocate(top, t1, 0, t2, t3, slow_case);
3290
3291   st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset()));
3292   st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
3293 #ifdef ASSERT
3294   // check that tlab_size (t1) is still valid
3295   {
3296     Label ok;
3297     ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2);
3298     sll_ptr(t2, LogHeapWordSize, t2);
3299     cmp_and_br_short(t1, t2, Assembler::equal, Assembler::pt, ok);
3300     STOP("assert(t1 == tlab_size)");
3301     should_not_reach_here();
3302
3303     bind(ok);
3304   }
3305 #endif // ASSERT
3306   add(top, t1, top); // t1 is tlab_size
3307   sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top);
3308   st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset()));
3309
3310   if (ZeroTLAB) {
3311     // This is a fast TLAB refill, therefore the GC is not notified of it.
3312     // So compiled code must fill the new TLAB with zeroes.
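    // t1 still holds the TLAB size in bytes, so zero_memory() below clears
    // [start, start + size), one HeapWord at a time, walking down from the top.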
3313 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2); 3314 zero_memory(t2, t1); 3315 } 3316 verify_tlab(); 3317 ba(retry); 3318 delayed()->nop(); 3319 } 3320 3321 void MacroAssembler::zero_memory(Register base, Register index) { 3322 assert_different_registers(base, index); 3323 Label loop; 3324 bind(loop); 3325 subcc(index, HeapWordSize, index); 3326 brx(Assembler::greaterEqual, true, Assembler::pt, loop); 3327 delayed()->st_ptr(G0, base, index); 3328 } 3329 3330 void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes, 3331 Register t1, Register t2) { 3332 // Bump total bytes allocated by this thread 3333 assert(t1->is_global(), "must be global reg"); // so all 64 bits are saved on a context switch 3334 assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2); 3335 // v8 support has gone the way of the dodo 3336 ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1); 3337 add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1); 3338 stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset())); 3339 } 3340 3341 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) { 3342 switch (cond) { 3343 // Note some conditions are synonyms for others 3344 case Assembler::never: return Assembler::always; 3345 case Assembler::zero: return Assembler::notZero; 3346 case Assembler::lessEqual: return Assembler::greater; 3347 case Assembler::less: return Assembler::greaterEqual; 3348 case Assembler::lessEqualUnsigned: return Assembler::greaterUnsigned; 3349 case Assembler::lessUnsigned: return Assembler::greaterEqualUnsigned; 3350 case Assembler::negative: return Assembler::positive; 3351 case Assembler::overflowSet: return Assembler::overflowClear; 3352 case Assembler::always: return Assembler::never; 3353 case Assembler::notZero: return Assembler::zero; 3354 case Assembler::greater: return Assembler::lessEqual; 3355 case Assembler::greaterEqual: return Assembler::less; 3356 case Assembler::greaterUnsigned: return Assembler::lessEqualUnsigned; 3357 case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned; 3358 case Assembler::positive: return Assembler::negative; 3359 case Assembler::overflowClear: return Assembler::overflowSet; 3360 } 3361 3362 ShouldNotReachHere(); return Assembler::overflowClear; 3363 } 3364 3365 void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr, 3366 Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) { 3367 Condition negated_cond = negate_condition(cond); 3368 Label L; 3369 brx(negated_cond, false, Assembler::pt, L); 3370 delayed()->nop(); 3371 inc_counter(counter_ptr, Rtmp1, Rtmp2); 3372 bind(L); 3373 } 3374 3375 void MacroAssembler::inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2) { 3376 AddressLiteral addrlit(counter_addr); 3377 sethi(addrlit, Rtmp1); // Move hi22 bits into temporary register. 3378 Address addr(Rtmp1, addrlit.low10()); // Build an address with low10 bits. 
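  // Note: the load/increment/store sequence below is not atomic, so a
  // concurrent update may be lost; that is acceptable for these counters.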
3379   ld(addr, Rtmp2);
3380   inc(Rtmp2);
3381   st(Rtmp2, addr);
3382 }
3383
3384 void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) {
3385   inc_counter((address) counter_addr, Rtmp1, Rtmp2);
3386 }
3387
3388 SkipIfEqual::SkipIfEqual(
3389     MacroAssembler* masm, Register temp, const bool* flag_addr,
3390     Assembler::Condition condition) {
3391   _masm = masm;
3392   AddressLiteral flag(flag_addr);
3393   _masm->sethi(flag, temp);
3394   _masm->ldub(temp, flag.low10(), temp);
3395   _masm->tst(temp);
3396   _masm->br(condition, false, Assembler::pt, _label);
3397   _masm->delayed()->nop();
3398 }
3399
3400 SkipIfEqual::~SkipIfEqual() {
3401   _masm->bind(_label);
3402 }
3403
3404
3405 // Writes zeros to successive stack pages, down through the requested size plus
3406 // the shadow pages, to check for a stack overflow. Clobbers Rtsp and Rscratch.
3407 void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
3408                                      Register Rscratch) {
3409   // Use a copy of the stack pointer as the temp stack pointer
3410   mov(SP, Rtsp);
3411
3412   // Bang stack for total size given plus stack shadow page size.
3413   // Bang one page at a time because a large size can overflow yellow and
3414   // red zones (the bang will fail but stack overflow handling can't tell that
3415   // it was a stack overflow bang vs a regular segv).
3416   int offset = os::vm_page_size();
3417   Register Roffset = Rscratch;
3418
3419   Label loop;
3420   bind(loop);
3421   set((-offset)+STACK_BIAS, Rscratch);
3422   st(G0, Rtsp, Rscratch);
3423   set(offset, Roffset);
3424   sub(Rsize, Roffset, Rsize);
3425   cmp(Rsize, G0);
3426   br(Assembler::greater, false, Assembler::pn, loop);
3427   delayed()->sub(Rtsp, Roffset, Rtsp);
3428
3429   // Bang down shadow pages too.
3430   // At this point, (tmp-0) is the last address touched, so don't
3431   // touch it again. (It was touched as (tmp-pagesize) but then tmp
3432   // was post-decremented.) Skip this address by starting at i=1, and
3433   // touch a few more pages below. N.B. It is important to touch all
3434   // the way down to and including i=StackShadowPages.
3435   for (int i = 1; i < JavaThread::stack_shadow_zone_size() / os::vm_page_size(); i++) {
3436     set((-i*offset)+STACK_BIAS, Rscratch);
3437     st(G0, Rtsp, Rscratch);
3438   }
3439 }
3440
3441 void MacroAssembler::reserved_stack_check() {
3442   // Test whether the reserved zone needs to be enabled.
3443   Label no_reserved_zone_enabling;
3444
3445   ld_ptr(G2_thread, JavaThread::reserved_stack_activation_offset(), G4_scratch);
3446   cmp_and_brx_short(SP, G4_scratch, Assembler::lessUnsigned, Assembler::pt, no_reserved_zone_enabling);
3447
3448   call_VM_leaf(L0, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), G2_thread);
3449
3450   AddressLiteral stub(StubRoutines::throw_delayed_StackOverflowError_entry());
3451   jump_to(stub, G4_scratch);
3452   delayed()->restore();
3453
3454   should_not_reach_here();
3455
3456   bind(no_reserved_zone_enabling);
3457 }
3458
3459 ///////////////////////////////////////////////////////////////////////////////////
3460 #if INCLUDE_ALL_GCS
3461
3462 static address satb_log_enqueue_with_frame = NULL;
3463 static u_char* satb_log_enqueue_with_frame_end = NULL;
3464
3465 static address satb_log_enqueue_frameless = NULL;
3466 static u_char* satb_log_enqueue_frameless_end = NULL;
3467
3468 static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions?
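// The stub generated below appends pre_val to the thread-local SATB queue.
// Roughly, in C-style pseudocode (field names abbreviated here; the real
// layout is given by the SATBMarkQueue byte offsets used in the code):
//
//   intptr_t index = thread->satb_index;       // byte offset, counts down
//   if (index == 0) {                          // buffer is full
//     handle_zero_index_for_thread(thread);    // runtime call, then retry
//   }
//   index -= oopSize;
//   *(oop*)(thread->satb_buf + index) = pre_val;
//   thread->satb_index = index;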
3469 3470 static void generate_satb_log_enqueue(bool with_frame) { 3471 BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize); 3472 CodeBuffer buf(bb); 3473 MacroAssembler masm(&buf); 3474 3475 #define __ masm. 3476 3477 address start = __ pc(); 3478 Register pre_val; 3479 3480 Label refill, restart; 3481 if (with_frame) { 3482 __ save_frame(0); 3483 pre_val = I0; // Was O0 before the save. 3484 } else { 3485 pre_val = O0; 3486 } 3487 3488 int satb_q_index_byte_offset = 3489 in_bytes(JavaThread::satb_mark_queue_offset() + 3490 SATBMarkQueue::byte_offset_of_index()); 3491 3492 int satb_q_buf_byte_offset = 3493 in_bytes(JavaThread::satb_mark_queue_offset() + 3494 SATBMarkQueue::byte_offset_of_buf()); 3495 3496 assert(in_bytes(SATBMarkQueue::byte_width_of_index()) == sizeof(intptr_t) && 3497 in_bytes(SATBMarkQueue::byte_width_of_buf()) == sizeof(intptr_t), 3498 "check sizes in assembly below"); 3499 3500 __ bind(restart); 3501 3502 // Load the index into the SATB buffer. SATBMarkQueue::_index is a size_t 3503 // so ld_ptr is appropriate. 3504 __ ld_ptr(G2_thread, satb_q_index_byte_offset, L0); 3505 3506 // index == 0? 3507 __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill); 3508 3509 __ ld_ptr(G2_thread, satb_q_buf_byte_offset, L1); 3510 __ sub(L0, oopSize, L0); 3511 3512 __ st_ptr(pre_val, L1, L0); // [_buf + index] := I0 3513 if (!with_frame) { 3514 // Use return-from-leaf 3515 __ retl(); 3516 __ delayed()->st_ptr(L0, G2_thread, satb_q_index_byte_offset); 3517 } else { 3518 // Not delayed. 3519 __ st_ptr(L0, G2_thread, satb_q_index_byte_offset); 3520 } 3521 if (with_frame) { 3522 __ ret(); 3523 __ delayed()->restore(); 3524 } 3525 __ bind(refill); 3526 3527 address handle_zero = 3528 CAST_FROM_FN_PTR(address, 3529 &SATBMarkQueueSet::handle_zero_index_for_thread); 3530 // This should be rare enough that we can afford to save all the 3531 // scratch registers that the calling context might be using. 3532 __ mov(G1_scratch, L0); 3533 __ mov(G3_scratch, L1); 3534 __ mov(G4, L2); 3535 // We need the value of O0 above (for the write into the buffer), so we 3536 // save and restore it. 3537 __ mov(O0, L3); 3538 // Since the call will overwrite O7, we save and restore that, as well. 
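  // (O7 holds this stub's return address, which the call below would clobber.)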
3539 __ mov(O7, L4); 3540 __ call_VM_leaf(L5, handle_zero, G2_thread); 3541 __ mov(L0, G1_scratch); 3542 __ mov(L1, G3_scratch); 3543 __ mov(L2, G4); 3544 __ mov(L3, O0); 3545 __ br(Assembler::always, /*annul*/false, Assembler::pt, restart); 3546 __ delayed()->mov(L4, O7); 3547 3548 if (with_frame) { 3549 satb_log_enqueue_with_frame = start; 3550 satb_log_enqueue_with_frame_end = __ pc(); 3551 } else { 3552 satb_log_enqueue_frameless = start; 3553 satb_log_enqueue_frameless_end = __ pc(); 3554 } 3555 3556 #undef __ 3557 } 3558 3559 static inline void generate_satb_log_enqueue_if_necessary(bool with_frame) { 3560 if (with_frame) { 3561 if (satb_log_enqueue_with_frame == 0) { 3562 generate_satb_log_enqueue(with_frame); 3563 assert(satb_log_enqueue_with_frame != 0, "postcondition."); 3564 } 3565 } else { 3566 if (satb_log_enqueue_frameless == 0) { 3567 generate_satb_log_enqueue(with_frame); 3568 assert(satb_log_enqueue_frameless != 0, "postcondition."); 3569 } 3570 } 3571 } 3572 3573 void MacroAssembler::g1_write_barrier_pre(Register obj, 3574 Register index, 3575 int offset, 3576 Register pre_val, 3577 Register tmp, 3578 bool preserve_o_regs) { 3579 Label filtered; 3580 3581 if (obj == noreg) { 3582 // We are not loading the previous value so make 3583 // sure that we don't trash the value in pre_val 3584 // with the code below. 3585 assert_different_registers(pre_val, tmp); 3586 } else { 3587 // We will be loading the previous value 3588 // in this code so... 3589 assert(offset == 0 || index == noreg, "choose one"); 3590 assert(pre_val == noreg, "check this code"); 3591 } 3592 3593 // Is marking active? 3594 if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) { 3595 ld(G2, 3596 in_bytes(JavaThread::satb_mark_queue_offset() + 3597 SATBMarkQueue::byte_offset_of_active()), 3598 tmp); 3599 } else { 3600 guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, 3601 "Assumption"); 3602 ldsb(G2, 3603 in_bytes(JavaThread::satb_mark_queue_offset() + 3604 SATBMarkQueue::byte_offset_of_active()), 3605 tmp); 3606 } 3607 3608 // Is marking active? 3609 cmp_and_br_short(tmp, G0, Assembler::equal, Assembler::pt, filtered); 3610 3611 // Do we need to load the previous value? 3612 if (obj != noreg) { 3613 // Load the previous value... 3614 if (index == noreg) { 3615 if (Assembler::is_simm13(offset)) { 3616 load_heap_oop(obj, offset, tmp); 3617 } else { 3618 set(offset, tmp); 3619 load_heap_oop(obj, tmp, tmp); 3620 } 3621 } else { 3622 load_heap_oop(obj, index, tmp); 3623 } 3624 // Previous value has been loaded into tmp 3625 pre_val = tmp; 3626 } 3627 3628 assert(pre_val != noreg, "must have a real register"); 3629 3630 // Is the previous value null? 3631 cmp_and_brx_short(pre_val, G0, Assembler::equal, Assembler::pt, filtered); 3632 3633 // OK, it's not filtered, so we'll need to call enqueue. In the normal 3634 // case, pre_val will be a scratch G-reg, but there are some cases in 3635 // which it's an O-reg. In the first case, do a normal call. In the 3636 // latter, do a save here and call the frameless version. 
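  // (After save_frame the caller's O-registers are visible as I-registers,
  // which is why pre_val->after_save() is used in the frameless path below.)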
3637 3638 guarantee(pre_val->is_global() || pre_val->is_out(), 3639 "Or we need to think harder."); 3640 3641 if (pre_val->is_global() && !preserve_o_regs) { 3642 generate_satb_log_enqueue_if_necessary(true); // with frame 3643 3644 call(satb_log_enqueue_with_frame); 3645 delayed()->mov(pre_val, O0); 3646 } else { 3647 generate_satb_log_enqueue_if_necessary(false); // frameless 3648 3649 save_frame(0); 3650 call(satb_log_enqueue_frameless); 3651 delayed()->mov(pre_val->after_save(), O0); 3652 restore(); 3653 } 3654 3655 bind(filtered); 3656 } 3657 3658 static address dirty_card_log_enqueue = 0; 3659 static u_char* dirty_card_log_enqueue_end = 0; 3660 3661 // This gets to assume that o0 contains the object address. 3662 static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) { 3663 BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2); 3664 CodeBuffer buf(bb); 3665 MacroAssembler masm(&buf); 3666 #define __ masm. 3667 address start = __ pc(); 3668 3669 Label not_already_dirty, restart, refill, young_card; 3670 3671 __ srlx(O0, CardTableModRefBS::card_shift, O0); 3672 AddressLiteral addrlit(byte_map_base); 3673 __ set(addrlit, O1); // O1 := <card table base> 3674 __ ldub(O0, O1, O2); // O2 := [O0 + O1] 3675 3676 __ cmp_and_br_short(O2, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card); 3677 3678 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); 3679 __ ldub(O0, O1, O2); // O2 := [O0 + O1] 3680 3681 assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code"); 3682 __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty); 3683 3684 __ bind(young_card); 3685 // We didn't take the branch, so we're already dirty: return. 3686 // Use return-from-leaf 3687 __ retl(); 3688 __ delayed()->nop(); 3689 3690 // Not dirty. 3691 __ bind(not_already_dirty); 3692 3693 // Get O0 + O1 into a reg by itself 3694 __ add(O0, O1, O3); 3695 3696 // First, dirty it. 3697 __ stb(G0, O3, G0); // [cardPtr] := 0 (i.e., dirty). 3698 3699 int dirty_card_q_index_byte_offset = 3700 in_bytes(JavaThread::dirty_card_queue_offset() + 3701 DirtyCardQueue::byte_offset_of_index()); 3702 int dirty_card_q_buf_byte_offset = 3703 in_bytes(JavaThread::dirty_card_queue_offset() + 3704 DirtyCardQueue::byte_offset_of_buf()); 3705 __ bind(restart); 3706 3707 // Load the index into the update buffer. DirtyCardQueue::_index is 3708 // a size_t so ld_ptr is appropriate here. 3709 __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, L0); 3710 3711 // index == 0? 3712 __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill); 3713 3714 __ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, L1); 3715 __ sub(L0, oopSize, L0); 3716 3717 __ st_ptr(O3, L1, L0); // [_buf + index] := I0 3718 // Use return-from-leaf 3719 __ retl(); 3720 __ delayed()->st_ptr(L0, G2_thread, dirty_card_q_index_byte_offset); 3721 3722 __ bind(refill); 3723 address handle_zero = 3724 CAST_FROM_FN_PTR(address, 3725 &DirtyCardQueueSet::handle_zero_index_for_thread); 3726 // This should be rare enough that we can afford to save all the 3727 // scratch registers that the calling context might be using. 3728 __ mov(G1_scratch, L3); 3729 __ mov(G3_scratch, L5); 3730 // We need the value of O3 above (for the write into the buffer), so we 3731 // save and restore it. 3732 __ mov(O3, L6); 3733 // Since the call will overwrite O7, we save and restore that, as well. 
3734 __ mov(O7, L4); 3735 3736 __ call_VM_leaf(L7_thread_cache, handle_zero, G2_thread); 3737 __ mov(L3, G1_scratch); 3738 __ mov(L5, G3_scratch); 3739 __ mov(L6, O3); 3740 __ br(Assembler::always, /*annul*/false, Assembler::pt, restart); 3741 __ delayed()->mov(L4, O7); 3742 3743 dirty_card_log_enqueue = start; 3744 dirty_card_log_enqueue_end = __ pc(); 3745 // XXX Should have a guarantee here about not going off the end! 3746 // Does it already do so? Do an experiment... 3747 3748 #undef __ 3749 3750 } 3751 3752 static inline void 3753 generate_dirty_card_log_enqueue_if_necessary(jbyte* byte_map_base) { 3754 if (dirty_card_log_enqueue == 0) { 3755 generate_dirty_card_log_enqueue(byte_map_base); 3756 assert(dirty_card_log_enqueue != 0, "postcondition."); 3757 } 3758 } 3759 3760 3761 void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val, Register tmp) { 3762 3763 Label filtered; 3764 MacroAssembler* post_filter_masm = this; 3765 3766 if (new_val == G0) return; 3767 3768 G1SATBCardTableLoggingModRefBS* bs = 3769 barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set()); 3770 3771 if (G1RSBarrierRegionFilter) { 3772 xor3(store_addr, new_val, tmp); 3773 srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp); 3774 3775 // XXX Should I predict this taken or not? Does it matter? 3776 cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered); 3777 } 3778 3779 // If the "store_addr" register is an "in" or "local" register, move it to 3780 // a scratch reg so we can pass it as an argument. 3781 bool use_scr = !(store_addr->is_global() || store_addr->is_out()); 3782 // Pick a scratch register different from "tmp". 3783 Register scr = (tmp == G1_scratch ? G3_scratch : G1_scratch); 3784 // Make sure we use up the delay slot! 3785 if (use_scr) { 3786 post_filter_masm->mov(store_addr, scr); 3787 } else { 3788 post_filter_masm->nop(); 3789 } 3790 generate_dirty_card_log_enqueue_if_necessary(bs->byte_map_base); 3791 save_frame(0); 3792 call(dirty_card_log_enqueue); 3793 if (use_scr) { 3794 delayed()->mov(scr, O0); 3795 } else { 3796 delayed()->mov(store_addr->after_save(), O0); 3797 } 3798 restore(); 3799 3800 bind(filtered); 3801 } 3802 3803 #endif // INCLUDE_ALL_GCS 3804 /////////////////////////////////////////////////////////////////////////////////// 3805 3806 void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) { 3807 // If we're writing constant NULL, we can skip the write barrier. 3808 if (new_val == G0) return; 3809 CardTableModRefBS* bs = 3810 barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set()); 3811 assert(bs->kind() == BarrierSet::CardTableForRS || 3812 bs->kind() == BarrierSet::CardTableExtension, "wrong barrier"); 3813 card_table_write(bs->byte_map_base, tmp, store_addr); 3814 } 3815 3816 void MacroAssembler::load_mirror(Register mirror, Register method) { 3817 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 3818 ld_ptr(method, in_bytes(Method::const_offset()), mirror); 3819 ld_ptr(mirror, in_bytes(ConstMethod::constants_offset()), mirror); 3820 ld_ptr(mirror, ConstantPool::pool_holder_offset_in_bytes(), mirror); 3821 ld_ptr(mirror, mirror_offset, mirror); 3822 } 3823 3824 void MacroAssembler::load_klass(Register src_oop, Register klass) { 3825 // The number of bytes in this code is used by 3826 // MachCallDynamicJavaNode::ret_addr_offset() 3827 // if this changes, change that. 
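  // With compressed class pointers the klass field holds a 32-bit narrow
  // value, hence the lduw + decode; otherwise it is a full Klass* pointer.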
3828   if (UseCompressedClassPointers) {
3829     lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
3830     decode_klass_not_null(klass);
3831   } else {
3832     ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass);
3833   }
3834 }
3835
3836 void MacroAssembler::store_klass(Register klass, Register dst_oop) {
3837   if (UseCompressedClassPointers) {
3838     assert(dst_oop != klass, "not enough registers");
3839     encode_klass_not_null(klass);
3840     st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
3841   } else {
3842     st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes());
3843   }
3844 }
3845
3846 void MacroAssembler::store_klass_gap(Register s, Register d) {
3847   if (UseCompressedClassPointers) {
3848     assert(s != d, "not enough registers");
3849     st(s, d, oopDesc::klass_gap_offset_in_bytes());
3850   }
3851 }
3852
3853 void MacroAssembler::load_heap_oop(const Address& s, Register d) {
3854   if (UseCompressedOops) {
3855     lduw(s, d);
3856     decode_heap_oop(d);
3857   } else {
3858     ld_ptr(s, d);
3859   }
3860 }
3861
3862 void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) {
3863   if (UseCompressedOops) {
3864     lduw(s1, s2, d);
3865     decode_heap_oop(d, d);
3866   } else {
3867     ld_ptr(s1, s2, d);
3868   }
3869 }
3870
3871 void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) {
3872   if (UseCompressedOops) {
3873     lduw(s1, simm13a, d);
3874     decode_heap_oop(d, d);
3875   } else {
3876     ld_ptr(s1, simm13a, d);
3877   }
3878 }
3879
3880 void MacroAssembler::load_heap_oop(Register s1, RegisterOrConstant s2, Register d) {
3881   if (s2.is_constant()) load_heap_oop(s1, s2.as_constant(), d);
3882   else                  load_heap_oop(s1, s2.as_register(), d);
3883 }
3884
3885 void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) {
3886   if (UseCompressedOops) {
3887     assert(s1 != d && s2 != d, "not enough registers");
3888     encode_heap_oop(d);
3889     st(d, s1, s2);
3890   } else {
3891     st_ptr(d, s1, s2);
3892   }
3893 }
3894
3895 void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) {
3896   if (UseCompressedOops) {
3897     assert(s1 != d, "not enough registers");
3898     encode_heap_oop(d);
3899     st(d, s1, simm13a);
3900   } else {
3901     st_ptr(d, s1, simm13a);
3902   }
3903 }
3904
3905 void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) {
3906   if (UseCompressedOops) {
3907     assert(a.base() != d, "not enough registers");
3908     encode_heap_oop(d);
3909     st(d, a, offset);
3910   } else {
3911     st_ptr(d, a, offset);
3912   }
3913 }
3914
3915
3916 void MacroAssembler::encode_heap_oop(Register src, Register dst) {
3917   assert (UseCompressedOops, "must be compressed");
3918   assert (Universe::heap() != NULL, "java heap should be initialized");
3919   assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3920   verify_oop(src);
3921   if (Universe::narrow_oop_base() == NULL) {
3922     srlx(src, LogMinObjAlignmentInBytes, dst);
3923     return;
3924   }
3925   Label done;
3926   if (src == dst) {
3927     // optimize for frequent case src == dst
3928     bpr(rc_nz, true, Assembler::pt, src, done);
3929     delayed()->sub(src, G6_heapbase, dst); // annulled if not taken
3930     bind(done);
3931     srlx(src, LogMinObjAlignmentInBytes, dst);
3932   } else {
3933     bpr(rc_z, false, Assembler::pn, src, done);
3934     delayed()->mov(G0, dst);
3935     // This could be moved before the branch, annulling the delay slot,
3936     // but that may add some unneeded work when decoding null.
3937     sub(src, G6_heapbase, dst);
3938     srlx(dst, LogMinObjAlignmentInBytes, dst);
3939     bind(done);
3940   }
3941 }
3942
3943
3944 void MacroAssembler::encode_heap_oop_not_null(Register r) {
3945   assert (UseCompressedOops, "must be compressed");
3946   assert (Universe::heap() != NULL, "java heap should be initialized");
3947   assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3948   verify_oop(r);
3949   if (Universe::narrow_oop_base() != NULL)
3950     sub(r, G6_heapbase, r);
3951   srlx(r, LogMinObjAlignmentInBytes, r);
3952 }
3953
3954 void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) {
3955   assert (UseCompressedOops, "must be compressed");
3956   assert (Universe::heap() != NULL, "java heap should be initialized");
3957   assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3958   verify_oop(src);
3959   if (Universe::narrow_oop_base() == NULL) {
3960     srlx(src, LogMinObjAlignmentInBytes, dst);
3961   } else {
3962     sub(src, G6_heapbase, dst);
3963     srlx(dst, LogMinObjAlignmentInBytes, dst);
3964   }
3965 }
3966
3967 // Same algorithm as oops.inline.hpp decode_heap_oop.
3968 void MacroAssembler::decode_heap_oop(Register src, Register dst) {
3969   assert (UseCompressedOops, "must be compressed");
3970   assert (Universe::heap() != NULL, "java heap should be initialized");
3971   assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3972   sllx(src, LogMinObjAlignmentInBytes, dst);
3973   if (Universe::narrow_oop_base() != NULL) {
3974     Label done;
3975     bpr(rc_nz, true, Assembler::pt, dst, done);
3976     delayed()->add(dst, G6_heapbase, dst); // annulled if not taken
3977     bind(done);
3978   }
3979   verify_oop(dst);
3980 }
3981
3982 void MacroAssembler::decode_heap_oop_not_null(Register r) {
3983   // Do not add assert code to this unless you change vtableStubs_sparc.cpp
3984   // pd_code_size_limit.
3985   // Also do not verify_oop as this is called by verify_oop.
3986   assert (UseCompressedOops, "must be compressed");
3987   assert (Universe::heap() != NULL, "java heap should be initialized");
3988   assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3989   sllx(r, LogMinObjAlignmentInBytes, r);
3990   if (Universe::narrow_oop_base() != NULL)
3991     add(r, G6_heapbase, r);
3992 }
3993
3994 void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
3995   // Do not add assert code to this unless you change vtableStubs_sparc.cpp
3996   // pd_code_size_limit.
3997   // Also do not verify_oop as this is called by verify_oop.
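  // The decoding itself is dst = (src << LogMinObjAlignmentInBytes) + base,
  // with the add elided when narrow_oop_base() is NULL. For instance, with
  // the usual shift of 3, a narrow value of 0x10 decodes to base + 0x80.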
3998 assert (UseCompressedOops, "must be compressed"); 3999 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 4000 sllx(src, LogMinObjAlignmentInBytes, dst); 4001 if (Universe::narrow_oop_base() != NULL) 4002 add(dst, G6_heapbase, dst); 4003 } 4004 4005 void MacroAssembler::encode_klass_not_null(Register r) { 4006 assert (UseCompressedClassPointers, "must be compressed"); 4007 if (Universe::narrow_klass_base() != NULL) { 4008 assert(r != G6_heapbase, "bad register choice"); 4009 set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); 4010 sub(r, G6_heapbase, r); 4011 if (Universe::narrow_klass_shift() != 0) { 4012 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 4013 srlx(r, LogKlassAlignmentInBytes, r); 4014 } 4015 reinit_heapbase(); 4016 } else { 4017 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); 4018 srlx(r, Universe::narrow_klass_shift(), r); 4019 } 4020 } 4021 4022 void MacroAssembler::encode_klass_not_null(Register src, Register dst) { 4023 if (src == dst) { 4024 encode_klass_not_null(src); 4025 } else { 4026 assert (UseCompressedClassPointers, "must be compressed"); 4027 if (Universe::narrow_klass_base() != NULL) { 4028 set((intptr_t)Universe::narrow_klass_base(), dst); 4029 sub(src, dst, dst); 4030 if (Universe::narrow_klass_shift() != 0) { 4031 srlx(dst, LogKlassAlignmentInBytes, dst); 4032 } 4033 } else { 4034 // shift src into dst 4035 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); 4036 srlx(src, Universe::narrow_klass_shift(), dst); 4037 } 4038 } 4039 } 4040 4041 // Function instr_size_for_decode_klass_not_null() counts the instructions 4042 // generated by decode_klass_not_null() and reinit_heapbase(). Hence, if 4043 // the instructions they generate change, then this method needs to be updated. 4044 int MacroAssembler::instr_size_for_decode_klass_not_null() { 4045 assert (UseCompressedClassPointers, "only for compressed klass ptrs"); 4046 int num_instrs = 1; // shift src,dst or add 4047 if (Universe::narrow_klass_base() != NULL) { 4048 // set + add + set 4049 num_instrs += insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + 4050 insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base()); 4051 if (Universe::narrow_klass_shift() != 0) { 4052 num_instrs += 1; // sllx 4053 } 4054 } 4055 return num_instrs * BytesPerInstWord; 4056 } 4057 4058 // !!! If the instructions that get generated here change then function 4059 // instr_size_for_decode_klass_not_null() needs to get updated. 4060 void MacroAssembler::decode_klass_not_null(Register r) { 4061 // Do not add assert code to this unless you change vtableStubs_sparc.cpp 4062 // pd_code_size_limit. 
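  // When a klass base is needed, G6_heapbase is borrowed as a temporary and
  // then restored via reinit_heapbase().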
4063 assert (UseCompressedClassPointers, "must be compressed"); 4064 if (Universe::narrow_klass_base() != NULL) { 4065 assert(r != G6_heapbase, "bad register choice"); 4066 set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); 4067 if (Universe::narrow_klass_shift() != 0) 4068 sllx(r, LogKlassAlignmentInBytes, r); 4069 add(r, G6_heapbase, r); 4070 reinit_heapbase(); 4071 } else { 4072 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); 4073 sllx(r, Universe::narrow_klass_shift(), r); 4074 } 4075 } 4076 4077 void MacroAssembler::decode_klass_not_null(Register src, Register dst) { 4078 if (src == dst) { 4079 decode_klass_not_null(src); 4080 } else { 4081 // Do not add assert code to this unless you change vtableStubs_sparc.cpp 4082 // pd_code_size_limit. 4083 assert (UseCompressedClassPointers, "must be compressed"); 4084 if (Universe::narrow_klass_base() != NULL) { 4085 if (Universe::narrow_klass_shift() != 0) { 4086 assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice"); 4087 set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); 4088 sllx(src, LogKlassAlignmentInBytes, dst); 4089 add(dst, G6_heapbase, dst); 4090 reinit_heapbase(); 4091 } else { 4092 set((intptr_t)Universe::narrow_klass_base(), dst); 4093 add(src, dst, dst); 4094 } 4095 } else { 4096 // shift/mov src into dst. 4097 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); 4098 sllx(src, Universe::narrow_klass_shift(), dst); 4099 } 4100 } 4101 } 4102 4103 void MacroAssembler::reinit_heapbase() { 4104 if (UseCompressedOops || UseCompressedClassPointers) { 4105 if (Universe::heap() != NULL) { 4106 set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase); 4107 } else { 4108 AddressLiteral base(Universe::narrow_ptrs_base_addr()); 4109 load_ptr_contents(base, G6_heapbase); 4110 } 4111 } 4112 } 4113 4114 #ifdef COMPILER2 4115 4116 // Compress char[] to byte[] by compressing 16 bytes at once. Return 0 on failure. 
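// Roughly: each iteration loads 8 chars (16 bytes); if any char has a non-zero
// high byte the intrinsic bails out with result = 0, otherwise bmask/bshuffle
// pick out the low byte of each char. For example, the chars
//   0x0048 0x0065 0x006c 0x006c 0x006f 0x0021 0x0021 0x0021   ("Hello!!!")
// are packed into the 8 bytes 48 65 6c 6c 6f 21 21 21.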
4117 void MacroAssembler::string_compress_16(Register src, Register dst, Register cnt, Register result, 4118 Register tmp1, Register tmp2, Register tmp3, Register tmp4, 4119 FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, Label& Ldone) { 4120 Label Lloop, Lslow; 4121 assert(UseVIS >= 3, "VIS3 is required"); 4122 assert_different_registers(src, dst, cnt, tmp1, tmp2, tmp3, tmp4, result); 4123 assert_different_registers(ftmp1, ftmp2, ftmp3); 4124 4125 // Check if cnt >= 8 (= 16 bytes) 4126 cmp(cnt, 8); 4127 br(Assembler::less, false, Assembler::pn, Lslow); 4128 delayed()->mov(cnt, result); // copy count 4129 4130 // Check for 8-byte alignment of src and dst 4131 or3(src, dst, tmp1); 4132 andcc(tmp1, 7, G0); 4133 br(Assembler::notZero, false, Assembler::pn, Lslow); 4134 delayed()->nop(); 4135 4136 // Set mask for bshuffle instruction 4137 Register mask = tmp4; 4138 set(0x13579bdf, mask); 4139 bmask(mask, G0, G0); 4140 4141 // Set mask to 0xff00 ff00 ff00 ff00 to check for non-latin1 characters 4142 Assembler::sethi(0xff00fc00, mask); // mask = 0x0000 0000 ff00 fc00 4143 add(mask, 0x300, mask); // mask = 0x0000 0000 ff00 ff00 4144 sllx(mask, 32, tmp1); // tmp1 = 0xff00 ff00 0000 0000 4145 or3(mask, tmp1, mask); // mask = 0xff00 ff00 ff00 ff00 4146 4147 // Load first 8 bytes 4148 ldx(src, 0, tmp1); 4149 4150 bind(Lloop); 4151 // Load next 8 bytes 4152 ldx(src, 8, tmp2); 4153 4154 // Check for non-latin1 character by testing if the most significant byte of a char is set. 4155 // Although we have to move the data between integer and floating point registers, this is 4156 // still faster than the corresponding VIS instructions (ford/fand/fcmpd). 4157 or3(tmp1, tmp2, tmp3); 4158 btst(tmp3, mask); 4159 // annul zeroing if branch is not taken to preserve original count 4160 brx(Assembler::notZero, true, Assembler::pn, Ldone); 4161 delayed()->mov(G0, result); // 0 - failed 4162 4163 // Move bytes into float register 4164 movxtod(tmp1, ftmp1); 4165 movxtod(tmp2, ftmp2); 4166 4167 // Compress by copying one byte per char from ftmp1 and ftmp2 to ftmp3 4168 bshuffle(ftmp1, ftmp2, ftmp3); 4169 stf(FloatRegisterImpl::D, ftmp3, dst, 0); 4170 4171 // Increment addresses and decrement count 4172 inc(src, 16); 4173 inc(dst, 8); 4174 dec(cnt, 8); 4175 4176 cmp(cnt, 8); 4177 // annul LDX if branch is not taken to prevent access past end of string 4178 br(Assembler::greaterEqual, true, Assembler::pt, Lloop); 4179 delayed()->ldx(src, 0, tmp1); 4180 4181 // Fallback to slow version 4182 bind(Lslow); 4183 } 4184 4185 // Compress char[] to byte[]. Return 0 on failure. 4186 void MacroAssembler::string_compress(Register src, Register dst, Register cnt, Register result, Register tmp, Label& Ldone) { 4187 Label Lloop; 4188 assert_different_registers(src, dst, cnt, tmp, result); 4189 4190 lduh(src, 0, tmp); 4191 4192 bind(Lloop); 4193 inc(src, sizeof(jchar)); 4194 cmp(tmp, 0xff); 4195 // annul zeroing if branch is not taken to preserve original count 4196 br(Assembler::greater, true, Assembler::pn, Ldone); // don't check xcc 4197 delayed()->mov(G0, result); // 0 - failed 4198 deccc(cnt); 4199 stb(tmp, dst, 0); 4200 inc(dst); 4201 // annul LDUH if branch is not taken to prevent access past end of string 4202 br(Assembler::notZero, true, Assembler::pt, Lloop); 4203 delayed()->lduh(src, 0, tmp); // hoisted 4204 } 4205 4206 // Inflate byte[] to char[] by inflating 16 bytes at once. 
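// Roughly the inverse of string_compress_16 above: fpmerge interleaves a zero
// byte before each latin1 byte, so e.g. the byte 0x48 becomes the char 0x0048;
// each iteration turns 8 source bytes into 16 destination bytes.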
4207 void MacroAssembler::string_inflate_16(Register src, Register dst, Register cnt, Register tmp,
4208                                        FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, FloatRegister ftmp4, Label& Ldone) {
4209   Label Lloop, Lslow;
4210   assert(UseVIS >= 3, "VIS3 is required");
4211   assert_different_registers(src, dst, cnt, tmp);
4212   assert_different_registers(ftmp1, ftmp2, ftmp3, ftmp4);
4213
4214   // Check if cnt >= 8 (= 16 bytes)
4215   cmp(cnt, 8);
4216   br(Assembler::less, false, Assembler::pn, Lslow);
4217   delayed()->nop();
4218
4219   // Check for 8-byte alignment of src and dst
4220   or3(src, dst, tmp);
4221   andcc(tmp, 7, G0);
4222   br(Assembler::notZero, false, Assembler::pn, Lslow);
4223   // Initialize float register to zero
4224   FloatRegister zerof = ftmp4;
4225   delayed()->fzero(FloatRegisterImpl::D, zerof);
4226
4227   // Load first 8 bytes
4228   ldf(FloatRegisterImpl::D, src, 0, ftmp1);
4229
4230   bind(Lloop);
4231   inc(src, 8);
4232   dec(cnt, 8);
4233
4234   // Inflate the string by interleaving each byte from the source array
4235   // with a zero byte and storing the result in the destination array.
4236   fpmerge(zerof, ftmp1->successor(), ftmp2);
4237   stf(FloatRegisterImpl::D, ftmp2, dst, 8);
4238   fpmerge(zerof, ftmp1, ftmp3);
4239   stf(FloatRegisterImpl::D, ftmp3, dst, 0);
4240
4241   inc(dst, 16);
4242
4243   cmp(cnt, 8);
4244   // annul LDF if branch is not taken to prevent access past end of string
4245   br(Assembler::greaterEqual, true, Assembler::pt, Lloop);
4246   delayed()->ldf(FloatRegisterImpl::D, src, 0, ftmp1);
4247
4248   // Fallback to slow version
4249   bind(Lslow);
4250 }
4251
4252 // Inflate byte[] to char[].
4253 void MacroAssembler::string_inflate(Register src, Register dst, Register cnt, Register tmp, Label& Ldone) {
4254   Label Loop;
4255   assert_different_registers(src, dst, cnt, tmp);
4256
4257   ldub(src, 0, tmp);
4258   bind(Loop);
4259   inc(src);
4260   deccc(cnt);
4261   sth(tmp, dst, 0);
4262   inc(dst, sizeof(jchar));
4263   // annul LDUB if branch is not taken to prevent access past end of string
4264   br(Assembler::notZero, true, Assembler::pt, Loop);
4265   delayed()->ldub(src, 0, tmp); // hoisted
4266 }
4267
4268 void MacroAssembler::string_compare(Register str1, Register str2,
4269                                     Register cnt1, Register cnt2,
4270                                     Register tmp1, Register tmp2,
4271                                     Register result, int ae) {
4272   Label Ldone, Lloop;
4273   assert_different_registers(str1, str2, cnt1, cnt2, tmp1, result);
4274   int stride1, stride2;
4275
4276   // Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a),
4277   // we interchange str1 and str2 in the UL case and negate the result.
4278   // This way, str1 is always latin1 encoded, except in the UU case.
4279
4280   if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
4281     srl(cnt2, 1, cnt2);
4282   }
4283
4284   // See if the lengths are different, and calculate min in cnt1.
4285   // Save diff in case we need it for a tie-breaker.
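  // After the block below, cnt1 holds min(cnt1, cnt2) and diff keeps the
  // signed length difference.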
4286 Label Lskip; 4287 Register diff = tmp1; 4288 subcc(cnt1, cnt2, diff); 4289 br(Assembler::greater, true, Assembler::pt, Lskip); 4290 // cnt2 is shorter, so use its count: 4291 delayed()->mov(cnt2, cnt1); 4292 bind(Lskip); 4293 4294 // Rename registers 4295 Register limit1 = cnt1; 4296 Register limit2 = limit1; 4297 Register chr1 = result; 4298 Register chr2 = cnt2; 4299 if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { 4300 // We need an additional register to keep track of two limits 4301 assert_different_registers(str1, str2, cnt1, cnt2, tmp1, tmp2, result); 4302 limit2 = tmp2; 4303 } 4304 4305 // Is the minimum length zero? 4306 cmp(limit1, (int)0); // use cast to resolve overloading ambiguity 4307 br(Assembler::equal, true, Assembler::pn, Ldone); 4308 // result is difference in lengths 4309 if (ae == StrIntrinsicNode::UU) { 4310 delayed()->sra(diff, 1, result); // Divide by 2 to get number of chars 4311 } else { 4312 delayed()->mov(diff, result); 4313 } 4314 4315 // Load first characters 4316 if (ae == StrIntrinsicNode::LL) { 4317 stride1 = stride2 = sizeof(jbyte); 4318 ldub(str1, 0, chr1); 4319 ldub(str2, 0, chr2); 4320 } else if (ae == StrIntrinsicNode::UU) { 4321 stride1 = stride2 = sizeof(jchar); 4322 lduh(str1, 0, chr1); 4323 lduh(str2, 0, chr2); 4324 } else { 4325 stride1 = sizeof(jbyte); 4326 stride2 = sizeof(jchar); 4327 ldub(str1, 0, chr1); 4328 lduh(str2, 0, chr2); 4329 } 4330 4331 // Compare first characters 4332 subcc(chr1, chr2, chr1); 4333 br(Assembler::notZero, false, Assembler::pt, Ldone); 4334 assert(chr1 == result, "result must be pre-placed"); 4335 delayed()->nop(); 4336 4337 // Check if the strings start at same location 4338 cmp(str1, str2); 4339 brx(Assembler::equal, true, Assembler::pn, Ldone); 4340 delayed()->mov(G0, result); // result is zero 4341 4342 // We have no guarantee that on 64 bit the higher half of limit is 0 4343 signx(limit1); 4344 4345 // Get limit 4346 if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { 4347 sll(limit1, 1, limit2); 4348 subcc(limit2, stride2, chr2); 4349 } 4350 subcc(limit1, stride1, chr1); 4351 br(Assembler::zero, true, Assembler::pn, Ldone); 4352 // result is difference in lengths 4353 if (ae == StrIntrinsicNode::UU) { 4354 delayed()->sra(diff, 1, result); // Divide by 2 to get number of chars 4355 } else { 4356 delayed()->mov(diff, result); 4357 } 4358 4359 // Shift str1 and str2 to the end of the arrays, negate limit 4360 add(str1, limit1, str1); 4361 add(str2, limit2, str2); 4362 neg(chr1, limit1); // limit1 = -(limit1-stride1) 4363 if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { 4364 neg(chr2, limit2); // limit2 = -(limit2-stride2) 4365 } 4366 4367 // Compare the rest of the characters 4368 load_sized_value(Address(str1, limit1), chr1, (ae == StrIntrinsicNode::UU) ? 2 : 1, false); 4369 4370 bind(Lloop); 4371 load_sized_value(Address(str2, limit2), chr2, (ae == StrIntrinsicNode::LL) ? 1 : 2, false); 4372 4373 subcc(chr1, chr2, chr1); 4374 br(Assembler::notZero, false, Assembler::pt, Ldone); 4375 assert(chr1 == result, "result must be pre-placed"); 4376 delayed()->inccc(limit1, stride1); 4377 if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { 4378 inccc(limit2, stride2); 4379 } 4380 4381 // annul LDUB if branch is not taken to prevent access past end of string 4382 br(Assembler::notZero, true, Assembler::pt, Lloop); 4383 delayed()->load_sized_value(Address(str1, limit1), chr1, (ae == StrIntrinsicNode::UU) ? 
2 : 1, false); 4384 4385 // If strings are equal up to min length, return the length difference. 4386 if (ae == StrIntrinsicNode::UU) { 4387 // Divide by 2 to get number of chars 4388 sra(diff, 1, result); 4389 } else { 4390 mov(diff, result); 4391 } 4392 4393 // Otherwise, return the difference between the first mismatched chars. 4394 bind(Ldone); 4395 if(ae == StrIntrinsicNode::UL) { 4396 // Negate result (see note above) 4397 neg(result); 4398 } 4399 } 4400 4401 void MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2, 4402 Register limit, Register tmp, Register result, bool is_byte) { 4403 Label Ldone, Lloop, Lremaining; 4404 assert_different_registers(ary1, ary2, limit, tmp, result); 4405 4406 int length_offset = arrayOopDesc::length_offset_in_bytes(); 4407 int base_offset = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR); 4408 assert(base_offset % 8 == 0, "Base offset must be 8-byte aligned"); 4409 4410 if (is_array_equ) { 4411 // return true if the same array 4412 cmp(ary1, ary2); 4413 brx(Assembler::equal, true, Assembler::pn, Ldone); 4414 delayed()->mov(1, result); // equal 4415 4416 br_null(ary1, true, Assembler::pn, Ldone); 4417 delayed()->clr(result); // not equal 4418 4419 br_null(ary2, true, Assembler::pn, Ldone); 4420 delayed()->clr(result); // not equal 4421 4422 // load the lengths of arrays 4423 ld(Address(ary1, length_offset), limit); 4424 ld(Address(ary2, length_offset), tmp); 4425 4426 // return false if the two arrays are not equal length 4427 cmp(limit, tmp); 4428 br(Assembler::notEqual, true, Assembler::pn, Ldone); 4429 delayed()->clr(result); // not equal 4430 } 4431 4432 cmp_zero_and_br(Assembler::zero, limit, Ldone, true, Assembler::pn); 4433 delayed()->mov(1, result); // zero-length arrays are equal 4434 4435 if (is_array_equ) { 4436 // load array addresses 4437 add(ary1, base_offset, ary1); 4438 add(ary2, base_offset, ary2); 4439 // set byte count 4440 if (!is_byte) { 4441 sll(limit, exact_log2(sizeof(jchar)), limit); 4442 } 4443 } else { 4444 // We have no guarantee that on 64 bit the higher half of limit is 0 4445 signx(limit); 4446 } 4447 4448 #ifdef ASSERT 4449 // Sanity check for doubleword (8-byte) alignment of ary1 and ary2. 4450 // Guaranteed on 64-bit systems (see arrayOopDesc::header_size_in_bytes()). 4451 Label Laligned; 4452 or3(ary1, ary2, tmp); 4453 andcc(tmp, 7, tmp); 4454 br_null_short(tmp, Assembler::pn, Laligned); 4455 STOP("First array element is not 8-byte aligned."); 4456 should_not_reach_here(); 4457 bind(Laligned); 4458 #endif 4459 4460 // Shift ary1 and ary2 to the end of the arrays, negate limit 4461 add(ary1, limit, ary1); 4462 add(ary2, limit, ary2); 4463 neg(limit, limit); 4464 4465 // MAIN LOOP 4466 // Load and compare array elements of size 'byte_width' until the elements are not 4467 // equal or we reached the end of the arrays. If the size of the arrays is not a 4468 // multiple of 'byte_width', we simply read over the end of the array, bail out and 4469 // compare the remaining bytes below by skipping the garbage bytes. 
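  // Note that 'limit' is negative and counts up towards zero while ary1/ary2
  // point just past the data, so (ary + limit) walks forward through the arrays.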
4470   ldx(ary1, limit, result);
4471   bind(Lloop);
4472   ldx(ary2, limit, tmp);
4473   inccc(limit, 8);
4474   // Bail out if we reached the end (but still do the comparison)
4475   br(Assembler::positive, false, Assembler::pn, Lremaining);
4476   delayed()->cmp(result, tmp);
4477   // Check equality of elements
4478   brx(Assembler::equal, false, Assembler::pt, target(Lloop));
4479   delayed()->ldx(ary1, limit, result);
4480
4481   ba(Ldone);
4482   delayed()->clr(result); // not equal
4483
4484   // TAIL COMPARISON
4485   // We got here because we reached the end of the arrays. 'limit' is the number of
4486   // garbage bytes we may have compared by reading over the end of the arrays. Shift
4487   // out the garbage and compare the remaining elements.
4488   bind(Lremaining);
4489   // Optimistic shortcut: elements potentially including garbage are equal
4490   brx(Assembler::equal, true, Assembler::pt, target(Ldone));
4491   delayed()->mov(1, result); // equal
4492   // Shift 'limit' bytes to the right and compare
4493   sll(limit, 3, limit); // bytes to bits
4494   srlx(result, limit, result);
4495   srlx(tmp, limit, tmp);
4496   cmp(result, tmp);
4497   clr(result);
4498   movcc(Assembler::equal, false, xcc, 1, result);
4499
4500   bind(Ldone);
4501 }
4502
4503 void MacroAssembler::has_negatives(Register inp, Register size, Register result, Register t2, Register t3, Register t4, Register t5) {
4504
4505   // test for negative bytes in input string of a given size
4506   // result is 1 if found, 0 otherwise.
4507
4508   Label Lcore, Ltail, Lreturn, Lcore_rpt;
4509
4510   assert_different_registers(inp, size, t2, t3, t4, t5, result);
4511
4512   Register i     = result;  // result used as integer index i until very end
4513   Register lmask = t2;      // t2 is aliased to lmask
4514
4515   // INITIALIZATION
4516   // ===========================================================
4517   // initialize highbits mask -> lmask = 0x8080808080808080  (8B/64b)
4518   // compute unaligned offset -> i
4519   // compute core end index   -> t5
4520   Assembler::sethi(0x80808000, t2);   //! sethi macro fails to emit optimal
4521   add(t2, 0x80, t2);
4522   sllx(t2, 32, t3);
4523   or3(t3, t2, lmask);                 // 0x8080808080808080 -> lmask
4524   sra(size,0,size);
4525   andcc(inp, 0x7, i);                 // unaligned offset -> i
4526   br(Assembler::zero, true, Assembler::pn, Lcore); // starts 8B aligned?
4527   delayed()->add(size, -8, t5);       // (annulled) core end index -> t5
4528
4529   // ===========================================================
4530
4531   // UNALIGNED HEAD
4532   // ===========================================================
4533   // * unaligned head handling: grab aligned 8B containing unaligned inp(ut)
4534   // * obliterate (ignore) bytes outside string by shifting off reg ends
4535   // * compare with bitmask, short circuit return true if one or more high
4536   //   bits set.
4537   cmp(size, 0);
4538   br(Assembler::zero, true, Assembler::pn, Lreturn); // short-circuit?
4539   delayed()->mov(0,result);           // annulled so i not clobbered for following
4540   neg(i, t4);
4541   add(i, size, t5);
4542   ldx(inp, t4, t3);                   // raw aligned 8B containing unaligned head -> t3
4543   mov(8, t4);
4544   sub(t4, t5, t4);
4545   sra(t4, 31, t5);
4546   andn(t4, t5, t5);
4547   add(i, t5, t4);
4548   sll(t5, 3, t5);
4549   sll(t4, 3, t4);                     // # bits to shift right, left -> t5,t4
4550   srlx(t3, t5, t3);
4551   sllx(t3, t4, t3);                   // bytes outside string in 8B header obliterated -> t3
4552   andcc(lmask, t3, G0);
4553   brx(Assembler::notZero, true, Assembler::pn, Lreturn); // short circuit?
4554   delayed()->mov(1,result);           // annulled so i not clobbered for following
4555   add(size, -8, t5);                  // core end index -> t5
4556   mov(8, t4);
4557   sub(t4, i, i);                      // # bytes examined in unaligned head (<8) -> i
4558   // ===========================================================
4559
4560   // ALIGNED CORE
4561   // ===========================================================
4562   // * iterate index i over aligned 8B sections of core, comparing with
4563   //   bitmask, short circuit return true if one or more high bits set
4564   // t5 contains core end index/loop limit which is the index
4565   //     of the MSB of last (unaligned) 8B fully contained in the string.
4566   // inp   contains address of first byte in string/array
4567   // lmask contains 8B high bit mask for comparison
4568   // i     contains next index to be processed (adr. inp+i is on 8B boundary)
4569   bind(Lcore);
4570   cmp_and_br_short(i, t5, Assembler::greater, Assembler::pn, Ltail);
4571   bind(Lcore_rpt);
4572   ldx(inp, i, t3);
4573   andcc(t3, lmask, G0);
4574   brx(Assembler::notZero, true, Assembler::pn, Lreturn);
4575   delayed()->mov(1, result);          // annulled so i not clobbered for following
4576   add(i, 8, i);
4577   cmp_and_br_short(i, t5, Assembler::lessEqual, Assembler::pn, Lcore_rpt);
4578   // ===========================================================
4579
4580   // ALIGNED TAIL (<8B)
4581   // ===========================================================
4582   // handle aligned tail of 7B or less as complete 8B, obliterating end of
4583   // string bytes by shifting them off end, compare what's left with bitmask
4584   // inp   contains address of first byte in string/array
4585   // lmask contains 8B high bit mask for comparison
4586   // i     contains next index to be processed (adr. inp+i is on 8B boundary)
4587   bind(Ltail);
4588   subcc(size, i, t4);                 // # of remaining bytes in string -> t4
4589   // return 0 if no more remaining bytes
4590   br(Assembler::lessEqual, true, Assembler::pn, Lreturn);
4591   delayed()->mov(0, result);          // annulled so i not clobbered for following
4592   ldx(inp, i, t3);                    // load final 8B (aligned) containing tail -> t3
4593   mov(8, t5);
4594   sub(t5, t4, t4);
4595   mov(0, result);                     // ** i clobbered at this point
4596   sll(t4, 3, t4);                     // bits beyond end of string -> t4
4597   srlx(t3, t4, t3);                   // bytes beyond end now obliterated -> t3
4598   andcc(lmask, t3, G0);
4599   movcc(Assembler::notZero, false, xcc, 1, result);
4600   bind(Lreturn);
4601 }
4602
4603 #endif
4604
4605
4606 // Use BIS for zeroing (count is in bytes).
4607 void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) {
4608   assert(UseBlockZeroing && VM_Version::has_block_zeroing(), "only works with BIS zeroing");
4609   Register end = count;
4610   int cache_line_size = VM_Version::prefetch_data_size();
4611   assert(cache_line_size > 0, "cache line size should be known for this code");
4612   // Minimum count when BIS zeroing can be used since
4613   // it needs membar which is expensive.
4614   int block_zero_size = MAX2(cache_line_size*3, (int)BlockZeroingLowLimit);
4615
4616   Label small_loop;
4617   // Check if count is negative (dead code) or zero.
4618   // Note, count uses 64bit in 64 bit VM.
4619   cmp_and_brx_short(count, 0, Assembler::lessEqual, Assembler::pn, Ldone);
4620
4621   // Use BIS zeroing only for big arrays since it requires membar.
4622 if (Assembler::is_simm13(block_zero_size)) { // < 4096 4623 cmp(count, block_zero_size); 4624 } else { 4625 set(block_zero_size, temp); 4626 cmp(count, temp); 4627 } 4628 br(Assembler::lessUnsigned, false, Assembler::pt, small_loop); 4629 delayed()->add(to, count, end); 4630 4631 // Note: size is >= three (32 bytes) cache lines. 4632 4633 // Clean the beginning of space up to next cache line. 4634 for (int offs = 0; offs < cache_line_size; offs += 8) { 4635 stx(G0, to, offs); 4636 } 4637 4638 // align to next cache line 4639 add(to, cache_line_size, to); 4640 and3(to, -cache_line_size, to); 4641 4642 // Note: size left >= two (32 bytes) cache lines. 4643 4644 // BIS should not be used to zero tail (64 bytes) 4645 // to avoid zeroing a header of the following object. 4646 sub(end, (cache_line_size*2)-8, end); 4647 4648 Label bis_loop; 4649 bind(bis_loop); 4650 stxa(G0, to, G0, Assembler::ASI_ST_BLKINIT_PRIMARY); 4651 add(to, cache_line_size, to); 4652 cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, bis_loop); 4653 4654 // BIS needs membar. 4655 membar(Assembler::StoreLoad); 4656 4657 add(end, (cache_line_size*2)-8, end); // restore end 4658 cmp_and_brx_short(to, end, Assembler::greaterEqualUnsigned, Assembler::pn, Ldone); 4659 4660 // Clean the tail. 4661 bind(small_loop); 4662 stx(G0, to, 0); 4663 add(to, 8, to); 4664 cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, small_loop); 4665 nop(); // Separate short branches 4666 } 4667 4668 /** 4669 * Update CRC-32[C] with a byte value according to constants in table 4670 * 4671 * @param [in,out]crc Register containing the crc. 4672 * @param [in]val Register containing the byte to fold into the CRC. 4673 * @param [in]table Register containing the table of crc constants. 
4674 * 4675 * uint32_t crc; 4676 * val = crc_table[(val ^ crc) & 0xFF]; 4677 * crc = val ^ (crc >> 8); 4678 */ 4679 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 4680 xor3(val, crc, val); 4681 and3(val, 0xFF, val); 4682 sllx(val, 2, val); 4683 lduw(table, val, val); 4684 srlx(crc, 8, crc); 4685 xor3(val, crc, crc); 4686 } 4687 4688 // Reverse byte order of lower 32 bits, assuming upper 32 bits all zeros 4689 void MacroAssembler::reverse_bytes_32(Register src, Register dst, Register tmp) { 4690 srlx(src, 24, dst); 4691 4692 sllx(src, 32+8, tmp); 4693 srlx(tmp, 32+24, tmp); 4694 sllx(tmp, 8, tmp); 4695 or3(dst, tmp, dst); 4696 4697 sllx(src, 32+16, tmp); 4698 srlx(tmp, 32+24, tmp); 4699 sllx(tmp, 16, tmp); 4700 or3(dst, tmp, dst); 4701 4702 sllx(src, 32+24, tmp); 4703 srlx(tmp, 32, tmp); 4704 or3(dst, tmp, dst); 4705 } 4706 4707 void MacroAssembler::movitof_revbytes(Register src, FloatRegister dst, Register tmp1, Register tmp2) { 4708 reverse_bytes_32(src, tmp1, tmp2); 4709 movxtod(tmp1, dst); 4710 } 4711 4712 void MacroAssembler::movftoi_revbytes(FloatRegister src, Register dst, Register tmp1, Register tmp2) { 4713 movdtox(src, tmp1); 4714 reverse_bytes_32(tmp1, dst, tmp2); 4715 } 4716 4717 void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register buf, int offset) { 4718 xmulx(xcrc_hi, xK_hi, xtmp_lo); 4719 xmulxhi(xcrc_hi, xK_hi, xtmp_hi); 4720 xmulxhi(xcrc_lo, xK_lo, xcrc_hi); 4721 xmulx(xcrc_lo, xK_lo, xcrc_lo); 4722 xor3(xcrc_lo, xtmp_lo, xcrc_lo); 4723 xor3(xcrc_hi, xtmp_hi, xcrc_hi); 4724 ldxl(buf, G0, xtmp_lo); 4725 inc(buf, 8); 4726 ldxl(buf, G0, xtmp_hi); 4727 inc(buf, 8); 4728 xor3(xcrc_lo, xtmp_lo, xcrc_lo); 4729 xor3(xcrc_hi, xtmp_hi, xcrc_hi); 4730 } 4731 4732 void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register xbuf_hi, Register xbuf_lo) { 4733 mov(xcrc_lo, xtmp_lo); 4734 mov(xcrc_hi, xtmp_hi); 4735 xmulx(xtmp_hi, xK_hi, xtmp_lo); 4736 xmulxhi(xtmp_hi, xK_hi, xtmp_hi); 4737 xmulxhi(xcrc_lo, xK_lo, xcrc_hi); 4738 xmulx(xcrc_lo, xK_lo, xcrc_lo); 4739 xor3(xcrc_lo, xbuf_lo, xcrc_lo); 4740 xor3(xcrc_hi, xbuf_hi, xcrc_hi); 4741 xor3(xcrc_lo, xtmp_lo, xcrc_lo); 4742 xor3(xcrc_hi, xtmp_hi, xcrc_hi); 4743 } 4744 4745 void MacroAssembler::fold_8bit_crc32(Register xcrc, Register table, Register xtmp, Register tmp) { 4746 and3(xcrc, 0xFF, tmp); 4747 sllx(tmp, 2, tmp); 4748 lduw(table, tmp, xtmp); 4749 srlx(xcrc, 8, xcrc); 4750 xor3(xtmp, xcrc, xcrc); 4751 } 4752 4753 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 4754 and3(crc, 0xFF, tmp); 4755 srlx(crc, 8, crc); 4756 sllx(tmp, 2, tmp); 4757 lduw(table, tmp, tmp); 4758 xor3(tmp, crc, crc); 4759 } 4760 4761 #define CRC32_TMP_REG_NUM 18 4762 4763 #define CRC32_CONST_64 0x163cd6124 4764 #define CRC32_CONST_96 0x0ccaa009e 4765 #define CRC32_CONST_160 0x1751997d0 4766 #define CRC32_CONST_480 0x1c6e41596 4767 #define CRC32_CONST_544 0x154442bd4 4768 4769 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table) { 4770 4771 Label L_cleanup_loop, L_cleanup_check, L_align_loop, L_align_check; 4772 Label L_main_loop_prologue; 4773 Label L_fold_512b, L_fold_512b_loop, L_fold_128b; 4774 Label L_fold_tail, L_fold_tail_loop; 4775 Label L_8byte_fold_loop, L_8byte_fold_check; 4776 4777 const Register tmp[CRC32_TMP_REG_NUM] = {L0, L1, L2, L3, L4, L5, L6, G1, 
#define CRC32_TMP_REG_NUM 18

#define CRC32_CONST_64  0x163cd6124
#define CRC32_CONST_96  0x0ccaa009e
#define CRC32_CONST_160 0x1751997d0
#define CRC32_CONST_480 0x1c6e41596
#define CRC32_CONST_544 0x154442bd4

void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table) {

  Label L_cleanup_loop, L_cleanup_check, L_align_loop, L_align_check;
  Label L_main_loop_prologue;
  Label L_fold_512b, L_fold_512b_loop, L_fold_128b;
  Label L_fold_tail, L_fold_tail_loop;
  Label L_8byte_fold_loop, L_8byte_fold_check;

  const Register tmp[CRC32_TMP_REG_NUM] = {L0, L1, L2, L3, L4, L5, L6, G1,
                                           I0, I1, I2, I3, I4, I5, I7, O4, O5, G3};

  Register const_64  = tmp[CRC32_TMP_REG_NUM-1];
  Register const_96  = tmp[CRC32_TMP_REG_NUM-1];
  Register const_160 = tmp[CRC32_TMP_REG_NUM-2];
  Register const_480 = tmp[CRC32_TMP_REG_NUM-1];
  Register const_544 = tmp[CRC32_TMP_REG_NUM-2];

  set(ExternalAddress(StubRoutines::crc_table_addr()), table);

  not1(crc);   // ~c
  clruwu(crc); // clear upper 32 bits of crc

  // Check if below cutoff; if so, proceed directly to cleanup code
  mov(31, G4);
  cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_check);

  // Align buffer to 8 byte boundary
  mov(8, O5);
  and3(buf, 0x7, O4);
  sub(O5, O4, O5);
  and3(O5, 0x7, O5);
  sub(len, O5, len);
  ba(L_align_check);
  delayed()->nop();

  // Alignment loop, table look-up method for up to 7 bytes
  bind(L_align_loop);
  ldub(buf, 0, O4);
  inc(buf);
  dec(O5);
  xor3(O4, crc, O4);
  and3(O4, 0xFF, O4);
  sllx(O4, 2, O4);
  lduw(table, O4, O4);
  srlx(crc, 8, crc);
  xor3(O4, crc, crc);
  bind(L_align_check);
  nop();
  cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_align_loop);

  // Aligned on 64-bit (8-byte) boundary at this point
  // Check if still above cutoff (31 bytes)
  mov(31, G4);
  cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_check);
  // At least 32 bytes left to process

  // Free up registers by storing them to FP registers
  for (int i = 0; i < CRC32_TMP_REG_NUM; i++) {
    movxtod(tmp[i], as_FloatRegister(2*i));
  }

  // Determine which loop to enter
  // Shared prologue
  ldxl(buf, G0, tmp[0]);
  inc(buf, 8);
  ldxl(buf, G0, tmp[1]);
  inc(buf, 8);
  xor3(tmp[0], crc, tmp[0]); // Fold CRC into first few bytes
  and3(crc, 0, crc);         // Clear out the crc register
  // Main loop needs at least 128 bytes
  mov(128, G4);
  mov(64, tmp[2]);
  cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_main_loop_prologue);
  // Less than 64 bytes
  nop();
  cmp_and_br_short(len, tmp[2], Assembler::lessUnsigned, Assembler::pt, L_fold_tail);
  // Between 64 and 127 bytes
  set64(CRC32_CONST_96, const_96, tmp[8]);
  set64(CRC32_CONST_160, const_160, tmp[9]);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[2], tmp[3], buf, 0);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[4], tmp[5], buf, 16);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[6], tmp[7], buf, 32);
  dec(len, 48);
  ba(L_fold_tail);
  delayed()->nop();

  bind(L_main_loop_prologue);
  for (int i = 2; i < 8; i++) {
    ldxl(buf, G0, tmp[i]);
    inc(buf, 8);
  }

  // Fold a total of 512 bits of polynomial on each iteration,
  // 128 bits in each of 4 parallel streams
  set64(CRC32_CONST_480, const_480, tmp[8]);
  set64(CRC32_CONST_544, const_544, tmp[9]);

  mov(128, G4);
  bind(L_fold_512b_loop);
  fold_128bit_crc32(tmp[1], tmp[0], const_480, const_544, tmp[9],  tmp[8],  buf,  0);
  fold_128bit_crc32(tmp[3], tmp[2], const_480, const_544, tmp[11], tmp[10], buf, 16);
  fold_128bit_crc32(tmp[5], tmp[4], const_480, const_544, tmp[13], tmp[12], buf, 32);
  fold_128bit_crc32(tmp[7], tmp[6], const_480, const_544, tmp[15], tmp[14], buf, 64);
  dec(len, 64);
  cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_fold_512b_loop);
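  // Note on the loop above: the four fold_128bit_crc32 calls maintain four
  // independent 128-bit remainders (tmp[0..7]), so their multiply chains can
  // overlap in the pipeline instead of serializing on a single remainder.
  // Per stream, one iteration matches the scalar model sketched earlier:
  //
  //   x[s] = clmul128(x[s].hi, xK_hi) ^ clmul128(x[s].lo, xK_lo)
  //          ^ next_16_bytes(buf);    // here xK_hi/xK_lo = const_480/const_544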
  // Fold 512 bits to 128 bits
  bind(L_fold_512b);
  set64(CRC32_CONST_96, const_96, tmp[8]);
  set64(CRC32_CONST_160, const_160, tmp[9]);

  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[3], tmp[2]);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[5], tmp[4]);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[7], tmp[6]);
  dec(len, 48);

  // Fold the remaining data in 128-bit chunks
  bind(L_fold_tail);
  mov(32, G4);
  cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_fold_128b);

  set64(CRC32_CONST_96, const_96, tmp[8]);
  set64(CRC32_CONST_160, const_160, tmp[9]);

  bind(L_fold_tail_loop);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[2], tmp[3], buf, 0);
  sub(len, 16, len);
  cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_fold_tail_loop);

  // Fold the 128 bits in tmps 0 - 1 into tmp 1
  bind(L_fold_128b);

  set64(CRC32_CONST_64, const_64, tmp[4]);

  xmulx(const_64, tmp[0], tmp[2]);
  xmulxhi(const_64, tmp[0], tmp[3]);

  srl(tmp[2], G0, tmp[4]);
  xmulx(const_64, tmp[4], tmp[4]);

  srlx(tmp[2], 32, tmp[2]);
  sllx(tmp[3], 32, tmp[3]);
  or3(tmp[2], tmp[3], tmp[2]);

  xor3(tmp[4], tmp[1], tmp[4]);
  xor3(tmp[4], tmp[2], tmp[1]);
  dec(len, 8);

  // Use table lookup for the 8 bytes left in tmp[1]
  dec(len, 8);

  // 8 8-bit folds to compute 32-bit CRC.
  for (int j = 0; j < 4; j++) {
    fold_8bit_crc32(tmp[1], table, tmp[2], tmp[3]);
  }
  srl(tmp[1], G0, crc); // move 32 bits to general register
  for (int j = 0; j < 4; j++) {
    fold_8bit_crc32(crc, table, tmp[3]);
  }

  bind(L_8byte_fold_check);

  // Restore int registers saved in FP registers
  for (int i = 0; i < CRC32_TMP_REG_NUM; i++) {
    movdtox(as_FloatRegister(2*i), tmp[i]);
  }

  ba(L_cleanup_check);
  delayed()->nop();

  // Table look-up method for the remaining few bytes
  bind(L_cleanup_loop);
  ldub(buf, 0, O4);
  inc(buf);
  dec(len);
  xor3(O4, crc, O4);
  and3(O4, 0xFF, O4);
  sllx(O4, 2, O4);
  lduw(table, O4, O4);
  srlx(crc, 8, crc);
  xor3(O4, crc, crc);
  bind(L_cleanup_check);
  nop();
  cmp_and_br_short(len, 0, Assembler::greaterUnsigned, Assembler::pt, L_cleanup_loop);

  not1(crc);
}

#define CHUNK_LEN 128          /* 128 x 8B = 1KB */
#define CHUNK_K1  0x1307a0206  /* reverseBits(pow(x, CHUNK_LEN*8*8*3 - 32) mod P(x)) << 1 */
#define CHUNK_K2  0x1a0f717c4  /* reverseBits(pow(x, CHUNK_LEN*8*8*2 - 32) mod P(x)) << 1 */
#define CHUNK_K3  0x0170076fa  /* reverseBits(pow(x, CHUNK_LEN*8*8*1 - 32) mod P(x)) << 1 */

void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, Register table) {

  Label L_crc32c_head, L_crc32c_aligned;
  Label L_crc32c_parallel, L_crc32c_parallel_loop;
  Label L_crc32c_serial, L_crc32c_x32_loop, L_crc32c_x8, L_crc32c_x8_loop;
  Label L_crc32c_done, L_crc32c_tail, L_crc32c_return;

  set(ExternalAddress(StubRoutines::crc32c_table_addr()), table);

  cmp_and_br_short(len, 0, Assembler::lessEqual, Assembler::pn, L_crc32c_return);

  // clear upper 32 bits of crc
  clruwu(crc);

  and3(buf, 7, G4);
  cmp_and_brx_short(G4, 0, Assembler::equal, Assembler::pt, L_crc32c_aligned);
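  // G4 currently holds buf & 7; the two instructions below turn that into
  // the byte count needed to reach the next 8-byte boundary, 8 - (buf & 7),
  // which the head loop then consumes one byte at a time.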
  mov(8, G1);
  sub(G1, G4, G4);

  // ------ process the misaligned head (7 bytes or less) ------
  bind(L_crc32c_head);

  // crc = (crc >>> 8) ^ byteTable[(crc ^ b) & 0xFF];
  ldub(buf, 0, G1);
  update_byte_crc32(crc, G1, table);

  inc(buf);
  dec(len);
  cmp_and_br_short(len, 0, Assembler::equal, Assembler::pn, L_crc32c_return);
  dec(G4);
  cmp_and_br_short(G4, 0, Assembler::greater, Assembler::pt, L_crc32c_head);

  // ------ process the 8-byte-aligned body ------
  bind(L_crc32c_aligned);
  nop();
  cmp_and_br_short(len, 8, Assembler::less, Assembler::pn, L_crc32c_tail);

  // reverse the byte order of lower 32 bits to big endian, and move to FP side
  movitof_revbytes(crc, F0, G1, G3);

  set(CHUNK_LEN*8*4, G4);
  cmp_and_br_short(len, G4, Assembler::less, Assembler::pt, L_crc32c_serial);

  // ------ process four 1KB chunks in parallel ------
  bind(L_crc32c_parallel);

  fzero(FloatRegisterImpl::D, F2);
  fzero(FloatRegisterImpl::D, F4);
  fzero(FloatRegisterImpl::D, F6);

  mov(CHUNK_LEN - 1, G4);
  bind(L_crc32c_parallel_loop);
  // schedule ldf's ahead of crc32c's to hide the load-use latency
  ldf(FloatRegisterImpl::D, buf, 0,            F8);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*8,  F10);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*16, F12);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*24, F14);
  crc32c(F0, F8,  F0);
  crc32c(F2, F10, F2);
  crc32c(F4, F12, F4);
  crc32c(F6, F14, F6);
  inc(buf, 8);
  dec(G4);
  cmp_and_br_short(G4, 0, Assembler::greater, Assembler::pt, L_crc32c_parallel_loop);

  ldf(FloatRegisterImpl::D, buf, 0,            F8);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*8,  F10);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*16, F12);
  crc32c(F0, F8,  F0);
  crc32c(F2, F10, F2);
  crc32c(F4, F12, F4);

  inc(buf, CHUNK_LEN*24);
  ldfl(FloatRegisterImpl::D, buf, G0, F14); // load in little endian
  inc(buf, 8);

  prefetch(buf, 0,            Assembler::severalReads);
  prefetch(buf, CHUNK_LEN*8,  Assembler::severalReads);
  prefetch(buf, CHUNK_LEN*16, Assembler::severalReads);
  prefetch(buf, CHUNK_LEN*24, Assembler::severalReads);

  // move to INT side, and reverse the byte order of lower 32 bits to little endian
  movftoi_revbytes(F0, O4, G1, G4);
  movftoi_revbytes(F2, O5, G1, G4);
  movftoi_revbytes(F4, G5, G1, G4);

  // combine the results of 4 chunks
  set64(CHUNK_K1, G3, G1);
  xmulx(O4, G3, O4);
  set64(CHUNK_K2, G3, G1);
  xmulx(O5, G3, O5);
  set64(CHUNK_K3, G3, G1);
  xmulx(G5, G3, G5);

  movdtox(F14, G4);
  xor3(O4, O5, O5);
  xor3(G5, O5, O5);
  xor3(G4, O5, O5);

  // reverse the byte order to big endian, via stack, and move to FP side
  // TODO: use new revb instruction
  add(SP, -8, G1);
  srlx(G1, 3, G1);
  sllx(G1, 3, G1);
  stx(O5, G1, G0);
  ldfl(FloatRegisterImpl::D, G1, G0, F2); // load in little endian

  crc32c(F6, F2, F0);

  set(CHUNK_LEN*8*4, G4);
  sub(len, G4, len);
  cmp_and_br_short(len, G4, Assembler::greaterEqual, Assembler::pt, L_crc32c_parallel);
  nop();
  cmp_and_br_short(len, 0, Assembler::equal, Assembler::pt, L_crc32c_done);

  bind(L_crc32c_serial);

  mov(32, G4);
  cmp_and_br_short(len, G4, Assembler::less, Assembler::pn, L_crc32c_x8);
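  // The serial paths below keep a single crc32c dependency chain through F0,
  // so there is no instruction-level parallelism to exploit; the 32-byte loop
  // is unrolled 4x so the pointer/length bookkeeping and the loop branch are
  // amortized over four crc32c steps.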
  // ------ process 32B chunks ------
  bind(L_crc32c_x32_loop);
  ldf(FloatRegisterImpl::D, buf, 0, F2);
  crc32c(F0, F2, F0);
  ldf(FloatRegisterImpl::D, buf, 8, F2);
  crc32c(F0, F2, F0);
  ldf(FloatRegisterImpl::D, buf, 16, F2);
  crc32c(F0, F2, F0);
  ldf(FloatRegisterImpl::D, buf, 24, F2);
  inc(buf, 32);
  crc32c(F0, F2, F0);
  dec(len, 32);
  cmp_and_br_short(len, G4, Assembler::greaterEqual, Assembler::pt, L_crc32c_x32_loop);

  bind(L_crc32c_x8);
  nop();
  cmp_and_br_short(len, 8, Assembler::less, Assembler::pt, L_crc32c_done);

  // ------ process 8B chunks ------
  bind(L_crc32c_x8_loop);
  ldf(FloatRegisterImpl::D, buf, 0, F2);
  inc(buf, 8);
  crc32c(F0, F2, F0);
  dec(len, 8);
  cmp_and_br_short(len, 8, Assembler::greaterEqual, Assembler::pt, L_crc32c_x8_loop);

  bind(L_crc32c_done);

  // move to INT side, and reverse the byte order of lower 32 bits to little endian
  movftoi_revbytes(F0, crc, G1, G3);

  cmp_and_br_short(len, 0, Assembler::equal, Assembler::pt, L_crc32c_return);

  // ------ process the misaligned tail (7 bytes or less) ------
  bind(L_crc32c_tail);

  // crc = (crc >>> 8) ^ byteTable[(crc ^ b) & 0xFF];
  ldub(buf, 0, G1);
  update_byte_crc32(crc, G1, table);

  inc(buf);
  dec(len);
  cmp_and_br_short(len, 0, Assembler::greater, Assembler::pt, L_crc32c_tail);

  bind(L_crc32c_return);
  nop();
}
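// For reference, a bitwise scalar model of the per-byte CRC32C update that
// the crc32c instruction and the byteTable lookups above accelerate. CRC-32C
// uses the Castagnoli polynomial, 0x82F63B78 in reflected form; this sketch
// is illustrative only and is not used by the generated code:
//
//   uint32_t crc32c_byte(uint32_t crc, uint8_t b) {
//     crc ^= b;
//     for (int i = 0; i < 8; i++) {
//       crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1)));
//     }
//     return crc;
//   }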