/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/klass.inline.hpp"
#include "prims/jvm.h"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef COMPILER2
#include "opto/intrinsicnode.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

// Convert the raw encoding form into the form expected by the
// constructor for Address.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  assert(scale == 0, "not supported");
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }

  Register rindex = as_Register(index);
  if (rindex != G0) {
    Address madr(as_Register(base), rindex);
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), disp);
    madr._rspec = rspec;
    return madr;
  }
}

Address Argument::address_in_frame() const {
  // Warning: In LP64 mode disp will occupy more than 10 bits, but
  //          opcodes such as ld and ldx only access disp() to get
  //          their simm13 argument.
  int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
  if (is_in())
    return Address(FP, disp); // In argument.
  else
    return Address(SP, disp); // Out argument.
}
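// Illustrative only (the concrete constants are assumptions, not values from
// this file): with n_register_parameters == 6, the first stack-passed
// argument (_number == 6) would land at
//   disp = (6 - 6 + frame::memory_parameter_word_sp_offset) * BytesPerWord + STACK_BIAS
// i.e. at the base of the memory-parameter area just above the biased SP,
// and each later argument moves up one BytesPerWord slot. is_in() selects FP
// as the base so the callee addresses the same slots from its side of the
// register window.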
static const char* argumentNames[][2] = {
  {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
  {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
  {"A(n>9)","P(n>9)"}
};

const char* Argument::name() const {
  int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
  int num = number();
  if (num >= nofArgs)  num = nofArgs - 1;
  return argumentNames[num][is_in() ? 1 : 0];
}

#ifdef ASSERT
// On RISC, there's no benefit to verifying instruction boundaries.
bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

// Patch instruction inst at offset inst_pos to refer to dest_pos
// and return the resulting instruction.
// We should have pcs, not offsets, but since all is relative, it will work out
// OK.
int MacroAssembler::patched_branch(int dest_pos, int inst, int inst_pos) {
  int m; // mask for displacement field
  int v; // new value for displacement field
  const int word_aligned_ones = -4;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:    m = wdisp(word_aligned_ones, 0, 30);  v = wdisp(dest_pos, inst_pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:  m = wdisp(  word_aligned_ones, 0, 19);  v = wdisp(  dest_pos, inst_pos, 19); break;
      case bp_op2:   m = wdisp(  word_aligned_ones, 0, 19);  v = wdisp(  dest_pos, inst_pos, 19); break;
      case fb_op2:   m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
      case br_op2:   m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          m = wdisp10(word_aligned_ones, 0);
          v = wdisp10(dest_pos, inst_pos);
        } else {
          m = wdisp16(word_aligned_ones, 0);
          v = wdisp16(dest_pos, inst_pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return inst & ~m | v;
}
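// Sketch of the patch step above (commentary, not VM code): applying wdisp
// to word_aligned_ones yields a mask m that covers exactly the branch's
// displacement field, so
//   (inst & ~m) | v
// clears the old word displacement and ORs in the new one while leaving the
// opcode, condition and annul bits untouched. For a 19-bit bp_op2 branch,
// for instance, only the low displacement bits of the instruction word are
// rewritten.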
// Return the offset of the branch destination of instruction inst
// at offset pos.
// Should have pcs, but since all is relative, it works out.
int MacroAssembler::branch_destination(int inst, int pos) {
  int r;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:        r = inv_wdisp(inst, pos, 30);  break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:    r = inv_wdisp(  inst, pos, 19);  break;
      case bp_op2:     r = inv_wdisp(  inst, pos, 19);  break;
      case fb_op2:     r = inv_wdisp(  inst, pos, 22);  break;
      case br_op2:     r = inv_wdisp(  inst, pos, 22);  break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          r = inv_wdisp10(inst, pos);
        } else {
          r = inv_wdisp16(inst, pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return r;
}

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check((intptr_t)offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any registers
    ld_ptr(reg, 0, G0);
  }
  else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}

// Ring buffer jumps


void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line ) {
  assert_not_delayed();
  jmpl(r1, r2, G0);
}
void MacroAssembler::jmp(Register r1, int offset, const char* file, int line ) {
  assert_not_delayed();
  jmp(r1, offset);
}

// This code sequence is relocatable to any address, even on LP64.
void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  Address a(temp, addrlit.low10() + offset);  // Add the offset to the displacement.
  jmpl(a.base(), a.disp(), d);
}

void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
  jumpl(addrlit, temp, G0, offset, file, line);
}


// Conditional breakpoint (for assertion checks in assembly code)
void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
  trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
}

// We want to use ST_BREAKPOINT here, but the debugger is confused by it.
void MacroAssembler::breakpoint_trap() {
  trap(ST_RESERVED_FOR_USER_0);
}

// Write serialization page so VM thread can do a pseudo remote membar
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  srl(thread, os::get_serialize_page_shift_count(), tmp2);
  if (Assembler::is_simm13(os::vm_page_size())) {
    and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
  }
  else {
    set((os::vm_page_size() - sizeof(int)), tmp1);
    and3(tmp2, tmp1, tmp2);
  }
  set(os::get_memory_serialize_page(), tmp1);
  st(G0, tmp1, tmp2);
}
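// Illustrative numbers only (the page size and shift count are assumptions,
// not taken from os::): with an 8K page and a shift count of 4, a thread
// pointer T selects the offset
//   (T >> 4) & (8192 - sizeof(int))
// into the shared serialization page, so distinct threads tend to write to
// distinct cache lines, and the VM thread can later protect the page to
// force every mutator through a fault (the "pseudo remote membar" above).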
void MacroAssembler::enter() {
  Unimplemented();
}

void MacroAssembler::leave() {
  Unimplemented();
}

// Calls to C land

#ifdef ASSERT
// a hook for debugging
static Thread* reinitialize_thread() {
  return Thread::current();
}
#else
#define reinitialize_thread Thread::current
#endif

#ifdef ASSERT
address last_get_thread = NULL;
#endif

// call this when G2_thread is not known to be valid
void MacroAssembler::get_thread() {
  save_frame(0);                // to avoid clobbering O0
  mov(G1, L0);                  // avoid clobbering G1
  mov(G5_method, L1);           // avoid clobbering G5
  mov(G3, L2);                  // avoid clobbering G3 also
  mov(G4, L5);                  // avoid clobbering G4
#ifdef ASSERT
  AddressLiteral last_get_thread_addrlit(&last_get_thread);
  set(last_get_thread_addrlit, L3);
  rdpc(L4);
  inc(L4, 3 * BytesPerInstWord); // skip rdpc + inc + st_ptr to point L4 at call
  st_ptr(L4, L3, 0);
#endif
  call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
  delayed()->nop();
  mov(L0, G1);
  mov(L1, G5_method);
  mov(L2, G3);
  mov(L5, G4);
  restore(O0, 0, G2_thread);
}

static Thread* verify_thread_subroutine(Thread* gthread_value) {
  Thread* correct_value = Thread::current();
  guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
  return correct_value;
}
void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    // NOTE: this chops off the heads of the 64-bit O registers.
    // make sure G2_thread contains the right value
    save_frame_and_mov(0, Lmethod, Lmethod);   // to avoid clobbering O0 (and propagate Lmethod)
    mov(G1, L1);                // avoid clobbering G1
    // G2 saved below
    mov(G3, L3);                // avoid clobbering G3
    mov(G4, L4);                // avoid clobbering G4
    mov(G5_method, L5);         // avoid clobbering G5_method
    call(CAST_FROM_FN_PTR(address,verify_thread_subroutine), relocInfo::runtime_call_type);
    delayed()->mov(G2_thread, O0);

    mov(L1, G1);                // Restore G1
    // G2 restored below
    mov(L3, G3);                // restore G3
    mov(L4, G4);                // restore G4
    mov(L5, G5_method);         // restore G5_method
    restore(O0, 0, G2_thread);
  }
}


void MacroAssembler::save_thread(const Register thread_cache) {
  verify_thread();
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(G2_thread, thread_cache);
  }
  if (VerifyThread) {
    // smash G2_thread, as if the VM were about to anyway
    set(0x67676767, G2_thread);
  }
}


void MacroAssembler::restore_thread(const Register thread_cache) {
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(thread_cache, G2_thread);
    verify_thread();
  } else {
    // do it the slow way
    get_thread();
  }
}
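// Typical (hypothetical) pairing of the two helpers above around a C call,
// caching the thread in a window-saved register so the callee may clobber G2:
//
//   save_thread(L7);          // L7 := G2_thread (G2 smashed if verifying)
//   call(entry, relocInfo::runtime_call_type);
//   delayed()->nop();
//   restore_thread(L7);       // G2_thread := L7
//
// Passing noreg as the cache instead forces the slow get_thread() path on
// restore.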
// %%% maybe get rid of [re]set_last_Java_frame
void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
  assert_not_delayed();
  Address flags(G2_thread, JavaThread::frame_anchor_offset() +
                           JavaFrameAnchor::flags_offset());
  Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset());

  // Always set last_Java_pc and flags first because once last_Java_sp is visible
  // has_last_Java_frame is true and users will look at the rest of the fields.
  // (Note: flags should always be zero before we get here so doesn't need to be set.)

#ifdef ASSERT
  // Verify that last_Java_pc was zeroed on return to Java
  Label PcOk;
  save_frame(0);                // to avoid clobbering O0
  ld_ptr(pc_addr, L0);
  br_null_short(L0, Assembler::pt, PcOk);
  STOP("last_Java_pc not zeroed before leaving Java");
  bind(PcOk);

  // Verify that flags was zeroed on return to Java
  Label FlagsOk;
  ld(flags, L0);
  tst(L0);
  br(Assembler::zero, false, Assembler::pt, FlagsOk);
  delayed()->restore();
  STOP("flags not zeroed before leaving Java");
  bind(FlagsOk);
#endif /* ASSERT */
  //
  // When returning from calling out from Java mode the frame anchor's last_Java_pc
  // will always be set to NULL. It is set here so that if we are doing a call to
  // native (not VM) that we capture the known pc and don't have to rely on the
  // native call having a standard frame linkage where we can find the pc.

  if (last_Java_pc->is_valid()) {
    st_ptr(last_Java_pc, pc_addr);
  }

#ifdef ASSERT
  // Make sure that we have an odd stack
  Label StackOk;
  andcc(last_java_sp, 0x01, G0);
  br(Assembler::notZero, false, Assembler::pt, StackOk);
  delayed()->nop();
  STOP("Stack Not Biased in set_last_Java_frame");
  bind(StackOk);
#endif // ASSERT
  assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
  add( last_java_sp, STACK_BIAS, G4_scratch );
  st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
}

void MacroAssembler::reset_last_Java_frame(void) {
  assert_not_delayed();

  Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset());
  Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
  Address flags  (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

#ifdef ASSERT
  // check that it WAS previously set
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod to helper frame
  ld_ptr(sp_addr, L0);
  tst(L0);
  breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
  restore();
#endif // ASSERT

  st_ptr(G0, sp_addr);
  // Always return last_Java_pc to zero
  st_ptr(G0, pc_addr);
  // Always null flags after return to Java
  st(G0, flags);
}
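// Reading the two routines above together, the anchor protocol (a sketch of
// what the comments describe): publish last_Java_pc first (flags is already
// zero on entry), then store the biased sp last, because a non-NULL sp is
// what makes has_last_Java_frame() true; reset_last_Java_frame tears down in
// the opposite order, nulling sp before pc and flags. A stack walker may
// observe the anchor at any point, so this ordering keeps it from reading a
// half-built frame description.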
void MacroAssembler::call_VM_base(
  Register        oop_result,
  Register        thread_cache,
  Register        last_java_sp,
  address         entry_point,
  int             number_of_arguments,
  bool            check_exceptions)
{
  assert_not_delayed();

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");

  // 64-bit last_java_sp is biased!
  set_last_Java_frame(last_java_sp, noreg);
  if (VerifyThread)  mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread)
    delayed()->mov(G2_thread, O0);  // pass thread as first argument
  else
    delayed()->nop();               // (thread already passed)
  restore_thread(thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions. use Gtemp as scratch register.
  if (check_exceptions) {
    check_and_forward_exception(Gtemp);
  }

#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }
}

void MacroAssembler::check_and_forward_exception(Register scratch_reg)
{
  Label L;

  check_and_handle_popframe(scratch_reg);
  check_and_handle_earlyret(scratch_reg);

  Address exception_addr(G2_thread, Thread::pending_exception_offset());
  ld_ptr(exception_addr, scratch_reg);
  br_null_short(scratch_reg, pt, L);
  // we use O7 linkage so that forward_exception_entry has the issuing PC
  call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  delayed()->nop();
  bind(L);
}


void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
}


void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, entry_point, 3, check_exceptions);
}
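// Hypothetical call site for the overloads above, to make the register
// discipline concrete (G3 and entry_point stand in for real values): the
// callee receives the thread in O0 and the declared arguments in O1..O3,
// so an argument must not already live in a lower-numbered outgoing register:
//
//   call_VM(O0, entry_point, G3, true);   // G3 -> O1, G2_thread -> O0
//
// The asserts in the two- and three-argument forms catch only the direct
// cases where a later argument was itself passed in O1 or O2 and has just
// been overwritten.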
// Note: The following call_VM overloadings are useful when a "save"
// has already been performed by a stub, and the last Java frame is
// the previous one.  In that case, last_java_sp must be passed as FP
// instead of SP.


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}



void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
  assert_not_delayed();
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  delayed()->nop();
  restore_thread(thread_cache);
#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
  call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
  mov(arg_1, O0);
  call_VM_leaf(thread_cache, entry_point, 1);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 2);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 3);
}


void MacroAssembler::get_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  ld_ptr(    vm_result_addr, oop_result);
  st_ptr(G0, vm_result_addr);
  verify_oop(oop_result);
}


void MacroAssembler::get_vm_result_2(Register metadata_result) {
  verify_thread();
  Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
  ld_ptr(vm_result_addr_2, metadata_result);
  st_ptr(G0, vm_result_addr_2);
}
// We require that C code which does not return a value in vm_result will
// leave it undisturbed.
void MacroAssembler::set_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  verify_oop(oop_result);

# ifdef ASSERT
    // Check that we are not overwriting any other oop.
    save_frame_and_mov(0, Lmethod, Lmethod); // Propagate Lmethod
    ld_ptr(vm_result_addr, L0);
    tst(L0);
    restore();
    breakpoint_trap(notZero, Assembler::ptr_cc);
# endif

  st_ptr(oop_result, vm_result_addr);
}


void MacroAssembler::ic_call(address entry, bool emit_delay, jint method_index) {
  RelocationHolder rspec = virtual_call_Relocation::spec(pc(), method_index);
  patchable_set((intptr_t)Universe::non_oop_word(), G5_inline_cache_reg);
  relocate(rspec);
  call(entry, relocInfo::none);
  if (emit_delay) {
    delayed()->nop();
  }
}

void MacroAssembler::card_table_write(jbyte* byte_map_base,
                                      Register tmp, Register obj) {
  srlx(obj, CardTableModRefBS::card_shift, obj);
  assert(tmp != obj, "need separate temp reg");
  set((address) byte_map_base, tmp);
  stb(G0, tmp, obj);
}


void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  address save_pc;
  int shiftcnt;
#ifdef VALIDATE_PIPELINE
  assert_no_delay("Cannot put two instructions in delay-slot.");
#endif
  v9_dep();
  save_pc = pc();

  int msb32 = (int) (addrlit.value() >> 32);
  int lsb32 = (int) (addrlit.value());

  if (msb32 == 0 && lsb32 >= 0) {
    Assembler::sethi(lsb32, d, addrlit.rspec());
  }
  else if (msb32 == -1) {
    Assembler::sethi(~lsb32, d, addrlit.rspec());
    xor3(d, ~low10(~0), d);
  }
  else {
    Assembler::sethi(msb32, d, addrlit.rspec());  // msb 22-bits
    if (msb32 & 0x3ff)                            // Any bits?
      or3(d, msb32 & 0x3ff, d);                   // msb 32-bits are now in lsb 32
    if (lsb32 & 0xFFFFFC00) {                     // done?
      if ((lsb32 >> 20) & 0xfff) {                // Any bits set?
        sllx(d, 12, d);                           // Make room for next 12 bits
        or3(d, (lsb32 >> 20) & 0xfff, d);         // Or in next 12
        shiftcnt = 0;                             // We already shifted
      }
      else
        shiftcnt = 12;
      if ((lsb32 >> 10) & 0x3ff) {
        sllx(d, shiftcnt + 10, d);                // Make room for last 10 bits
        or3(d, (lsb32 >> 10) & 0x3ff, d);         // Or in next 10
        shiftcnt = 0;
      }
      else
        shiftcnt = 10;
      sllx(d, shiftcnt + 10, d);                  // Shift leaving disp field 0'd
    }
    else
      sllx(d, 32, d);
  }
  // Pad out the instruction sequence so it can be patched later.
  if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
                           addrlit.rtype() != relocInfo::runtime_call_type)) {
    while (pc() < (save_pc + (7 * BytesPerInstWord)))
      nop();
  }
}
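// Worked example for the general case above (the address is made up for the
// sketch): a full 64-bit value with bits in both halves costs at most
//   sethi %hi(msb32), d      ; top 22 bits of the upper word
//   or    d, msb32 & 0x3ff, d
//   sllx  d, 12, d           ; make room, then OR in lsb32 bits 31..20
//   or    d, ..., d
//   sllx  d, 10, d           ; then lsb32 bits 19..10
//   or    d, ..., d
//   sllx  d, 10, d           ; leave the low 10 (disp) bits zero
// i.e. 7 instructions, which is exactly what the padding loop fills up to so
// the patchable form always has a fixed length.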
void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, false);
}


void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, true);
}


int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
  if (worst_case)  return 7;
  intptr_t iaddr = (intptr_t) a;
  int msb32 = (int) (iaddr >> 32);
  int lsb32 = (int) (iaddr);
  int count;
  if (msb32 == 0 && lsb32 >= 0)
    count = 1;
  else if (msb32 == -1)
    count = 2;
  else {
    count = 2;
    if (msb32 & 0x3ff)
      count++;
    if (lsb32 & 0xFFFFFC00 ) {
      if ((lsb32 >> 20) & 0xfff)  count += 2;
      if ((lsb32 >> 10) & 0x3ff)  count += 2;
    }
  }
  return count;
}

int MacroAssembler::worst_case_insts_for_set() {
  return insts_for_sethi(NULL, true) + 1;
}


// Keep in sync with MacroAssembler::insts_for_internal_set
void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  intptr_t value = addrlit.value();

  if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) {
    // can optimize
    if (-4096 <= value && value <= 4095) {
      or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
      return;
    }
    if (inv_hi22(hi22(value)) == value) {
      sethi(addrlit, d);
      return;
    }
  }
  assert_no_delay("Cannot put two instructions in delay-slot.");
  internal_sethi(addrlit, d, ForceRelocatable);
  if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
    add(d, addrlit.low10(), d, addrlit.rspec());
  }
}

// Keep in sync with MacroAssembler::internal_set
int MacroAssembler::insts_for_internal_set(intptr_t value) {
  // can optimize
  if (-4096 <= value && value <= 4095) {
    return 1;
  }
  if (inv_hi22(hi22(value)) == value) {
    return insts_for_sethi((address) value);
  }
  int count = insts_for_sethi((address) value);
  AddressLiteral al(value);
  if (al.low10() != 0) {
    count++;
  }
  return count;
}

void MacroAssembler::set(const AddressLiteral& al, Register d) {
  internal_set(al, d, false);
}

void MacroAssembler::set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, false);
}

void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) {
  AddressLiteral al(addr, rspec);
  internal_set(al, d, false);
}

void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) {
  internal_set(al, d, true);
}

void MacroAssembler::patchable_set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, true);
}
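// Cost sketch for the non-relocatable set() paths above, matching
// insts_for_internal_set() (example constants chosen for illustration):
// set(4095, d) is a single `or %g0, 4095, d` since the value fits the simm13
// test; set(0x12345c00, d), whose value survives the hi22 round-trip because
// its low 10 bits are zero, is a single sethi; anything else is the sethi
// sequence plus one trailing `add d, low10(value), d`.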
void MacroAssembler::set64(jlong value, Register d, Register tmp) {
  assert_not_delayed();
  v9_dep();

  int hi = (int)(value >> 32);
  int lo = (int)(value & ~0);
  int bits_33to2 = (int)((value >> 2) & ~0);
  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    or3(G0, lo, d);
  } else if (hi == 0) {
    Assembler::sethi(lo, d);          // hardware version zero-extends to upper 32
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
  }
  else if ((hi >> 2) == 0) {
    Assembler::sethi(bits_33to2, d);  // hardware version zero-extends to upper 32
    sllx(d, 2, d);
    if (low12(lo) != 0)
      or3(d, low12(lo), d);
  }
  else if (hi == -1) {
    Assembler::sethi(~lo, d);         // hardware version zero-extends to upper 32
    xor3(d, low10(lo) ^ ~low10(~0), d);
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      or3(G0, hi, d);
    } else {
      Assembler::sethi(hi, d);        // hardware version zero-extends to upper 32
      if (low10(hi) != 0)
        or3(d, low10(hi), d);
    }
    sllx(d, 32, d);
  }
  else {
    Assembler::sethi(hi, tmp);
    Assembler::sethi(lo, d);          // macro assembler version sign-extends
    if (low10(hi) != 0)
      or3 (tmp, low10(hi), tmp);
    if (low10(lo) != 0)
      or3 ( d, low10(lo), d);
    sllx(tmp, 32, tmp);
    or3 (d, tmp, d);
  }
}

int MacroAssembler::insts_for_set64(jlong value) {
  v9_dep();

  int hi = (int) (value >> 32);
  int lo = (int) (value & ~0);
  int count = 0;

  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    count++;
  } else if (hi == 0) {
    count++;
    if (low10(lo) != 0)
      count++;
  }
  else if (hi == -1) {
    count += 2;
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      count++;
    } else {
      count++;
      if (low10(hi) != 0)
        count++;
    }
    count++;
  }
  else {
    count += 2;
    if (low10(hi) != 0)
      count++;
    if (low10(lo) != 0)
      count++;
    count += 2;
  }
  return count;
}

// compute size in bytes of sparc frame, given
// number of extraWords
int MacroAssembler::total_frame_size_in_bytes(int extraWords) {

  int nWords = frame::memory_parameter_word_sp_offset;

  nWords += extraWords;

  if (nWords & 1) ++nWords; // round up to double-word

  return nWords * BytesPerWord;
}


// save_frame: given number of "extra" words in frame,
// issue approp. save instruction (p 200, v8 manual)

void MacroAssembler::save_frame(int extraWords) {
  int delta = -total_frame_size_in_bytes(extraWords);
  if (is_simm13(delta)) {
    save(SP, delta, SP);
  } else {
    set(delta, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_c1(int size_in_bytes) {
  if (is_simm13(-size_in_bytes)) {
    save(SP, -size_in_bytes, SP);
  } else {
    set(-size_in_bytes, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}
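// Worked example for total_frame_size_in_bytes (the base offset is a
// hypothetical value, not taken from frame.hpp): if
// frame::memory_parameter_word_sp_offset were 23 and the caller asks for 4
// extra words, nWords = 27 rounds up to 28 for double-word alignment, giving
// 28 * BytesPerWord bytes; save_frame() then emits `save %sp, -size, %sp`
// directly when -size fits in simm13, and otherwise materializes the delta
// in G3_scratch first.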
void MacroAssembler::save_frame_and_mov(int extraWords,
                                        Register s1, Register d1,
                                        Register s2, Register d2) {
  assert_not_delayed();

  // The trick here is to use precisely the same memory word
  // that trap handlers also use to save the register.
  // This word cannot be used for any other purpose, but
  // it works fine to save the register's value, whether or not
  // an interrupt flushes register windows at any given moment!
  Address s1_addr;
  if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
    s1_addr = s1->address_in_saved_window();
    st_ptr(s1, s1_addr);
  }

  Address s2_addr;
  if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
    s2_addr = s2->address_in_saved_window();
    st_ptr(s2, s2_addr);
  }

  save_frame(extraWords);

  if (s1_addr.base() == SP) {
    ld_ptr(s1_addr.after_save(), d1);
  } else if (s1->is_valid()) {
    mov(s1->after_save(), d1);
  }

  if (s2_addr.base() == SP) {
    ld_ptr(s2_addr.after_save(), d2);
  } else if (s2->is_valid()) {
    mov(s2->after_save(), d2);
  }
}


AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}


AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(obj, oop_Relocation::spec(oop_index));
}

void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(0x3fffff, d);
  emit_int32( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff) );
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, 0x3ff, d);

}
void MacroAssembler::set_narrow_klass(Klass* k, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  narrowOop encoded_k = Klass::encode_klass(k);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(encoded_k, d);
  emit_int32( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(encoded_k) );
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, low10(encoded_k), d);

}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) nop();
}

void RegistersForDebugging::print(outputStream* s) {
  FlagSetting fs(Debugging, true);
  int j;
  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("i%d = ", j); os::print_location(s, i[j]); }
    else        { s->print( "fp = "   ); os::print_location(s, i[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("l%d = ", j); os::print_location(s, l[j]);
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("o%d = ", j); os::print_location(s, o[j]); }
    else        { s->print( "sp = "   ); os::print_location(s, o[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("g%d = ", j); os::print_location(s, g[j]);
  }
  s->cr();

  // print out floats with compression
  for (j = 0; j < 32; ) {
    jfloat val = f[j];
    int last = j;
    for ( ;  last+1 < 32;  ++last ) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", f[last+1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("f%d", j);
    if ( j != last )  s->print(" - f%d", last);
    s->print(" = %f", val);
    s->fill_to(25);
    s->print_cr(" (0x%x)", *(int*)&val);
    j = last + 1;
  }
  s->cr();

  // and doubles (evens only)
  for (j = 0; j < 32; ) {
    jdouble val = d[j];
    int last = j;
    for ( ;  last+1 < 32;  ++last ) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", d[last+1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("d%d", 2 * j);
    if ( j != last )  s->print(" - d%d", last);
    s->print(" = %f", val);
    s->fill_to(30);
    s->print("(0x%x)", *(int*)&val);
    s->fill_to(42);
    s->print_cr("(0x%x)", *(1 + (int*)&val));
    j = last + 1;
  }
  s->cr();
}

void RegistersForDebugging::save_registers(MacroAssembler* a) {
  a->sub(FP, align_up(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
  a->flushw();
  int i;
  for (i = 0; i < 8; ++i) {
    a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr( L1, O0, i_offset(i));
    a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr( L1, O0, l_offset(i));
    a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
    a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
  }
  for (i = 0;  i < 32; ++i) {
    a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
  }
  for (i = 0; i < 64; i += 2) {
    a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
  }
}

void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
  for (int i = 1; i < 8; ++i) {
    a->ld_ptr(r, g_offset(i), as_gRegister(i));
  }
  for (int j = 0; j < 32; ++j) {
    a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
  }
  for (int k = 0; k < 64; k += 2) {
    a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
  }
}


// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
void MacroAssembler::push_fTOS() {
  // %%%%%% need to implement this
}

// pops double TOS element from CPU stack and pushes on FPU stack
void MacroAssembler::pop_fTOS() {
  // %%%%%% need to implement this
}
void MacroAssembler::empty_FPU_stack() {
  // %%%%%% need to implement this
}

void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  if (reg == G0)  return;       // always NULL, which is always an oop

  BLOCK_COMMENT("verify_oop {");
  char buffer[64];
#ifdef COMPILER1
  if (CommentedAssembly) {
    snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
    block_comment(buffer);
  }
#endif

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at offset %d (%s:%d)", msg, offset(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP,-8*8,SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8);
  mov(reg,O0); // Move arg into O0; arg might be in O7 which is about to be crushed
  stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8,SP);
  BLOCK_COMMENT("} verify_oop");
}

void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char * file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP,-8*8,SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8);
  ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
  stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8,SP);
}

// side-door communication with signalHandler in os_solaris.cpp
address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };
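// Layout note for the two helpers above (a sketch of what the offsets mean):
// after `add SP, -8*8, SP` the eight stx/ldx slots live just past the
// window's register save area, at
//   SP + frame::register_save_words*wordSize + STACK_BIAS + i*8,  i = 0..7
// with O0/O1/O7 filled here and O2..O5 filled by verify_oop_subroutine
// below; using stx into scratch stack space instead of a register-window
// `save` keeps the full 64-bit O-register values intact.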
// This macro is expanded just once; it creates shared code.  Contract:
// receives an oop in O0.  Must restore O0 & O7 from TLS.  Must not smash ANY
// registers, including flags.  May not use a register 'save', as this blows
// the high bits of the O-regs if they contain Long values.  Acts as a 'leaf'
// call.
void MacroAssembler::verify_oop_subroutine() {
  // Leaf call; no frame.
  Label succeed, fail, null_or_fail;

  // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home).
  // O0 is now the oop to be checked.  O7 is the return address.
  Register O0_obj = O0;

  // Save some more registers for temps.
  stx(O2,SP,frame::register_save_words*wordSize+STACK_BIAS+2*8);
  stx(O3,SP,frame::register_save_words*wordSize+STACK_BIAS+3*8);
  stx(O4,SP,frame::register_save_words*wordSize+STACK_BIAS+4*8);
  stx(O5,SP,frame::register_save_words*wordSize+STACK_BIAS+5*8);

  // Save flags
  Register O5_save_flags = O5;
  rdccr( O5_save_flags );

  { // count number of verifies
    Register O2_adr   = O2;
    Register O3_accum = O3;
    inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum);
  }

  Register O2_mask = O2;
  Register O3_bits = O3;
  Register O4_temp = O4;

  // mark lower end of faulting range
  assert(_verify_oop_implicit_branch[0] == NULL, "set once");
  _verify_oop_implicit_branch[0] = pc();

  // We can't check the mark oop because it could be in the process of
  // locking or unlocking while this is running.
  set(Universe::verify_oop_mask (), O2_mask);
  set(Universe::verify_oop_bits (), O3_bits);

  // assert((obj & oop_mask) == oop_bits);
  and3(O0_obj, O2_mask, O4_temp);
  cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, null_or_fail);

  if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) {
    // the null_or_fail case is useless; must test for null separately
    br_null_short(O0_obj, pn, succeed);
  }

  // Check the Klass* of this object for being in the right area of memory.
  // Cannot do the load in the delay slot above in case O0 is null
  load_klass(O0_obj, O0_obj);
  // assert(klass != NULL)
  br_null_short(O0_obj, pn, fail);

  wrccr( O5_save_flags ); // Restore CCR's

  // mark upper end of faulting range
  _verify_oop_implicit_branch[1] = pc();

  //-----------------------
  // all tests pass
  bind(succeed);

  // Restore prior 64-bit registers
  ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+0*8,O0);
  ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+1*8,O1);
  ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+2*8,O2);
  ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+3*8,O3);
  ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+4*8,O4);
  ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+5*8,O5);

  retl();                       // Leaf return; restore prior O7 in delay slot
  delayed()->ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+7*8,O7);

  //-----------------------
  bind(null_or_fail);           // nulls are less common but OK
  br_null(O0_obj, false, pt, succeed);
  delayed()->wrccr( O5_save_flags ); // Restore CCR's

  //-----------------------
  // report failure:
  bind(fail);
  _verify_oop_implicit_branch[2] = pc();

  wrccr( O5_save_flags ); // Restore CCR's

  save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  mov(I1, O1);

  // Restore prior 64-bit registers
  ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+0*8,I0);
  ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+1*8,I1);
  ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+2*8,I2);
  ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+3*8,I3);
  ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+4*8,I4);
  ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+5*8,I5);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(al, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();
}
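// The plausibility test above reduces to (obj & verify_oop_mask) ==
// verify_oop_bits: a quick check that the value at least has the expected
// alignment and lies in the expected address range. As a made-up
// illustration, with 8-byte-aligned oops a mask that keeps the low three
// bits (expecting zeros there) rejects any misaligned "pointer" before the
// Klass* load is even attempted; anything that slips through still has its
// klass checked for non-NULL.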
void MacroAssembler::stop(const char* msg) {
  // save frame first to get O7 for return address
  // add one word to size in case struct is odd number of words long
  // It must be doubleword-aligned for storing doubles into it.

  save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O1);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(a, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();

  breakpoint_trap();   // make stop actually stop rather than writing
                       // unnoticeable results in the output files.

  // restore(); done in callee to save space!
}


void MacroAssembler::warn(const char* msg) {
  save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2));
  RegistersForDebugging::save_registers(this);
  mov(O0, L0);
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O0);
  call( CAST_FROM_FN_PTR(address, warning) );
  delayed()->nop();
//  ret();
//  delayed()->restore();
  RegistersForDebugging::restore_registers(this, L0);
  restore();
}


void MacroAssembler::untested(const char* what) {
  // We must be able to turn interactive prompting off
  // in order to run automated test scripts on the VM
  // Use the flag ShowMessageBoxOnError

  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("untested: %s", what);
    b = code_string(ss.as_string());
  }
  if (ShowMessageBoxOnError) { STOP(b); }
  else                       { warn(b); }
}


void MacroAssembler::unimplemented(const char* what) {
  char* b = new char[1024];
  jio_snprintf(b, 1024, "unimplemented: %s", what);
  stop(b);
}
void MacroAssembler::stop_subroutine() {
  RegistersForDebugging::save_registers(this);

  // for the sake of the debugger, stick a PC on the current frame
  // (this assumes that the caller has performed an extra "save")
  mov(I7, L7);
  add(O7, -7 * BytesPerInt, I7);

  save_frame(); // one more save to free up another O7 register
  mov(I0, O1);  // addr of reg save area

  // We expect pointer to message in I1. Caller must set it up in O1
  mov(I1, O0);  // get msg
  call (CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
  delayed()->nop();

  restore();

  RegistersForDebugging::restore_registers(this, O0);

  save_frame(0);
  call(CAST_FROM_FN_PTR(address,breakpoint));
  delayed()->nop();
  restore();

  mov(L7, I7);
  retl();
  delayed()->restore(); // see stop above
}


void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
  if ( ShowMessageBoxOnError ) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
    {
      // In order to get locks to work, we need to fake an in_VM state
      ttyLocker ttyl;
      ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
      if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
        BytecodeCounter::print();
      }
      if (os::message_box(msg, "Execution stopped, print registers?"))
        regs->print(::tty);
    }
    BREAKPOINT;
    ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
  }
  else {
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
  }
  assert(false, "DEBUG MESSAGE: %s", msg);
}


void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) {
  subcc( Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
  Label no_extras;
  br( negative, true, pt, no_extras ); // if neg, clear reg
  delayed()->set(0, Rresult);          // annulled, so only if taken
  bind( no_extras );
}


void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
  add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
  bclr(1, Rresult);
  sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes
}


void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) {
  calc_frame_size(Rextra_words, Rresult);
  neg(Rresult);
  save(SP, Rresult, SP);
}


// ---------------------------------------------------------
Assembler::RCondition cond2rcond(Assembler::Condition c) {
  switch (c) {
    /*case zero: */
    case Assembler::equal:        return Assembler::rc_z;
    case Assembler::lessEqual:    return Assembler::rc_lez;
    case Assembler::less:         return Assembler::rc_lz;
    /*case notZero:*/
    case Assembler::notEqual:     return Assembler::rc_nz;
    case Assembler::greater:      return Assembler::rc_gz;
    case Assembler::greaterEqual: return Assembler::rc_gez;
  }
  ShouldNotReachHere();
  return Assembler::rc_z;
}

// compares (32 bit) register with zero and branches.  NOT FOR USE WITH 64-bit POINTERS
void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a, Predict p) {
  tst(s1);
  br (c, a, p, L);
}
// Compares a pointer register with zero and branches on null.
// Does a test & branch on 32-bit systems and a register-branch on 64-bit.
void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L ) {
  assert_not_delayed();
  bpr( rc_z, a, p, s1, L );
}

void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L ) {
  assert_not_delayed();
  bpr( rc_nz, a, p, s1, L );
}

// Compare registers and branch with nop in delay slot or cbcond without delay slot.

// Compare integer (32 bit) values (icc only).
void MacroAssembler::cmp_and_br_short(Register s1, Register s2, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, s2, L);
  } else {
    cmp(s1, s2);
    br(c, false, p, L);
    delayed()->nop();
  }
}

// Compare integer (32 bit) values (icc only).
void MacroAssembler::cmp_and_br_short(Register s1, int simm13a, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (is_simm(simm13a,5) && use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, simm13a, L);
  } else {
    cmp(s1, simm13a);
    br(c, false, p, L);
    delayed()->nop();
  }
}

// Branch that tests xcc in LP64 and icc in !LP64
void MacroAssembler::cmp_and_brx_short(Register s1, Register s2, Condition c,
                                       Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, ptr_cc, s1, s2, L);
  } else {
    cmp(s1, s2);
    brx(c, false, p, L);
    delayed()->nop();
  }
}

// Branch that tests xcc in LP64 and icc in !LP64
void MacroAssembler::cmp_and_brx_short(Register s1, int simm13a, Condition c,
                                       Predict p, Label& L) {
  assert_not_delayed();
  if (is_simm(simm13a,5) && use_cbcond(L)) {
    Assembler::cbcond(c, ptr_cc, s1, simm13a, L);
  } else {
    cmp(s1, simm13a);
    brx(c, false, p, L);
    delayed()->nop();
  }
}

// Short branch versions for comparing a pointer with zero.

void MacroAssembler::br_null_short(Register s1, Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(zero, ptr_cc, s1, 0, L);
  } else {
    br_null(s1, false, p, L);
    delayed()->nop();
  }
}

void MacroAssembler::br_notnull_short(Register s1, Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(notZero, ptr_cc, s1, 0, L);
  } else {
    br_notnull(s1, false, p, L);
    delayed()->nop();
  }
}

// Unconditional short branch
void MacroAssembler::ba_short(Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(equal, icc, G0, G0, L);
  } else {
    br(always, false, pt, L);
    delayed()->nop();
  }
}
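// Usage sketch for the short-branch family above (register and label names
// are illustrative): each *_short form is a self-contained compare-and-branch
// with no usable delay slot, emitting a single cbcond when the target is
// near enough (use_cbcond) and a cmp/br/nop triple otherwise:
//
//   Label done;
//   cmp_and_br_short(O2, 0, Assembler::equal, Assembler::pt, done);
//   ...                       // skipped when O2 == 0
//   bind(done);
//
// The simm13a variants additionally require the immediate to fit cbcond's
// 5-bit field before the short encoding is chosen.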
// Branch if 'icc' says zero or not (i.e. icc.z == 1|0).

void MacroAssembler::br_icc_zero(bool iszero, Predict p, Label &L) {
  assert_not_delayed();
  Condition cf = (iszero ? Assembler::zero : Assembler::notZero);
  br(cf, false, p, L);
  delayed()->nop();
}

// instruction sequences factored across compiler & interpreter


void MacroAssembler::lcmp( Register Ra_hi, Register Ra_low,
                           Register Rb_hi, Register Rb_low,
                           Register Rresult) {

  Label check_low_parts, done;

  cmp(Ra_hi, Rb_hi );  // compare hi parts
  br(equal, true, pt, check_low_parts);
  delayed()->cmp(Ra_low, Rb_low); // test low parts

  // And, with an unsigned comparison, it does not matter if the numbers
  // are negative or not.
  // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff.
  // The second one is bigger (unsignedly).

  // Other notes: The first move in each triplet can be unconditional
  // (and therefore probably prefetchable).
  // And the equals case for the high part does not need testing,
  // since that triplet is reached only after finding the high halves differ.

  mov(-1, Rresult);
  ba(done);
  delayed()->movcc(greater, false, icc,  1, Rresult);

  bind(check_low_parts);

  mov(                               -1, Rresult);
  movcc(equal,           false, icc,  0, Rresult);
  movcc(greaterUnsigned, false, icc,  1, Rresult);

  bind(done);
}

void MacroAssembler::lneg( Register Rhi, Register Rlow ) {
  subcc(G0, Rlow, Rlow);
  subc (G0, Rhi,  Rhi);
}
void MacroAssembler::lshl( Register Rin_high,  Register Rin_low,
                           Register Rcount,
                           Register Rout_high, Register Rout_low,
                           Register Rtemp ) {


  Register Ralt_count = Rtemp;
  Register Rxfer_bits = Rtemp;

  assert( Ralt_count != Rin_high
      &&  Ralt_count != Rin_low
      &&  Ralt_count != Rcount
      &&  Rxfer_bits != Rin_low
      &&  Rxfer_bits != Rin_high
      &&  Rxfer_bits != Rcount
      &&  Rxfer_bits != Rout_low
      &&  Rout_low   != Rin_high,
        "register alias checks");

  Label big_shift, done;

  // This code can be optimized to use the 64 bit shifts in V9.
  // Here we use the 32 bit shifts.

  and3( Rcount, 0x3f, Rcount);     // take least significant 6 bits
  subcc(Rcount,   31, Ralt_count);
  br(greater, true, pn, big_shift);
  delayed()->dec(Ralt_count);

  // shift < 32 bits, Ralt_count = Rcount-31

  // We get the transfer bits by shifting right by 32-count the low
  // register. This is done by shifting right by 31-count and then by one
  // more to take care of the special (rare) case where count is zero
  // (shifting by 32 would not work).

  neg(Ralt_count);

  // The order of the next two instructions is critical in the case where
  // Rin and Rout are the same and should not be reversed.

  srl(Rin_low, Ralt_count, Rxfer_bits); // shift right by 31-count
  if (Rcount != Rout_low) {
    sll(Rin_low, Rcount, Rout_low); // low half
  }
  sll(Rin_high, Rcount, Rout_high);
  if (Rcount == Rout_low) {
    sll(Rin_low, Rcount, Rout_low); // low half
  }
  srl(Rxfer_bits, 1, Rxfer_bits ); // shift right by one more
  ba(done);
  delayed()->or3(Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low

  // shift >= 32 bits, Ralt_count = Rcount-32
  bind(big_shift);
  sll(Rin_low, Ralt_count, Rout_high);
  clr(Rout_low);

  bind(done);
}


void MacroAssembler::lshr( Register Rin_high,  Register Rin_low,
                           Register Rcount,
                           Register Rout_high, Register Rout_low,
                           Register Rtemp ) {

  Register Ralt_count = Rtemp;
  Register Rxfer_bits = Rtemp;

  assert( Ralt_count != Rin_high
      &&  Ralt_count != Rin_low
      &&  Ralt_count != Rcount
      &&  Rxfer_bits != Rin_low
      &&  Rxfer_bits != Rin_high
      &&  Rxfer_bits != Rcount
      &&  Rxfer_bits != Rout_high
      &&  Rout_high  != Rin_low,
        "register alias checks");

  Label big_shift, done;

  // This code can be optimized to use the 64 bit shifts in V9.
  // Here we use the 32 bit shifts.

  and3( Rcount, 0x3f, Rcount);     // take least significant 6 bits
  subcc(Rcount,   31, Ralt_count);
  br(greater, true, pn, big_shift);
  delayed()->dec(Ralt_count);

  // shift < 32 bits, Ralt_count = Rcount-31

  // We get the transfer bits by shifting left by 32-count the high
  // register. This is done by shifting left by 31-count and then by one
  // more to take care of the special (rare) case where count is zero
  // (shifting by 32 would not work).

  neg(Ralt_count);
  if (Rcount != Rout_low) {
    srl(Rin_low, Rcount, Rout_low);
  }

  // The order of the next two instructions is critical in the case where
  // Rin and Rout are the same and should not be reversed.

  sll(Rin_high,     Ralt_count, Rxfer_bits); // shift left by 31-count
  sra(Rin_high,     Rcount,     Rout_high ); // high half
  sll(Rxfer_bits,   1,          Rxfer_bits); // shift left by one more
  if (Rcount == Rout_low) {
    srl(Rin_low, Rcount, Rout_low);
  }
  ba(done);
  delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high

  // shift >= 32 bits, Ralt_count = Rcount-32
  bind(big_shift);

  sra(Rin_high, Ralt_count, Rout_low);
  sra(Rin_high, 31, Rout_high); // sign into hi

  bind( done );
}
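// Worked example for the small-shift path of lshl/lshr above (count made up):
// for a left shift by count = 8, the pair computes
//   out_high = (in_high << 8) | (in_low >> 24)
//   out_low  =  in_low  << 8
// where the >> 24 is performed as >> 23 followed by >> 1, so that count == 0
// degenerates to >> 31 then >> 1 (a single 32-bit srl by 32 would not work).
// Counts of 32..63 take the big_shift path and move bits across halves in
// one shift.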
1801 1802 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits 1803 subcc(Rcount, 31, Ralt_count); 1804 br(greater, true, pn, big_shift); 1805 delayed()->dec(Ralt_count); 1806 1807 // shift < 32 bits, Ralt_count = Rcount-31 1808 1809 // We get the transfer bits by shifting left by 32-count the high 1810 // register. This is done by shifting left by 31-count and then by one 1811 // more to take care of the special (rare) case where count is zero 1812 // (shifting by 32 would not work). 1813 1814 neg(Ralt_count); 1815 if (Rcount != Rout_low) { 1816 srl(Rin_low, Rcount, Rout_low); 1817 } 1818 1819 // The order of the next two instructions is critical in the case where 1820 // Rin and Rout are the same and should not be reversed. 1821 1822 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count 1823 srl(Rin_high, Rcount, Rout_high ); // high half 1824 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more 1825 if (Rcount == Rout_low) { 1826 srl(Rin_low, Rcount, Rout_low); 1827 } 1828 ba(done); 1829 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high 1830 1831 // shift >= 32 bits, Ralt_count = Rcount-32 1832 bind(big_shift); 1833 1834 srl(Rin_high, Ralt_count, Rout_low); 1835 clr(Rout_high); 1836 1837 bind( done ); 1838 } 1839 1840 void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) { 1841 cmp(Ra, Rb); 1842 mov(-1, Rresult); 1843 movcc(equal, false, xcc, 0, Rresult); 1844 movcc(greater, false, xcc, 1, Rresult); 1845 } 1846 1847 1848 void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) { 1849 switch (size_in_bytes) { 1850 case 8: ld_long(src, dst); break; 1851 case 4: ld( src, dst); break; 1852 case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break; 1853 case 1: is_signed ? 
ldsb(src, dst) : ldub(src, dst); break; 1854 default: ShouldNotReachHere(); 1855 } 1856 } 1857 1858 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) { 1859 switch (size_in_bytes) { 1860 case 8: st_long(src, dst); break; 1861 case 4: st( src, dst); break; 1862 case 2: sth( src, dst); break; 1863 case 1: stb( src, dst); break; 1864 default: ShouldNotReachHere(); 1865 } 1866 } 1867 1868 1869 void MacroAssembler::float_cmp( bool is_float, int unordered_result, 1870 FloatRegister Fa, FloatRegister Fb, 1871 Register Rresult) { 1872 if (is_float) { 1873 fcmp(FloatRegisterImpl::S, fcc0, Fa, Fb); 1874 } else { 1875 fcmp(FloatRegisterImpl::D, fcc0, Fa, Fb); 1876 } 1877 1878 if (unordered_result == 1) { 1879 mov( -1, Rresult); 1880 movcc(f_equal, true, fcc0, 0, Rresult); 1881 movcc(f_unorderedOrGreater, true, fcc0, 1, Rresult); 1882 } else { 1883 mov( -1, Rresult); 1884 movcc(f_equal, true, fcc0, 0, Rresult); 1885 movcc(f_greater, true, fcc0, 1, Rresult); 1886 } 1887 } 1888 1889 1890 void MacroAssembler::save_all_globals_into_locals() { 1891 mov(G1,L1); 1892 mov(G2,L2); 1893 mov(G3,L3); 1894 mov(G4,L4); 1895 mov(G5,L5); 1896 mov(G6,L6); 1897 mov(G7,L7); 1898 } 1899 1900 void MacroAssembler::restore_globals_from_locals() { 1901 mov(L1,G1); 1902 mov(L2,G2); 1903 mov(L3,G3); 1904 mov(L4,G4); 1905 mov(L5,G5); 1906 mov(L6,G6); 1907 mov(L7,G7); 1908 } 1909 1910 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, 1911 Register tmp, 1912 int offset) { 1913 intptr_t value = *delayed_value_addr; 1914 if (value != 0) 1915 return RegisterOrConstant(value + offset); 1916 1917 // load indirectly to solve generation ordering problem 1918 AddressLiteral a(delayed_value_addr); 1919 load_ptr_contents(a, tmp); 1920 1921 #ifdef ASSERT 1922 tst(tmp); 1923 breakpoint_trap(zero, xcc); 1924 #endif 1925 1926 if (offset != 0) 1927 add(tmp, offset, tmp); 1928 1929 return RegisterOrConstant(tmp); 1930 } 1931 1932 1933 RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 1934 assert(d.register_or_noreg() != G0, "lost side effect"); 1935 if ((s2.is_constant() && s2.as_constant() == 0) || 1936 (s2.is_register() && s2.as_register() == G0)) { 1937 // Do nothing, just move value. 1938 if (s1.is_register()) { 1939 if (d.is_constant()) d = temp; 1940 mov(s1.as_register(), d.as_register()); 1941 return d; 1942 } else { 1943 return s1; 1944 } 1945 } 1946 1947 if (s1.is_register()) { 1948 assert_different_registers(s1.as_register(), temp); 1949 if (d.is_constant()) d = temp; 1950 andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 1951 return d; 1952 } else { 1953 if (s2.is_register()) { 1954 assert_different_registers(s2.as_register(), temp); 1955 if (d.is_constant()) d = temp; 1956 set(s1.as_constant(), temp); 1957 andn(temp, s2.as_register(), d.as_register()); 1958 return d; 1959 } else { 1960 intptr_t res = s1.as_constant() & ~s2.as_constant(); 1961 return res; 1962 } 1963 } 1964 } 1965 1966 RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 1967 assert(d.register_or_noreg() != G0, "lost side effect"); 1968 if ((s2.is_constant() && s2.as_constant() == 0) || 1969 (s2.is_register() && s2.as_register() == G0)) { 1970 // Do nothing, just move value. 
1971 if (s1.is_register()) { 1972 if (d.is_constant()) d = temp; 1973 mov(s1.as_register(), d.as_register()); 1974 return d; 1975 } else { 1976 return s1; 1977 } 1978 } 1979 1980 if (s1.is_register()) { 1981 assert_different_registers(s1.as_register(), temp); 1982 if (d.is_constant()) d = temp; 1983 add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 1984 return d; 1985 } else { 1986 if (s2.is_register()) { 1987 assert_different_registers(s2.as_register(), temp); 1988 if (d.is_constant()) d = temp; 1989 add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register()); 1990 return d; 1991 } else { 1992 intptr_t res = s1.as_constant() + s2.as_constant(); 1993 return res; 1994 } 1995 } 1996 } 1997 1998 RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 1999 assert(d.register_or_noreg() != G0, "lost side effect"); 2000 if (!is_simm13(s2.constant_or_zero())) 2001 s2 = (s2.as_constant() & 0xFF); 2002 if ((s2.is_constant() && s2.as_constant() == 0) || 2003 (s2.is_register() && s2.as_register() == G0)) { 2004 // Do nothing, just move value. 2005 if (s1.is_register()) { 2006 if (d.is_constant()) d = temp; 2007 mov(s1.as_register(), d.as_register()); 2008 return d; 2009 } else { 2010 return s1; 2011 } 2012 } 2013 2014 if (s1.is_register()) { 2015 assert_different_registers(s1.as_register(), temp); 2016 if (d.is_constant()) d = temp; 2017 sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2018 return d; 2019 } else { 2020 if (s2.is_register()) { 2021 assert_different_registers(s2.as_register(), temp); 2022 if (d.is_constant()) d = temp; 2023 set(s1.as_constant(), temp); 2024 sll_ptr(temp, s2.as_register(), d.as_register()); 2025 return d; 2026 } else { 2027 intptr_t res = s1.as_constant() << s2.as_constant(); 2028 return res; 2029 } 2030 } 2031 } 2032 2033 2034 // Look up the method for a megamorphic invokeinterface call. 2035 // The target method is determined by <intf_klass, itable_index>. 2036 // The receiver klass is in recv_klass. 2037 // On success, the result will be in method_result, and execution falls through. 2038 // On failure, execution transfers to the given label. 2039 void MacroAssembler::lookup_interface_method(Register recv_klass, 2040 Register intf_klass, 2041 RegisterOrConstant itable_index, 2042 Register method_result, 2043 Register scan_temp, 2044 Register sethi_temp, 2045 Label& L_no_such_interface) { 2046 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp); 2047 assert(itable_index.is_constant() || itable_index.as_register() == method_result, 2048 "caller must use same register for non-constant itable index as for method"); 2049 2050 Label L_no_such_interface_restore; 2051 bool did_save = false; 2052 if (scan_temp == noreg || sethi_temp == noreg) { 2053 Register recv_2 = recv_klass->is_global() ? recv_klass : L0; 2054 Register intf_2 = intf_klass->is_global() ? 
intf_klass : L1; 2055 assert(method_result->is_global(), "must be able to return value"); 2056 scan_temp = L2; 2057 sethi_temp = L3; 2058 save_frame_and_mov(0, recv_klass, recv_2, intf_klass, intf_2); 2059 recv_klass = recv_2; 2060 intf_klass = intf_2; 2061 did_save = true; 2062 } 2063 2064 // Compute start of first itableOffsetEntry (which is at the end of the vtable) 2065 int vtable_base = in_bytes(Klass::vtable_start_offset()); 2066 int scan_step = itableOffsetEntry::size() * wordSize; 2067 int vte_size = vtableEntry::size_in_bytes(); 2068 2069 lduw(recv_klass, in_bytes(Klass::vtable_length_offset()), scan_temp); 2070 // %%% We should store the aligned, prescaled offset in the klassoop. 2071 // Then the next several instructions would fold away. 2072 2073 int itb_offset = vtable_base; 2074 int itb_scale = exact_log2(vtableEntry::size_in_bytes()); 2075 sll(scan_temp, itb_scale, scan_temp); 2076 add(scan_temp, itb_offset, scan_temp); 2077 add(recv_klass, scan_temp, scan_temp); 2078 2079 // Adjust recv_klass by scaled itable_index, so we can free itable_index. 2080 RegisterOrConstant itable_offset = itable_index; 2081 itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset); 2082 itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset); 2083 add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass); 2084 2085 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) { 2086 // if (scan->interface() == intf) { 2087 // result = (klass + scan->offset() + itable_index); 2088 // } 2089 // } 2090 Label L_search, L_found_method; 2091 2092 for (int peel = 1; peel >= 0; peel--) { 2093 // %%%% Could load both offset and interface in one ldx, if they were 2094 // in the opposite order. This would save a load. 2095 ld_ptr(scan_temp, itableOffsetEntry::interface_offset_in_bytes(), method_result); 2096 2097 // Check that this entry is non-null. A null entry means that 2098 // the receiver class doesn't implement the interface, and wasn't the 2099 // same as when the caller was compiled. 2100 bpr(Assembler::rc_z, false, Assembler::pn, method_result, did_save ? L_no_such_interface_restore : L_no_such_interface); 2101 delayed()->cmp(method_result, intf_klass); 2102 2103 if (peel) { 2104 brx(Assembler::equal, false, Assembler::pt, L_found_method); 2105 } else { 2106 brx(Assembler::notEqual, false, Assembler::pn, L_search); 2107 // (invert the test to fall through to found_method...) 2108 } 2109 delayed()->add(scan_temp, scan_step, scan_temp); 2110 2111 if (!peel) break; 2112 2113 bind(L_search); 2114 } 2115 2116 bind(L_found_method); 2117 2118 // Got a hit. 
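  // Editorial aside: a rough C sketch of the two loads that follow, for
  // illustration only (not emitted code). scan_temp was post-incremented
  // past the matching itableOffsetEntry in the delay slot above, and
  // recv_klass was pre-adjusted by the scaled itable_index plus the
  // method field offset:
  //
  //   int voff = ((itableOffsetEntry*)(scan_temp - scan_step))->offset();
  //   method_result = *(Method**)(recv_klass + voff);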
2119 int ito_offset = itableOffsetEntry::offset_offset_in_bytes(); 2120 // scan_temp[-scan_step] points to the vtable offset we need 2121 ito_offset -= scan_step; 2122 lduw(scan_temp, ito_offset, scan_temp); 2123 ld_ptr(recv_klass, scan_temp, method_result); 2124 2125 if (did_save) { 2126 Label L_done; 2127 ba(L_done); 2128 delayed()->restore(); 2129 2130 bind(L_no_such_interface_restore); 2131 ba(L_no_such_interface); 2132 delayed()->restore(); 2133 2134 bind(L_done); 2135 } 2136 } 2137 2138 2139 // virtual method calling 2140 void MacroAssembler::lookup_virtual_method(Register recv_klass, 2141 RegisterOrConstant vtable_index, 2142 Register method_result) { 2143 assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg()); 2144 Register sethi_temp = method_result; 2145 const int base = in_bytes(Klass::vtable_start_offset()) + 2146 // method pointer offset within the vtable entry: 2147 vtableEntry::method_offset_in_bytes(); 2148 RegisterOrConstant vtable_offset = vtable_index; 2149 // Each of the following three lines potentially generates an instruction. 2150 // But the total number of address formation instructions will always be 2151 // at most two, and will often be zero. In any case, it will be optimal. 2152 // If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x). 2153 // If vtable_index is a constant, we will have at most (set B+X<<N,t; ld_ptr k,t). 2154 vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size_in_bytes()), vtable_offset); 2155 vtable_offset = regcon_inc_ptr(vtable_offset, base, vtable_offset, sethi_temp); 2156 Address vtable_entry_addr(recv_klass, ensure_simm13_or_reg(vtable_offset, sethi_temp)); 2157 ld_ptr(vtable_entry_addr, method_result); 2158 } 2159 2160 2161 void MacroAssembler::check_klass_subtype(Register sub_klass, 2162 Register super_klass, 2163 Register temp_reg, 2164 Register temp2_reg, 2165 Label& L_success) { 2166 Register sub_2 = sub_klass; 2167 Register sup_2 = super_klass; 2168 if (!sub_2->is_global()) sub_2 = L0; 2169 if (!sup_2->is_global()) sup_2 = L1; 2170 bool did_save = false; 2171 if (temp_reg == noreg || temp2_reg == noreg) { 2172 temp_reg = L2; 2173 temp2_reg = L3; 2174 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 2175 sub_klass = sub_2; 2176 super_klass = sup_2; 2177 did_save = true; 2178 } 2179 Label L_failure, L_pop_to_failure, L_pop_to_success; 2180 check_klass_subtype_fast_path(sub_klass, super_klass, 2181 temp_reg, temp2_reg, 2182 (did_save ? &L_pop_to_success : &L_success), 2183 (did_save ? 
&L_pop_to_failure : &L_failure), NULL); 2184 2185 if (!did_save) 2186 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 2187 check_klass_subtype_slow_path(sub_2, sup_2, 2188 L2, L3, L4, L5, 2189 NULL, &L_pop_to_failure); 2190 2191 // on success: 2192 bind(L_pop_to_success); 2193 restore(); 2194 ba_short(L_success); 2195 2196 // on failure: 2197 bind(L_pop_to_failure); 2198 restore(); 2199 bind(L_failure); 2200 } 2201 2202 2203 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 2204 Register super_klass, 2205 Register temp_reg, 2206 Register temp2_reg, 2207 Label* L_success, 2208 Label* L_failure, 2209 Label* L_slow_path, 2210 RegisterOrConstant super_check_offset) { 2211 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 2212 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2213 2214 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 2215 bool need_slow_path = (must_load_sco || 2216 super_check_offset.constant_or_zero() == sco_offset); 2217 2218 assert_different_registers(sub_klass, super_klass, temp_reg); 2219 if (super_check_offset.is_register()) { 2220 assert_different_registers(sub_klass, super_klass, temp_reg, 2221 super_check_offset.as_register()); 2222 } else if (must_load_sco) { 2223 assert(temp2_reg != noreg, "supply either a temp or a register offset"); 2224 } 2225 2226 Label L_fallthrough; 2227 int label_nulls = 0; 2228 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 2229 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 2230 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } 2231 assert(label_nulls <= 1 || 2232 (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path), 2233 "at most one NULL in the batch, usually"); 2234 2235 // If the pointers are equal, we are done (e.g., String[] elements). 2236 // This self-check enables sharing of secondary supertype arrays among 2237 // non-primary types such as array-of-interface. Otherwise, each such 2238 // type would need its own customized SSA. 2239 // We move this check to the front of the fast path because many 2240 // type checks are in fact trivially successful in this manner, 2241 // so we get a nicely predicted branch right at the start of the check. 2242 cmp(super_klass, sub_klass); 2243 brx(Assembler::equal, false, Assembler::pn, *L_success); 2244 delayed()->nop(); 2245 2246 // Check the supertype display: 2247 if (must_load_sco) { 2248 // The super check offset is always positive... 2249 lduw(super_klass, sco_offset, temp2_reg); 2250 super_check_offset = RegisterOrConstant(temp2_reg); 2251 // super_check_offset is register. 2252 assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register()); 2253 } 2254 ld_ptr(sub_klass, super_check_offset, temp_reg); 2255 cmp(super_klass, temp_reg); 2256 2257 // This check has worked decisively for primary supers. 2258 // Secondary supers are sought in the super_cache ('super_cache_addr'). 2259 // (Secondary supers are interfaces and very deeply nested subtypes.) 2260 // This works in the same check above because of a tricky aliasing 2261 // between the super_cache and the primary super display elements. 2262 // (The 'super_check_addr' can address either, as the case requires.) 2263 // Note that the cache is updated below if it does not help us find 2264 // what we need immediately. 2265 // So if it was a primary super, we can just fail immediately. 
2266 // Otherwise, it's the slow path for us (no success at this point). 2267 2268 // Hacked ba(), which may only be used just before L_fallthrough. 2269 #define FINAL_JUMP(label) \ 2270 if (&(label) != &L_fallthrough) { \ 2271 ba(label); delayed()->nop(); \ 2272 } 2273 2274 if (super_check_offset.is_register()) { 2275 brx(Assembler::equal, false, Assembler::pn, *L_success); 2276 delayed()->cmp(super_check_offset.as_register(), sc_offset); 2277 2278 if (L_failure == &L_fallthrough) { 2279 brx(Assembler::equal, false, Assembler::pt, *L_slow_path); 2280 delayed()->nop(); 2281 } else { 2282 brx(Assembler::notEqual, false, Assembler::pn, *L_failure); 2283 delayed()->nop(); 2284 FINAL_JUMP(*L_slow_path); 2285 } 2286 } else if (super_check_offset.as_constant() == sc_offset) { 2287 // Need a slow path; fast failure is impossible. 2288 if (L_slow_path == &L_fallthrough) { 2289 brx(Assembler::equal, false, Assembler::pt, *L_success); 2290 delayed()->nop(); 2291 } else { 2292 brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path); 2293 delayed()->nop(); 2294 FINAL_JUMP(*L_success); 2295 } 2296 } else { 2297 // No slow path; it's a fast decision. 2298 if (L_failure == &L_fallthrough) { 2299 brx(Assembler::equal, false, Assembler::pt, *L_success); 2300 delayed()->nop(); 2301 } else { 2302 brx(Assembler::notEqual, false, Assembler::pn, *L_failure); 2303 delayed()->nop(); 2304 FINAL_JUMP(*L_success); 2305 } 2306 } 2307 2308 bind(L_fallthrough); 2309 2310 #undef FINAL_JUMP 2311 } 2312 2313 2314 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 2315 Register super_klass, 2316 Register count_temp, 2317 Register scan_temp, 2318 Register scratch_reg, 2319 Register coop_reg, 2320 Label* L_success, 2321 Label* L_failure) { 2322 assert_different_registers(sub_klass, super_klass, 2323 count_temp, scan_temp, scratch_reg, coop_reg); 2324 2325 Label L_fallthrough, L_loop; 2326 int label_nulls = 0; 2327 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 2328 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 2329 assert(label_nulls <= 1, "at most one NULL in the batch"); 2330 2331 // a couple of useful fields in sub_klass: 2332 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 2333 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 2334 2335 // Do a linear scan of the secondary super-klass chain. 2336 // This code is rarely used, so simplicity is a virtue here. 2337 2338 #ifndef PRODUCT 2339 int* pst_counter = &SharedRuntime::_partial_subtype_ctr; 2340 inc_counter((address) pst_counter, count_temp, scan_temp); 2341 #endif 2342 2343 // We will consult the secondary-super array. 2344 ld_ptr(sub_klass, ss_offset, scan_temp); 2345 2346 Register search_key = super_klass; 2347 2348 // Load the array length. (Positive movl does right thing on LP64.) 2349 lduw(scan_temp, Array<Klass*>::length_offset_in_bytes(), count_temp); 2350 2351 // Check for empty secondary super list 2352 tst(count_temp); 2353 2354 // In the array of super classes elements are pointer sized. 2355 int element_size = wordSize; 2356 2357 // Top of search loop 2358 bind(L_loop); 2359 br(Assembler::equal, false, Assembler::pn, *L_failure); 2360 delayed()->add(scan_temp, element_size, scan_temp); 2361 2362 // Skip the array header in all array accesses. 
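  // Editorial aside: a rough C sketch of the scan loop around this point,
  // for illustration only (not emitted code; names approximate the
  // registers above):
  //
  //   if (count == 0) goto L_failure;
  //   for (;;) {
  //     scan += element_size;
  //     Klass* k = *(Klass**)(scan + elem_offset); // elem_offset compensates
  //     if (k == search_key) break;                // for the pre-increment
  //     if (--count == 0) goto L_failure;
  //   }
  //   // fall through on a hit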
2363 int elem_offset = Array<Klass*>::base_offset_in_bytes(); 2364 elem_offset -= element_size; // the scan pointer was pre-incremented also 2365 2366 // Load next super to check 2367 ld_ptr( scan_temp, elem_offset, scratch_reg ); 2368 2369 // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list 2370 cmp(scratch_reg, search_key); 2371 2372 // A miss means we are NOT a subtype and need to keep looping 2373 brx(Assembler::notEqual, false, Assembler::pn, L_loop); 2374 delayed()->deccc(count_temp); // decrement trip counter in delay slot 2375 2376 // Success. Cache the super we found and proceed in triumph. 2377 st_ptr(super_klass, sub_klass, sc_offset); 2378 2379 if (L_success != &L_fallthrough) { 2380 ba(*L_success); 2381 delayed()->nop(); 2382 } 2383 2384 bind(L_fallthrough); 2385 } 2386 2387 2388 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot, 2389 Register temp_reg, 2390 int extra_slot_offset) { 2391 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 2392 int stackElementSize = Interpreter::stackElementSize; 2393 int offset = extra_slot_offset * stackElementSize; 2394 if (arg_slot.is_constant()) { 2395 offset += arg_slot.as_constant() * stackElementSize; 2396 return offset; 2397 } else { 2398 assert(temp_reg != noreg, "must specify"); 2399 sll_ptr(arg_slot.as_register(), exact_log2(stackElementSize), temp_reg); 2400 if (offset != 0) 2401 add(temp_reg, offset, temp_reg); 2402 return temp_reg; 2403 } 2404 } 2405 2406 2407 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 2408 Register temp_reg, 2409 int extra_slot_offset) { 2410 return Address(Gargs, argument_offset(arg_slot, temp_reg, extra_slot_offset)); 2411 } 2412 2413 2414 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, 2415 Register temp_reg, 2416 Label& done, Label* slow_case, 2417 BiasedLockingCounters* counters) { 2418 assert(UseBiasedLocking, "why call this otherwise?"); 2419 2420 if (PrintBiasedLockingStatistics) { 2421 assert_different_registers(obj_reg, mark_reg, temp_reg, O7); 2422 if (counters == NULL) 2423 counters = BiasedLocking::counters(); 2424 } 2425 2426 Label cas_label; 2427 2428 // Biased locking 2429 // See whether the lock is currently biased toward our thread and 2430 // whether the epoch is still valid 2431 // Note that the runtime guarantees sufficient alignment of JavaThread 2432 // pointers to allow age to be placed into low bits 2433 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout"); 2434 and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); 2435 cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label); 2436 2437 load_klass(obj_reg, temp_reg); 2438 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2439 or3(G2_thread, temp_reg, temp_reg); 2440 xor3(mark_reg, temp_reg, temp_reg); 2441 andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg); 2442 if (counters != NULL) { 2443 cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg); 2444 // Reload mark_reg as we may need it later 2445 ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg); 2446 } 2447 brx(Assembler::equal, true, Assembler::pt, done); 2448 delayed()->nop(); 2449 2450 Label try_revoke_bias; 2451 Label try_rebias; 2452 Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes()); 2453 
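  // Editorial aside: a rough pseudocode sketch of the triage that follows,
  // for illustration only (not emitted code). temp_reg still holds
  // mark ^ (prototype_header | thread) with the age bits masked off:
  //
  //   if ((temp & biased_lock_mask) != 0) goto try_revoke_bias; // bias disabled
  //   if ((temp & epoch_mask) != 0)       goto try_rebias;      // epoch expired
  //   // otherwise: owner differs; CAS in an anonymously-biased header below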
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
2454
2455 // At this point we know that the header has the bias pattern and
2456 // that we are not the bias owner in the current epoch. We need to
2457 // figure out more details about the state of the header in order to
2458 // know what operations can be legally performed on the object's
2459 // header.
2460
2461 // If the low three bits in the xor result aren't clear, that means
2462 // the prototype header is no longer biased and we have to revoke
2463 // the bias on this object.
2464 btst(markOopDesc::biased_lock_mask_in_place, temp_reg);
2465 brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias);
2466
2467 // Biasing is still enabled for this data type. See whether the
2468 // epoch of the current bias is still valid, meaning that the epoch
2469 // bits of the mark word are equal to the epoch bits of the
2470 // prototype header. (Note that the prototype header's epoch bits
2471 // only change at a safepoint.) If not, attempt to rebias the object
2472 // toward the current thread. Note that we must be absolutely sure
2473 // that the current epoch is invalid in order to do this because
2474 // otherwise the manipulations it performs on the mark word are
2475 // illegal.
2476 delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg);
2477 brx(Assembler::notZero, false, Assembler::pn, try_rebias);
2478
2479 // The epoch of the current bias is still valid but we know nothing
2480 // about the owner; it might be set or it might be clear. Try to
2481 // acquire the bias of the object using an atomic operation. If this
2482 // fails we will go into the runtime to revoke the object's bias.
2483 // Note that we first construct the presumed unbiased header so we
2484 // don't accidentally blow away another thread's valid bias.
2485 delayed()->and3(mark_reg,
2486 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
2487 mark_reg);
2488 or3(G2_thread, mark_reg, temp_reg);
2489 cas_ptr(mark_addr.base(), mark_reg, temp_reg);
2490 // If the biasing toward our thread failed, this means that
2491 // another thread succeeded in biasing it toward itself and we
2492 // need to revoke that bias. The revocation will occur in the
2493 // interpreter runtime in the slow case.
2494 cmp(mark_reg, temp_reg);
2495 if (counters != NULL) {
2496 cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg);
2497 }
2498 if (slow_case != NULL) {
2499 brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
2500 delayed()->nop();
2501 }
2502 ba_short(done);
2503
2504 bind(try_rebias);
2505 // At this point we know the epoch has expired, meaning that the
2506 // current "bias owner", if any, is actually invalid. Under these
2507 // circumstances _only_, we are allowed to use the current header's
2508 // value as the comparison value when doing the cas to acquire the
2509 // bias in the current epoch. In other words, we allow transfer of
2510 // the bias from one thread to another directly in this situation.
2511 //
2512 // FIXME: due to a lack of registers we currently blow away the age
2513 // bits in this situation. Should attempt to preserve them.
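  // Editorial aside: a rough sketch of the rebias attempt below, for
  // illustration only (not emitted code):
  //
  //   intptr_t biased = prototype_header | (intptr_t)thread; // current epoch, us
  //   if (CAS(&obj->mark, mark /*expected*/, biased) != mark)
  //     goto slow_case;  // lost the race; the bias is revoked in the runtime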
2514 load_klass(obj_reg, temp_reg); 2515 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2516 or3(G2_thread, temp_reg, temp_reg); 2517 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2518 // If the biasing toward our thread failed, this means that 2519 // another thread succeeded in biasing it toward itself and we 2520 // need to revoke that bias. The revocation will occur in the 2521 // interpreter runtime in the slow case. 2522 cmp(mark_reg, temp_reg); 2523 if (counters != NULL) { 2524 cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg); 2525 } 2526 if (slow_case != NULL) { 2527 brx(Assembler::notEqual, true, Assembler::pn, *slow_case); 2528 delayed()->nop(); 2529 } 2530 ba_short(done); 2531 2532 bind(try_revoke_bias); 2533 // The prototype mark in the klass doesn't have the bias bit set any 2534 // more, indicating that objects of this data type are not supposed 2535 // to be biased any more. We are going to try to reset the mark of 2536 // this object to the prototype value and fall through to the 2537 // CAS-based locking scheme. Note that if our CAS fails, it means 2538 // that another thread raced us for the privilege of revoking the 2539 // bias of this particular object, so it's okay to continue in the 2540 // normal locking code. 2541 // 2542 // FIXME: due to a lack of registers we currently blow away the age 2543 // bits in this situation. Should attempt to preserve them. 2544 load_klass(obj_reg, temp_reg); 2545 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2546 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2547 // Fall through to the normal CAS-based lock, because no matter what 2548 // the result of the above CAS, some thread must have succeeded in 2549 // removing the bias bit from the object's header. 2550 if (counters != NULL) { 2551 cmp(mark_reg, temp_reg); 2552 cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg); 2553 } 2554 2555 bind(cas_label); 2556 } 2557 2558 void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done, 2559 bool allow_delay_slot_filling) { 2560 // Check for biased locking unlock case, which is a no-op 2561 // Note: we do not have to check the thread ID for two reasons. 2562 // First, the interpreter checks for IllegalMonitorStateException at 2563 // a higher level. Second, if the bias was revoked while we held the 2564 // lock, the object could not be rebiased toward another thread, so 2565 // the bias bit would be clear. 2566 ld_ptr(mark_addr, temp_reg); 2567 and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); 2568 cmp(temp_reg, markOopDesc::biased_lock_pattern); 2569 brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done); 2570 delayed(); 2571 if (!allow_delay_slot_filling) { 2572 nop(); 2573 } 2574 } 2575 2576 2577 // compiler_lock_object() and compiler_unlock_object() are direct transliterations 2578 // of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments. 2579 // The code could be tightened up considerably. 2580 // 2581 // box->dhw disposition - post-conditions at DONE_LABEL. 2582 // - Successful inflated lock: box->dhw != 0. 2583 // Any non-zero value suffices. 2584 // Consider G2_thread, rsp, boxReg, or markOopDesc::unused_mark() 2585 // - Successful Stack-lock: box->dhw == mark. 2586 // box->dhw must contain the displaced mark word value 2587 // - Failure -- icc.ZFlag == 0 and box->dhw is undefined. 
2588 // The slow-path fast_enter() and slow_enter() operators
2589 // are responsible for setting box->dhw = NonZero (typically markOopDesc::unused_mark()).
2590 // - Biased: box->dhw is undefined
2591 //
2592 // SPARC refworkload performance - specifically jetstream and scimark - is
2593 // extremely sensitive to the size of the code emitted by compiler_lock_object
2594 // and compiler_unlock_object. Critically, the key factor is code size, not path
2595 // length. (Simple experiments to pad CLO with unexecuted NOPs demonstrate the
2596 // effect).
2597
2598
2599 void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
2600 Register Rbox, Register Rscratch,
2601 BiasedLockingCounters* counters,
2602 bool try_bias) {
2603 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
2604
2605 verify_oop(Roop);
2606 Label done ;
2607
2608 if (counters != NULL) {
2609 inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch);
2610 }
2611
2612 if (EmitSync & 1) {
2613 mov(3, Rscratch);
2614 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2615 cmp(SP, G0);
2616 return ;
2617 }
2618
2619 if (EmitSync & 2) {
2620
2621 // Fetch object's markword
2622 ld_ptr(mark_addr, Rmark);
2623
2624 if (try_bias) {
2625 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
2626 }
2627
2628 // Save Rbox in Rscratch to be used for the cas operation
2629 mov(Rbox, Rscratch);
2630
2631 // set Rmark to markOop | markOopDesc::unlocked_value
2632 or3(Rmark, markOopDesc::unlocked_value, Rmark);
2633
2634 // Initialize the box. (Must happen before we update the object mark!)
2635 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
2636
2637 // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
2638 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
2639 cas_ptr(mark_addr.base(), Rmark, Rscratch);
2640
2641 // if compare/exchange succeeded we found an unlocked object and we now have locked it
2642 // hence we are done
2643 cmp(Rmark, Rscratch);
2644 sub(Rscratch, STACK_BIAS, Rscratch);
2645 brx(Assembler::equal, false, Assembler::pt, done);
2646 delayed()->sub(Rscratch, SP, Rscratch); //pull next instruction into delay slot
2647
2648 // we did not find an unlocked object so see if this is a recursive case
2649 // sub(Rscratch, SP, Rscratch);
2650 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
2651 andcc(Rscratch, 0xfffff003, Rscratch);
2652 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2653 bind (done);
2654 return ;
2655 }
2656
2657 Label Egress ;
2658
2659 if (EmitSync & 256) {
2660 Label IsInflated ;
2661
2662 ld_ptr(mark_addr, Rmark); // fetch obj->mark
2663 // Triage: biased, stack-locked, neutral, inflated
2664 if (try_bias) {
2665 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
2666 // Invariant: if control reaches this point in the emitted stream
2667 // then Rmark has not been modified.
2668 }
2669
2670 // Store mark into displaced mark field in the on-stack basic-lock "box"
2671 // Critically, this must happen before the CAS
2672 // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty.
2673 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
2674 andcc(Rmark, 2, G0);
2675 brx(Assembler::notZero, false, Assembler::pn, IsInflated);
2676 delayed()->
2677
2678 // Try stack-lock acquisition.
2679 // Beware: the 1st instruction is in a delay slot 2680 mov(Rbox, Rscratch); 2681 or3(Rmark, markOopDesc::unlocked_value, Rmark); 2682 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2683 cas_ptr(mark_addr.base(), Rmark, Rscratch); 2684 cmp(Rmark, Rscratch); 2685 brx(Assembler::equal, false, Assembler::pt, done); 2686 delayed()->sub(Rscratch, SP, Rscratch); 2687 2688 // Stack-lock attempt failed - check for recursive stack-lock. 2689 // See the comments below about how we might remove this case. 2690 sub(Rscratch, STACK_BIAS, Rscratch); 2691 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); 2692 andcc(Rscratch, 0xfffff003, Rscratch); 2693 br(Assembler::always, false, Assembler::pt, done); 2694 delayed()-> st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2695 2696 bind(IsInflated); 2697 if (EmitSync & 64) { 2698 // If m->owner != null goto IsLocked 2699 // Pessimistic form: Test-and-CAS vs CAS 2700 // The optimistic form avoids RTS->RTO cache line upgrades. 2701 ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rscratch); 2702 andcc(Rscratch, Rscratch, G0); 2703 brx(Assembler::notZero, false, Assembler::pn, done); 2704 delayed()->nop(); 2705 // m->owner == null : it's unlocked. 2706 } 2707 2708 // Try to CAS m->owner from null to Self 2709 // Invariant: if we acquire the lock then _recursions should be 0. 2710 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark); 2711 mov(G2_thread, Rscratch); 2712 cas_ptr(Rmark, G0, Rscratch); 2713 cmp(Rscratch, G0); 2714 // Intentional fall-through into done 2715 } else { 2716 // Aggressively avoid the Store-before-CAS penalty 2717 // Defer the store into box->dhw until after the CAS 2718 Label IsInflated, Recursive ; 2719 2720 // Anticipate CAS -- Avoid RTS->RTO upgrade 2721 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); 2722 2723 ld_ptr(mark_addr, Rmark); // fetch obj->mark 2724 // Triage: biased, stack-locked, neutral, inflated 2725 2726 if (try_bias) { 2727 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters); 2728 // Invariant: if control reaches this point in the emitted stream 2729 // then Rmark has not been modified. 2730 } 2731 andcc(Rmark, 2, G0); 2732 brx(Assembler::notZero, false, Assembler::pn, IsInflated); 2733 delayed()-> // Beware - dangling delay-slot 2734 2735 // Try stack-lock acquisition. 2736 // Transiently install BUSY (0) encoding in the mark word. 2737 // if the CAS of 0 into the mark was successful then we execute: 2738 // ST box->dhw = mark -- save fetched mark in on-stack basiclock box 2739 // ST obj->mark = box -- overwrite transient 0 value 2740 // This presumes TSO, of course. 2741 2742 mov(0, Rscratch); 2743 or3(Rmark, markOopDesc::unlocked_value, Rmark); 2744 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2745 cas_ptr(mark_addr.base(), Rmark, Rscratch); 2746 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); 2747 cmp(Rscratch, Rmark); 2748 brx(Assembler::notZero, false, Assembler::pn, Recursive); 2749 delayed()->st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2750 if (counters != NULL) { 2751 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch); 2752 } 2753 ba(done); 2754 delayed()->st_ptr(Rbox, mark_addr); 2755 2756 bind(Recursive); 2757 // Stack-lock attempt failed - check for recursive stack-lock. 2758 // Tests show that we can remove the recursive case with no impact 2759 // on refworkload 0.83. 
If we need to reduce the size of the code
2760 // emitted by compiler_lock_object() the recursive case is a perfect
2761 // candidate.
2762 //
2763 // A more extreme idea is to always inflate on stack-lock recursion.
2764 // This lets us eliminate the recursive checks in compiler_lock_object
2765 // and compiler_unlock_object and the (box->dhw == 0) encoding.
2766 // A brief experiment - requiring changes to synchronizer.cpp, the interpreter,
2767 // and so on - showed a performance *increase*. In the same experiment I eliminated
2768 // the fast-path stack-lock code from the interpreter and always passed
2769 // control to the "slow" operators in synchronizer.cpp.
2770
2771 // RScratch contains the fetched obj->mark value from the failed CAS.
2772 sub(Rscratch, STACK_BIAS, Rscratch);
2773 sub(Rscratch, SP, Rscratch);
2774 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
2775 andcc(Rscratch, 0xfffff003, Rscratch);
2776 if (counters != NULL) {
2777 // Accounting needs the Rscratch register
2778 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2779 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
2780 ba_short(done);
2781 } else {
2782 ba(done);
2783 delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2784 }
2785
2786 bind (IsInflated);
2787
2788 // Try to CAS m->owner from null to Self
2789 // Invariant: if we acquire the lock then _recursions should be 0.
2790 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
2791 mov(G2_thread, Rscratch);
2792 cas_ptr(Rmark, G0, Rscratch);
2793 andcc(Rscratch, Rscratch, G0); // set ICCs for done: icc.zf iff success
2794 // set icc.zf : 1=success 0=failure
2795 // ST box->displaced_header = NonZero.
2796 // Any non-zero value suffices:
2797 // markOopDesc::unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
2798 st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
2799 // Intentional fall-through into done
2800 }
2801
2802 bind (done);
2803 }
2804
2805 void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
2806 Register Rbox, Register Rscratch,
2807 bool try_bias) {
2808 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
2809
2810 Label done ;
2811
2812 if (EmitSync & 4) {
2813 cmp(SP, G0);
2814 return ;
2815 }
2816
2817 if (EmitSync & 8) {
2818 if (try_bias) {
2819 biased_locking_exit(mark_addr, Rscratch, done);
2820 }
2821
2822 // Test first if it is a fast recursive unlock
2823 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
2824 br_null_short(Rmark, Assembler::pt, done);
2825
2826 // Check if it is still a light weight lock, this is true if we see
2827 // the stack address of the basicLock in the markOop of the object
2828 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
2829 cas_ptr(mark_addr.base(), Rbox, Rmark);
2830 ba(done);
2831 delayed()->cmp(Rbox, Rmark);
2832 bind(done);
2833 return ;
2834 }
2835
2836 // Beware ... If the aggregate size of the code emitted by CLO and CUO
2837 // is too large, performance rolls abruptly off a cliff.
2838 // This could be related to inlining policies, code cache management, or
2839 // I$ effects.
2840 Label LStacked ; 2841 2842 if (try_bias) { 2843 // TODO: eliminate redundant LDs of obj->mark 2844 biased_locking_exit(mark_addr, Rscratch, done); 2845 } 2846 2847 ld_ptr(Roop, oopDesc::mark_offset_in_bytes(), Rmark); 2848 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch); 2849 andcc(Rscratch, Rscratch, G0); 2850 brx(Assembler::zero, false, Assembler::pn, done); 2851 delayed()->nop(); // consider: relocate fetch of mark, above, into this DS 2852 andcc(Rmark, 2, G0); 2853 brx(Assembler::zero, false, Assembler::pt, LStacked); 2854 delayed()->nop(); 2855 2856 // It's inflated 2857 // Conceptually we need a #loadstore|#storestore "release" MEMBAR before 2858 // the ST of 0 into _owner which releases the lock. This prevents loads 2859 // and stores within the critical section from reordering (floating) 2860 // past the store that releases the lock. But TSO is a strong memory model 2861 // and that particular flavor of barrier is a noop, so we can safely elide it. 2862 // Note that we use 1-0 locking by default for the inflated case. We 2863 // close the resultant (and rare) race by having contended threads in 2864 // monitorenter periodically poll _owner. 2865 2866 if (EmitSync & 1024) { 2867 // Emit code to check that _owner == Self 2868 // We could fold the _owner test into subsequent code more efficiently 2869 // than using a stand-alone check, but since _owner checking is off by 2870 // default we don't bother. We also might consider predicating the 2871 // _owner==Self check on Xcheck:jni or running on a debug build. 2872 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), Rscratch); 2873 orcc(Rscratch, G0, G0); 2874 brx(Assembler::notZero, false, Assembler::pn, done); 2875 delayed()->nop(); 2876 } 2877 2878 if (EmitSync & 512) { 2879 // classic lock release code absent 1-0 locking 2880 // m->Owner = null; 2881 // membar #storeload 2882 // if (m->cxq|m->EntryList) == null goto Success 2883 // if (m->succ != null) goto Success 2884 // if CAS (&m->Owner,0,Self) != 0 goto Success 2885 // goto SlowPath 2886 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), Rbox); 2887 orcc(Rbox, G0, G0); 2888 brx(Assembler::notZero, false, Assembler::pn, done); 2889 delayed()->nop(); 2890 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 2891 if (os::is_MP()) { membar(StoreLoad); } 2892 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)), Rscratch); 2893 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)), Rbox); 2894 orcc(Rbox, Rscratch, G0); 2895 brx(Assembler::zero, false, Assembler::pt, done); 2896 delayed()-> 2897 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch); 2898 andcc(Rscratch, Rscratch, G0); 2899 brx(Assembler::notZero, false, Assembler::pt, done); 2900 delayed()->andcc(G0, G0, G0); 2901 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark); 2902 mov(G2_thread, Rscratch); 2903 cas_ptr(Rmark, G0, Rscratch); 2904 cmp(Rscratch, G0); 2905 // invert icc.zf and goto done 2906 brx(Assembler::notZero, false, Assembler::pt, done); 2907 delayed()->cmp(G0, G0); 2908 br(Assembler::always, false, Assembler::pt, done); 2909 delayed()->cmp(G0, 1); 2910 } else { 2911 // 1-0 form : avoids CAS and MEMBAR in the common case 2912 // Do not bother to ratify that m->Owner == Self. 
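  // Editorial aside: a rough pseudocode sketch of this default 1-0 exit
  // sequence, for illustration only (not emitted code; the EmitSync & 16384
  // variant below additionally bails to the slow path early when _succ is
  // null):
  //
  //   if (m->_recursions != 0) goto slow;              // recursive inflated unlock
  //   m->_owner = NULL;                                // plain ST releases the lock
  //   if ((m->_cxq | m->_EntryList) == 0) goto success;
  //   MEMBAR #storeload;
  //   if (m->_succ != NULL) goto success;              // a successor will self-wake
  //   if (CAS(&m->_owner, NULL, Self) != NULL) goto success;
  //   goto slow;                                       // reacquired: must wake a successor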
2913 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), Rbox);
2914 orcc(Rbox, G0, G0);
2915 brx(Assembler::notZero, false, Assembler::pn, done);
2916 delayed()->
2917 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)), Rscratch);
2918 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)), Rbox);
2919 orcc(Rbox, Rscratch, G0);
2920 if (EmitSync & 16384) {
2921 // As an optional optimization, if (EntryList|cxq) != null and _succ is null then
2922 // we should transfer control directly to the slow-path.
2923 // This test makes the reacquire operation below very infrequent.
2924 // The logic is equivalent to :
2925 // if (cxq|EntryList) == null : Owner=null; goto Success
2926 // if succ == null : goto SlowPath
2927 // Owner=null; membar #storeload
2928 // if succ != null : goto Success
2929 // if CAS(&Owner,null,Self) != null goto Success
2930 // goto SlowPath
2931 brx(Assembler::zero, true, Assembler::pt, done);
2932 delayed()->
2933 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2934 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch);
2935 andcc(Rscratch, Rscratch, G0) ;
2936 brx(Assembler::zero, false, Assembler::pt, done);
2937 delayed()->orcc(G0, 1, G0);
2938 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2939 } else {
2940 brx(Assembler::zero, false, Assembler::pt, done);
2941 delayed()->
2942 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2943 }
2944 if (os::is_MP()) { membar(StoreLoad); }
2945 // Check that _succ is (or remains) non-zero
2946 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch);
2947 andcc(Rscratch, Rscratch, G0);
2948 brx(Assembler::notZero, false, Assembler::pt, done);
2949 delayed()->andcc(G0, G0, G0);
2950 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
2951 mov(G2_thread, Rscratch);
2952 cas_ptr(Rmark, G0, Rscratch);
2953 cmp(Rscratch, G0);
2954 // invert icc.zf and goto done
2955 // A slightly better v8+/v9 idiom would be the following:
2956 // movrnz Rscratch,1,Rscratch
2957 // ba done
2958 // xorcc Rscratch,1,G0
2959 // In v8+ mode the idiom would be valid IFF Rscratch was a G or O register
2960 brx(Assembler::notZero, false, Assembler::pt, done);
2961 delayed()->cmp(G0, G0);
2962 br(Assembler::always, false, Assembler::pt, done);
2963 delayed()->cmp(G0, 1);
2964 }
2965
2966 bind (LStacked);
2967 // Consider: we could replace the expensive CAS in the exit
2968 // path with a simple ST of the displaced mark value fetched from
2969 // the on-stack basiclock box. That admits a race where a thread T2
2970 // in the slow lock path -- inflating with monitor M -- could race a
2971 // thread T1 in the fast unlock path, resulting in a missed wakeup for T2.
2972 // More precisely T1 in the stack-lock unlock path could "stomp" the
2973 // inflated mark value M installed by T2, resulting in an orphan
2974 // object monitor M and T2 becoming stranded. We can remedy that situation
2975 // by having T2 periodically poll the object's mark word using timed wait
2976 // operations. If T2 discovers that a stomp has occurred it vacates
2977 // the monitor M and wakes any other threads stranded on the now-orphan M.
2978 // In addition the monitor scavenger, which performs deflation,
2979 // would also need to check for orphan monitors and stranded threads.
2980 //
2981 // Finally, inflation is also used when T2 needs to assign a hashCode
2982 // to O and O is stack-locked by T1.
The "stomp" race could cause 2983 // an assigned hashCode value to be lost. We can avoid that condition 2984 // and provide the necessary hashCode stability invariants by ensuring 2985 // that hashCode generation is idempotent between copying GCs. 2986 // For example we could compute the hashCode of an object O as 2987 // O's heap address XOR some high quality RNG value that is refreshed 2988 // at GC-time. The monitor scavenger would install the hashCode 2989 // found in any orphan monitors. Again, the mechanism admits a 2990 // lost-update "stomp" WAW race but detects and recovers as needed. 2991 // 2992 // A prototype implementation showed excellent results, although 2993 // the scavenger and timeout code was rather involved. 2994 2995 cas_ptr(mark_addr.base(), Rbox, Rscratch); 2996 cmp(Rbox, Rscratch); 2997 // Intentional fall through into done ... 2998 2999 bind(done); 3000 } 3001 3002 3003 3004 void MacroAssembler::print_CPU_state() { 3005 // %%%%% need to implement this 3006 } 3007 3008 void MacroAssembler::verify_FPU(int stack_depth, const char* s) { 3009 // %%%%% need to implement this 3010 } 3011 3012 void MacroAssembler::push_IU_state() { 3013 // %%%%% need to implement this 3014 } 3015 3016 3017 void MacroAssembler::pop_IU_state() { 3018 // %%%%% need to implement this 3019 } 3020 3021 3022 void MacroAssembler::push_FPU_state() { 3023 // %%%%% need to implement this 3024 } 3025 3026 3027 void MacroAssembler::pop_FPU_state() { 3028 // %%%%% need to implement this 3029 } 3030 3031 3032 void MacroAssembler::push_CPU_state() { 3033 // %%%%% need to implement this 3034 } 3035 3036 3037 void MacroAssembler::pop_CPU_state() { 3038 // %%%%% need to implement this 3039 } 3040 3041 3042 3043 void MacroAssembler::verify_tlab() { 3044 #ifdef ASSERT 3045 if (UseTLAB && VerifyOops) { 3046 Label next, next2, ok; 3047 Register t1 = L0; 3048 Register t2 = L1; 3049 Register t3 = L2; 3050 3051 save_frame(0); 3052 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 3053 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2); 3054 or3(t1, t2, t3); 3055 cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next); 3056 STOP("assert(top >= start)"); 3057 should_not_reach_here(); 3058 3059 bind(next); 3060 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 3061 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2); 3062 or3(t3, t2, t3); 3063 cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2); 3064 STOP("assert(top <= end)"); 3065 should_not_reach_here(); 3066 3067 bind(next2); 3068 and3(t3, MinObjAlignmentInBytesMask, t3); 3069 cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok); 3070 STOP("assert(aligned)"); 3071 should_not_reach_here(); 3072 3073 bind(ok); 3074 restore(); 3075 } 3076 #endif 3077 } 3078 3079 3080 void MacroAssembler::eden_allocate( 3081 Register obj, // result: pointer to object after successful allocation 3082 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 3083 int con_size_in_bytes, // object size in bytes if known at compile time 3084 Register t1, // temp register 3085 Register t2, // temp register 3086 Label& slow_case // continuation point if fast allocation fails 3087 ){ 3088 // make sure arguments make sense 3089 assert_different_registers(obj, var_size_in_bytes, t1, t2); 3090 assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size"); 3091 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, 
"object size is not multiple of alignment"); 3092 3093 if (!Universe::heap()->supports_inline_contig_alloc()) { 3094 // No allocation in the shared eden. 3095 ba(slow_case); 3096 delayed()->nop(); 3097 } else { 3098 // get eden boundaries 3099 // note: we need both top & top_addr! 3100 const Register top_addr = t1; 3101 const Register end = t2; 3102 3103 CollectedHeap* ch = Universe::heap(); 3104 set((intx)ch->top_addr(), top_addr); 3105 intx delta = (intx)ch->end_addr() - (intx)ch->top_addr(); 3106 ld_ptr(top_addr, delta, end); 3107 ld_ptr(top_addr, 0, obj); 3108 3109 // try to allocate 3110 Label retry; 3111 bind(retry); 3112 #ifdef ASSERT 3113 // make sure eden top is properly aligned 3114 { 3115 Label L; 3116 btst(MinObjAlignmentInBytesMask, obj); 3117 br(Assembler::zero, false, Assembler::pt, L); 3118 delayed()->nop(); 3119 STOP("eden top is not properly aligned"); 3120 bind(L); 3121 } 3122 #endif // ASSERT 3123 const Register free = end; 3124 sub(end, obj, free); // compute amount of free space 3125 if (var_size_in_bytes->is_valid()) { 3126 // size is unknown at compile time 3127 cmp(free, var_size_in_bytes); 3128 brx(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case 3129 delayed()->add(obj, var_size_in_bytes, end); 3130 } else { 3131 // size is known at compile time 3132 cmp(free, con_size_in_bytes); 3133 brx(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case 3134 delayed()->add(obj, con_size_in_bytes, end); 3135 } 3136 // Compare obj with the value at top_addr; if still equal, swap the value of 3137 // end with the value at top_addr. If not equal, read the value at top_addr 3138 // into end. 3139 cas_ptr(top_addr, obj, end); 3140 // if someone beat us on the allocation, try again, otherwise continue 3141 cmp(obj, end); 3142 brx(Assembler::notEqual, false, Assembler::pn, retry); 3143 delayed()->mov(end, obj); // nop if successfull since obj == end 3144 3145 #ifdef ASSERT 3146 // make sure eden top is properly aligned 3147 { 3148 Label L; 3149 const Register top_addr = t1; 3150 3151 set((intx)ch->top_addr(), top_addr); 3152 ld_ptr(top_addr, 0, top_addr); 3153 btst(MinObjAlignmentInBytesMask, top_addr); 3154 br(Assembler::zero, false, Assembler::pt, L); 3155 delayed()->nop(); 3156 STOP("eden top is not properly aligned"); 3157 bind(L); 3158 } 3159 #endif // ASSERT 3160 } 3161 } 3162 3163 3164 void MacroAssembler::tlab_allocate( 3165 Register obj, // result: pointer to object after successful allocation 3166 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 3167 int con_size_in_bytes, // object size in bytes if known at compile time 3168 Register t1, // temp register 3169 Label& slow_case // continuation point if fast allocation fails 3170 ){ 3171 // make sure arguments make sense 3172 assert_different_registers(obj, var_size_in_bytes, t1); 3173 assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size"); 3174 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment"); 3175 3176 const Register free = t1; 3177 3178 verify_tlab(); 3179 3180 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj); 3181 3182 // calculate amount of free space 3183 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free); 3184 sub(free, obj, free); 3185 3186 Label done; 3187 if (var_size_in_bytes == noreg) { 3188 cmp(free, con_size_in_bytes); 3189 } else { 3190 
3190 cmp(free, var_size_in_bytes);
3191 }
3192 br(Assembler::less, false, Assembler::pn, slow_case);
3193 // calculate the new top pointer
3194 if (var_size_in_bytes == noreg) {
3195 delayed()->add(obj, con_size_in_bytes, free);
3196 } else {
3197 delayed()->add(obj, var_size_in_bytes, free);
3198 }
3199
3200 bind(done);
3201
3202 #ifdef ASSERT
3203 // make sure new free pointer is properly aligned
3204 {
3205 Label L;
3206 btst(MinObjAlignmentInBytesMask, free);
3207 br(Assembler::zero, false, Assembler::pt, L);
3208 delayed()->nop();
3209 STOP("updated TLAB free is not properly aligned");
3210 bind(L);
3211 }
3212 #endif // ASSERT
3213
3214 // update the tlab top pointer
3215 st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
3216 verify_tlab();
3217 }
3218
3219
3220 void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) {
3221 Register top = O0;
3222 Register t1 = G1;
3223 Register t2 = G3;
3224 Register t3 = O1;
3225 assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */);
3226 Label do_refill, discard_tlab;
3227
3228 if (!Universe::heap()->supports_inline_contig_alloc()) {
3229 // No allocation in the shared eden.
3230 ba(slow_case);
3231 delayed()->nop();
3232 }
3233
3234 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top);
3235 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1);
3236 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2);
3237
3238 // calculate amount of free space
3239 sub(t1, top, t1);
3240 srl_ptr(t1, LogHeapWordSize, t1);
3241
3242 // Retain tlab and allocate object in shared space if
3243 // the amount free in the tlab is too large to discard.
3244 cmp(t1, t2);
3245
3246 brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab);
3247 // increment waste limit to prevent getting stuck on this slow path
3248 if (Assembler::is_simm13(ThreadLocalAllocBuffer::refill_waste_limit_increment())) {
3249 delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2);
3250 } else {
3251 delayed()->nop();
3252 // set64 does not use the temp register if the given constant fits in 32 bits. So
3253 // we can just use any register; using G0 results in the upper 32 bits
3254 // of that value being ignored.
3255 set64(ThreadLocalAllocBuffer::refill_waste_limit_increment(), t3, G0); 3256 add(t2, t3, t2); 3257 } 3258 3259 st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())); 3260 if (TLABStats) { 3261 // increment number of slow_allocations 3262 ld(G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()), t2); 3263 add(t2, 1, t2); 3264 stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset())); 3265 } 3266 ba(try_eden); 3267 delayed()->nop(); 3268 3269 bind(discard_tlab); 3270 if (TLABStats) { 3271 // increment number of refills 3272 ld(G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()), t2); 3273 add(t2, 1, t2); 3274 stw(t2, G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset())); 3275 // accumulate wastage 3276 ld(G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()), t2); 3277 add(t2, t1, t2); 3278 stw(t2, G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset())); 3279 } 3280 3281 // if tlab is currently allocated (top or end != null) then 3282 // fill [top, end + alignment_reserve) with array object 3283 br_null_short(top, Assembler::pn, do_refill); 3284 3285 set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2); 3286 st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word 3287 // set klass to intArrayKlass 3288 sub(t1, typeArrayOopDesc::header_size(T_INT), t1); 3289 add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1); 3290 sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1); 3291 st(t1, top, arrayOopDesc::length_offset_in_bytes()); 3292 set((intptr_t)Universe::intArrayKlassObj_addr(), t2); 3293 ld_ptr(t2, 0, t2); 3294 // store klass last. concurrent GCs assume the length is valid if the 3295 // klass field is not null. 3296 store_klass(t2, top); 3297 verify_oop(top); 3298 3299 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t1); 3300 sub(top, t1, t1); // size of tlab's allocated portion 3301 incr_allocated_bytes(t1, t2, t3); 3302 3303 // refill the tlab with an eden allocation 3304 bind(do_refill); 3305 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1); 3306 sll_ptr(t1, LogHeapWordSize, t1); 3307 // allocate new tlab, address returned in top 3308 eden_allocate(top, t1, 0, t2, t3, slow_case); 3309 3310 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset())); 3311 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_top_offset())); 3312 #ifdef ASSERT 3313 // check that tlab_size (t1) is still valid 3314 { 3315 Label ok; 3316 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2); 3317 sll_ptr(t2, LogHeapWordSize, t2); 3318 cmp_and_br_short(t1, t2, Assembler::equal, Assembler::pt, ok); 3319 STOP("assert(t1 == tlab_size)"); 3320 should_not_reach_here(); 3321 3322 bind(ok); 3323 } 3324 #endif // ASSERT 3325 add(top, t1, top); // t1 is tlab_size 3326 sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top); 3327 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset())); 3328 3329 if (ZeroTLAB) { 3330 // This is a fast TLAB refill, therefore the GC is not notified of it. 3331 // So compiled code must fill the new TLAB with zeroes.
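// Reload the TLAB base into t2 (t2 was clobbered above); zero_memory()
// then clears t1 bytes starting at that base, t1 still holding the TLAB
// size in bytes.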
3332 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2); 3333 zero_memory(t2, t1); 3334 } 3335 verify_tlab(); 3336 ba(retry); 3337 delayed()->nop(); 3338 } 3339 3340 void MacroAssembler::zero_memory(Register base, Register index) { 3341 assert_different_registers(base, index); 3342 Label loop; 3343 bind(loop); 3344 subcc(index, HeapWordSize, index); 3345 brx(Assembler::greaterEqual, true, Assembler::pt, loop); 3346 delayed()->st_ptr(G0, base, index); 3347 } 3348 3349 void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes, 3350 Register t1, Register t2) { 3351 // Bump total bytes allocated by this thread 3352 assert(t1->is_global(), "must be global reg"); // so all 64 bits are saved on a context switch 3353 assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2); 3354 // v8 support has gone the way of the dodo 3355 ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1); 3356 add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1); 3357 stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset())); 3358 } 3359 3360 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) { 3361 switch (cond) { 3362 // Note some conditions are synonyms for others 3363 case Assembler::never: return Assembler::always; 3364 case Assembler::zero: return Assembler::notZero; 3365 case Assembler::lessEqual: return Assembler::greater; 3366 case Assembler::less: return Assembler::greaterEqual; 3367 case Assembler::lessEqualUnsigned: return Assembler::greaterUnsigned; 3368 case Assembler::lessUnsigned: return Assembler::greaterEqualUnsigned; 3369 case Assembler::negative: return Assembler::positive; 3370 case Assembler::overflowSet: return Assembler::overflowClear; 3371 case Assembler::always: return Assembler::never; 3372 case Assembler::notZero: return Assembler::zero; 3373 case Assembler::greater: return Assembler::lessEqual; 3374 case Assembler::greaterEqual: return Assembler::less; 3375 case Assembler::greaterUnsigned: return Assembler::lessEqualUnsigned; 3376 case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned; 3377 case Assembler::positive: return Assembler::negative; 3378 case Assembler::overflowClear: return Assembler::overflowSet; 3379 } 3380 3381 ShouldNotReachHere(); return Assembler::overflowClear; 3382 } 3383 3384 void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr, 3385 Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) { 3386 Condition negated_cond = negate_condition(cond); 3387 Label L; 3388 brx(negated_cond, false, Assembler::pt, L); 3389 delayed()->nop(); 3390 inc_counter(counter_ptr, Rtmp1, Rtmp2); 3391 bind(L); 3392 } 3393 3394 void MacroAssembler::inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2) { 3395 AddressLiteral addrlit(counter_addr); 3396 sethi(addrlit, Rtmp1); // Move hi22 bits into temporary register. 3397 Address addr(Rtmp1, addrlit.low10()); // Build an address with low10 bits. 
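// Plain load/increment/store: the counter update is not atomic, which is
// acceptable for the advisory profiling counters this is used for.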
3398 ld(addr, Rtmp2); 3399 inc(Rtmp2); 3400 st(Rtmp2, addr); 3401 } 3402 3403 void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) { 3404 inc_counter((address) counter_addr, Rtmp1, Rtmp2); 3405 } 3406 3407 SkipIfEqual::SkipIfEqual( 3408 MacroAssembler* masm, Register temp, const bool* flag_addr, 3409 Assembler::Condition condition) { 3410 _masm = masm; 3411 AddressLiteral flag(flag_addr); 3412 _masm->sethi(flag, temp); 3413 _masm->ldub(temp, flag.low10(), temp); 3414 _masm->tst(temp); 3415 _masm->br(condition, false, Assembler::pt, _label); 3416 _masm->delayed()->nop(); 3417 } 3418 3419 SkipIfEqual::~SkipIfEqual() { 3420 _masm->bind(_label); 3421 } 3422 3423 3424 // Writes to stack successive pages until offset reached to check for 3425 // stack overflow + shadow pages. This clobbers tsp and scratch. 3426 void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp, 3427 Register Rscratch) { 3428 // Use stack pointer in temp stack pointer 3429 mov(SP, Rtsp); 3430 3431 // Bang stack for total size given plus stack shadow page size. 3432 // Bang one page at a time because a large size can overflow yellow and 3433 // red zones (the bang will fail but stack overflow handling can't tell that 3434 // it was a stack overflow bang vs a regular segv). 3435 int offset = os::vm_page_size(); 3436 Register Roffset = Rscratch; 3437 3438 Label loop; 3439 bind(loop); 3440 set((-offset)+STACK_BIAS, Rscratch); 3441 st(G0, Rtsp, Rscratch); 3442 set(offset, Roffset); 3443 sub(Rsize, Roffset, Rsize); 3444 cmp(Rsize, G0); 3445 br(Assembler::greater, false, Assembler::pn, loop); 3446 delayed()->sub(Rtsp, Roffset, Rtsp); 3447 3448 // Bang down shadow pages too. 3449 // At this point, (tmp-0) is the last address touched, so don't 3450 // touch it again. (It was touched as (tmp-pagesize) but then tmp 3451 // was post-decremented.) Skip this address by starting at i=1, and 3452 // touch a few more pages below. N.B. It is important to touch all 3453 // the way down to and including i=StackShadowPages. 3454 for (int i = 1; i < JavaThread::stack_shadow_zone_size() / os::vm_page_size(); i++) { 3455 set((-i*offset)+STACK_BIAS, Rscratch); 3456 st(G0, Rtsp, Rscratch); 3457 } 3458 } 3459 3460 void MacroAssembler::reserved_stack_check() { 3461 // testing if reserved zone needs to be enabled 3462 Label no_reserved_zone_enabling; 3463 3464 ld_ptr(G2_thread, JavaThread::reserved_stack_activation_offset(), G4_scratch); 3465 cmp_and_brx_short(SP, G4_scratch, Assembler::lessUnsigned, Assembler::pt, no_reserved_zone_enabling); 3466 3467 call_VM_leaf(L0, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), G2_thread); 3468 3469 AddressLiteral stub(StubRoutines::throw_delayed_StackOverflowError_entry()); 3470 jump_to(stub, G4_scratch); 3471 delayed()->restore(); 3472 3473 should_not_reach_here(); 3474 3475 bind(no_reserved_zone_enabling); 3476 } 3477 3478 /////////////////////////////////////////////////////////////////////////////////// 3479 #if INCLUDE_ALL_GCS 3480 3481 static address satb_log_enqueue_with_frame = NULL; 3482 static u_char* satb_log_enqueue_with_frame_end = NULL; 3483 3484 static address satb_log_enqueue_frameless = NULL; 3485 static u_char* satb_log_enqueue_frameless_end = NULL; 3486 3487 static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions? 
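// A C sketch of what the generated stub does (illustrative only; the field
// names below are approximations, not the real SATBMarkQueue API):
//
//   size_t index = thread->satb_queue_index;
//   if (index == 0) {                                   // buffer is full
//     SATBMarkQueueSet::handle_zero_index_for_thread(thread);
//     // ... then loop back to 'restart' and try again
//   } else {
//     index -= oopSize;
//     *(oop*)(thread->satb_queue_buf + index) = pre_val;
//     thread->satb_queue_index = index;
//   }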
3488 3489 static void generate_satb_log_enqueue(bool with_frame) { 3490 BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize); 3491 CodeBuffer buf(bb); 3492 MacroAssembler masm(&buf); 3493 3494 #define __ masm. 3495 3496 address start = __ pc(); 3497 Register pre_val; 3498 3499 Label refill, restart; 3500 if (with_frame) { 3501 __ save_frame(0); 3502 pre_val = I0; // Was O0 before the save. 3503 } else { 3504 pre_val = O0; 3505 } 3506 3507 int satb_q_index_byte_offset = 3508 in_bytes(JavaThread::satb_mark_queue_offset() + 3509 SATBMarkQueue::byte_offset_of_index()); 3510 3511 int satb_q_buf_byte_offset = 3512 in_bytes(JavaThread::satb_mark_queue_offset() + 3513 SATBMarkQueue::byte_offset_of_buf()); 3514 3515 assert(in_bytes(SATBMarkQueue::byte_width_of_index()) == sizeof(intptr_t) && 3516 in_bytes(SATBMarkQueue::byte_width_of_buf()) == sizeof(intptr_t), 3517 "check sizes in assembly below"); 3518 3519 __ bind(restart); 3520 3521 // Load the index into the SATB buffer. SATBMarkQueue::_index is a size_t 3522 // so ld_ptr is appropriate. 3523 __ ld_ptr(G2_thread, satb_q_index_byte_offset, L0); 3524 3525 // index == 0? 3526 __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill); 3527 3528 __ ld_ptr(G2_thread, satb_q_buf_byte_offset, L1); 3529 __ sub(L0, oopSize, L0); 3530 3531 __ st_ptr(pre_val, L1, L0); // [_buf + index] := I0 3532 if (!with_frame) { 3533 // Use return-from-leaf 3534 __ retl(); 3535 __ delayed()->st_ptr(L0, G2_thread, satb_q_index_byte_offset); 3536 } else { 3537 // Not delayed. 3538 __ st_ptr(L0, G2_thread, satb_q_index_byte_offset); 3539 } 3540 if (with_frame) { 3541 __ ret(); 3542 __ delayed()->restore(); 3543 } 3544 __ bind(refill); 3545 3546 address handle_zero = 3547 CAST_FROM_FN_PTR(address, 3548 &SATBMarkQueueSet::handle_zero_index_for_thread); 3549 // This should be rare enough that we can afford to save all the 3550 // scratch registers that the calling context might be using. 3551 __ mov(G1_scratch, L0); 3552 __ mov(G3_scratch, L1); 3553 __ mov(G4, L2); 3554 // We need the value of O0 above (for the write into the buffer), so we 3555 // save and restore it. 3556 __ mov(O0, L3); 3557 // Since the call will overwrite O7, we save and restore that, as well. 3558 __ mov(O7, L4); 3559 __ call_VM_leaf(L5, handle_zero, G2_thread); 3560 __ mov(L0, G1_scratch); 3561 __ mov(L1, G3_scratch); 3562 __ mov(L2, G4); 3563 __ mov(L3, O0); 3564 __ br(Assembler::always, /*annul*/false, Assembler::pt, restart); 3565 __ delayed()->mov(L4, O7); 3566 3567 if (with_frame) { 3568 satb_log_enqueue_with_frame = start; 3569 satb_log_enqueue_with_frame_end = __ pc(); 3570 } else { 3571 satb_log_enqueue_frameless = start; 3572 satb_log_enqueue_frameless_end = __ pc(); 3573 } 3574 3575 #undef __ 3576 } 3577 3578 void MacroAssembler::g1_write_barrier_pre(Register obj, 3579 Register index, 3580 int offset, 3581 Register pre_val, 3582 Register tmp, 3583 bool preserve_o_regs) { 3584 Label filtered; 3585 3586 if (obj == noreg) { 3587 // We are not loading the previous value so make 3588 // sure that we don't trash the value in pre_val 3589 // with the code below. 3590 assert_different_registers(pre_val, tmp); 3591 } else { 3592 // We will be loading the previous value 3593 // in this code so... 3594 assert(offset == 0 || index == noreg, "choose one"); 3595 assert(pre_val == noreg, "check this code"); 3596 } 3597 3598 // Is marking active? 
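// The in-memory width of SATBMarkQueue::_active differs between builds,
// so load either 4 bytes or 1 byte to match byte_width_of_active().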
3599 if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) { 3600 ld(G2, 3601 in_bytes(JavaThread::satb_mark_queue_offset() + 3602 SATBMarkQueue::byte_offset_of_active()), 3603 tmp); 3604 } else { 3605 guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, 3606 "Assumption"); 3607 ldsb(G2, 3608 in_bytes(JavaThread::satb_mark_queue_offset() + 3609 SATBMarkQueue::byte_offset_of_active()), 3610 tmp); 3611 } 3612 3613 // Is marking active? 3614 cmp_and_br_short(tmp, G0, Assembler::equal, Assembler::pt, filtered); 3615 3616 // Do we need to load the previous value? 3617 if (obj != noreg) { 3618 // Load the previous value... 3619 if (index == noreg) { 3620 if (Assembler::is_simm13(offset)) { 3621 load_heap_oop(obj, offset, tmp); 3622 } else { 3623 set(offset, tmp); 3624 load_heap_oop(obj, tmp, tmp); 3625 } 3626 } else { 3627 load_heap_oop(obj, index, tmp); 3628 } 3629 // Previous value has been loaded into tmp 3630 pre_val = tmp; 3631 } 3632 3633 assert(pre_val != noreg, "must have a real register"); 3634 3635 // Is the previous value null? 3636 cmp_and_brx_short(pre_val, G0, Assembler::equal, Assembler::pt, filtered); 3637 3638 // OK, it's not filtered, so we'll need to call enqueue. In the normal 3639 // case, pre_val will be a scratch G-reg, but there are some cases in 3640 // which it's an O-reg. In the first case, do a normal call. In the 3641 // latter, do a save here and call the frameless version. 3642 3643 guarantee(pre_val->is_global() || pre_val->is_out(), 3644 "Or we need to think harder."); 3645 3646 if (pre_val->is_global() && !preserve_o_regs) { 3647 call(satb_log_enqueue_with_frame); 3648 delayed()->mov(pre_val, O0); 3649 } else { 3650 save_frame(0); 3651 call(satb_log_enqueue_frameless); 3652 delayed()->mov(pre_val->after_save(), O0); 3653 restore(); 3654 } 3655 3656 bind(filtered); 3657 } 3658 3659 static address dirty_card_log_enqueue = 0; 3660 static u_char* dirty_card_log_enqueue_end = 0; 3661 3662 // This gets to assume that o0 contains the object address. 3663 static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) { 3664 BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2); 3665 CodeBuffer buf(bb); 3666 MacroAssembler masm(&buf); 3667 #define __ masm. 3668 address start = __ pc(); 3669 3670 Label not_already_dirty, restart, refill, young_card; 3671 3672 __ srlx(O0, CardTableModRefBS::card_shift, O0); 3673 AddressLiteral addrlit(byte_map_base); 3674 __ set(addrlit, O1); // O1 := <card table base> 3675 __ ldub(O0, O1, O2); // O2 := [O0 + O1] 3676 3677 __ cmp_and_br_short(O2, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card); 3678 3679 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); 3680 __ ldub(O0, O1, O2); // O2 := [O0 + O1] 3681 3682 assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code"); 3683 __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty); 3684 3685 __ bind(young_card); 3686 // We didn't take the branch, so we're already dirty: return. 3687 // Use return-from-leaf 3688 __ retl(); 3689 __ delayed()->nop(); 3690 3691 // Not dirty. 3692 __ bind(not_already_dirty); 3693 3694 // Get O0 + O1 into a reg by itself 3695 __ add(O0, O1, O3); 3696 3697 // First, dirty it. 3698 __ stb(G0, O3, G0); // [cardPtr] := 0 (i.e., dirty). 
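// The rest of the stub pushes the card address (O3) onto the thread's
// dirty card queue, conceptually (an illustrative sketch, mirroring the
// SATB stub above):
//
//   if (index == 0) {                                   // buffer is full
//     DirtyCardQueueSet::handle_zero_index_for_thread(thread);
//     // ... then loop back to 'restart' and try again
//   } else {
//     index -= oopSize;
//     *(jbyte**)(buf + index) = card_addr;
//   }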
3699 3700 int dirty_card_q_index_byte_offset = 3701 in_bytes(JavaThread::dirty_card_queue_offset() + 3702 DirtyCardQueue::byte_offset_of_index()); 3703 int dirty_card_q_buf_byte_offset = 3704 in_bytes(JavaThread::dirty_card_queue_offset() + 3705 DirtyCardQueue::byte_offset_of_buf()); 3706 __ bind(restart); 3707 3708 // Load the index into the update buffer. DirtyCardQueue::_index is 3709 // a size_t so ld_ptr is appropriate here. 3710 __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, L0); 3711 3712 // index == 0? 3713 __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill); 3714 3715 __ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, L1); 3716 __ sub(L0, oopSize, L0); 3717 3718 __ st_ptr(O3, L1, L0); // [_buf + index] := O3 (the card address) 3719 // Use return-from-leaf 3720 __ retl(); 3721 __ delayed()->st_ptr(L0, G2_thread, dirty_card_q_index_byte_offset); 3722 3723 __ bind(refill); 3724 address handle_zero = 3725 CAST_FROM_FN_PTR(address, 3726 &DirtyCardQueueSet::handle_zero_index_for_thread); 3727 // This should be rare enough that we can afford to save all the 3728 // scratch registers that the calling context might be using. 3729 __ mov(G1_scratch, L3); 3730 __ mov(G3_scratch, L5); 3731 // We need the value of O3 above (for the write into the buffer), so we 3732 // save and restore it. 3733 __ mov(O3, L6); 3734 // Since the call will overwrite O7, we save and restore that, as well. 3735 __ mov(O7, L4); 3736 3737 __ call_VM_leaf(L7_thread_cache, handle_zero, G2_thread); 3738 __ mov(L3, G1_scratch); 3739 __ mov(L5, G3_scratch); 3740 __ mov(L6, O3); 3741 __ br(Assembler::always, /*annul*/false, Assembler::pt, restart); 3742 __ delayed()->mov(L4, O7); 3743 3744 dirty_card_log_enqueue = start; 3745 dirty_card_log_enqueue_end = __ pc(); 3746 // XXX Should have a guarantee here about not going off the end! 3747 // Does it already do so? Do an experiment... 3748 3749 #undef __ 3750 3751 } 3752 3753 void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val, Register tmp) { 3754 3755 Label filtered; 3756 MacroAssembler* post_filter_masm = this; 3757 3758 if (new_val == G0) return; 3759 3760 G1SATBCardTableLoggingModRefBS* bs = 3761 barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set()); 3762 3763 if (G1RSBarrierRegionFilter) { 3764 xor3(store_addr, new_val, tmp); 3765 srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp); 3766 3767 // XXX Should I predict this taken or not? Does it matter? 3768 cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered); 3769 } 3770 3771 // If the "store_addr" register is an "in" or "local" register, move it to 3772 // a scratch reg so we can pass it as an argument. 3773 bool use_scr = !(store_addr->is_global() || store_addr->is_out()); 3774 // Pick a scratch register different from "tmp". 3775 Register scr = (tmp == G1_scratch ? G3_scratch : G1_scratch); 3776 // Make sure we use up the delay slot! 3777
3777 if (use_scr) { 3778 post_filter_masm->mov(store_addr, scr); 3779 } else { 3780 post_filter_masm->nop(); 3781 } 3782 save_frame(0); 3783 call(dirty_card_log_enqueue); 3784 if (use_scr) { 3785 delayed()->mov(scr, O0); 3786 } else { 3787 delayed()->mov(store_addr->after_save(), O0); 3788 } 3789 restore(); 3790 3791 bind(filtered); 3792 } 3793 3794 // Called from init_globals() after universe_init() and before interpreter_init() 3795 void g1_barrier_stubs_init() { 3796 CollectedHeap* heap = Universe::heap(); 3797 if (heap->kind() == CollectedHeap::G1CollectedHeap) { 3798 // Only needed for G1 3799 if (dirty_card_log_enqueue == 0) { 3800 G1SATBCardTableLoggingModRefBS* bs = 3801 barrier_set_cast<G1SATBCardTableLoggingModRefBS>(heap->barrier_set()); 3802 generate_dirty_card_log_enqueue(bs->byte_map_base); 3803 assert(dirty_card_log_enqueue != 0, "postcondition."); 3804 } 3805 if (satb_log_enqueue_with_frame == 0) { 3806 generate_satb_log_enqueue(true); 3807 assert(satb_log_enqueue_with_frame != 0, "postcondition."); 3808 } 3809 if (satb_log_enqueue_frameless == 0) { 3810 generate_satb_log_enqueue(false); 3811 assert(satb_log_enqueue_frameless != 0, "postcondition."); 3812 } 3813 } 3814 } 3815 3816 #endif // INCLUDE_ALL_GCS 3817 /////////////////////////////////////////////////////////////////////////////////// 3818 3819 void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) { 3820 // If we're writing constant NULL, we can skip the write barrier. 3821 if (new_val == G0) return; 3822 CardTableModRefBS* bs = 3823 barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set()); 3824 assert(bs->kind() == BarrierSet::CardTableForRS || 3825 bs->kind() == BarrierSet::CardTableExtension, "wrong barrier"); 3826 card_table_write(bs->byte_map_base, tmp, store_addr); 3827 } 3828 3829 // ((OopHandle)result).resolve(); 3830 void MacroAssembler::resolve_oop_handle(Register result) { 3831 // OopHandle::resolve is an indirection. 3832 ld_ptr(result, 0, result); 3833 } 3834 3835 void MacroAssembler::load_mirror(Register mirror, Register method) { 3836 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 3837 ld_ptr(method, in_bytes(Method::const_offset()), mirror); 3838 ld_ptr(mirror, in_bytes(ConstMethod::constants_offset()), mirror); 3839 ld_ptr(mirror, ConstantPool::pool_holder_offset_in_bytes(), mirror); 3840 ld_ptr(mirror, mirror_offset, mirror); 3841 resolve_oop_handle(mirror); 3842 } 3843 3844 void MacroAssembler::load_klass(Register src_oop, Register klass) { 3845 // The number of bytes in this code is used by 3846 // MachCallDynamicJavaNode::ret_addr_offset() 3847 // if this changes, change that. 
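// With compressed class pointers the klass field holds a 32-bit narrow
// value that must be decoded; otherwise it holds a full pointer.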
3848 if (UseCompressedClassPointers) { 3849 lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass); 3850 decode_klass_not_null(klass); 3851 } else { 3852 ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass); 3853 } 3854 } 3855 3856 void MacroAssembler::store_klass(Register klass, Register dst_oop) { 3857 if (UseCompressedClassPointers) { 3858 assert(dst_oop != klass, "not enough registers"); 3859 encode_klass_not_null(klass); 3860 st(klass, dst_oop, oopDesc::klass_offset_in_bytes()); 3861 } else { 3862 st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes()); 3863 } 3864 } 3865 3866 void MacroAssembler::store_klass_gap(Register s, Register d) { 3867 if (UseCompressedClassPointers) { 3868 assert(s != d, "not enough registers"); 3869 st(s, d, oopDesc::klass_gap_offset_in_bytes()); 3870 } 3871 } 3872 3873 void MacroAssembler::load_heap_oop(const Address& s, Register d) { 3874 if (UseCompressedOops) { 3875 lduw(s, d); 3876 decode_heap_oop(d); 3877 } else { 3878 ld_ptr(s, d); 3879 } 3880 } 3881 3882 void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) { 3883 if (UseCompressedOops) { 3884 lduw(s1, s2, d); 3885 decode_heap_oop(d, d); 3886 } else { 3887 ld_ptr(s1, s2, d); 3888 } 3889 } 3890 3891 void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) { 3892 if (UseCompressedOops) { 3893 lduw(s1, simm13a, d); 3894 decode_heap_oop(d, d); 3895 } else { 3896 ld_ptr(s1, simm13a, d); 3897 } 3898 } 3899 3900 void MacroAssembler::load_heap_oop(Register s1, RegisterOrConstant s2, Register d) { 3901 if (s2.is_constant()) load_heap_oop(s1, s2.as_constant(), d); 3902 else load_heap_oop(s1, s2.as_register(), d); 3903 } 3904 3905 void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) { 3906 if (UseCompressedOops) { 3907 assert(s1 != d && s2 != d, "not enough registers"); 3908 encode_heap_oop(d); 3909 st(d, s1, s2); 3910 } else { 3911 st_ptr(d, s1, s2); 3912 } 3913 } 3914 3915 void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) { 3916 if (UseCompressedOops) { 3917 assert(s1 != d, "not enough registers"); 3918 encode_heap_oop(d); 3919 st(d, s1, simm13a); 3920 } else { 3921 st_ptr(d, s1, simm13a); 3922 } 3923 } 3924 3925 void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) { 3926 if (UseCompressedOops) { 3927 assert(a.base() != d, "not enough registers"); 3928 encode_heap_oop(d); 3929 st(d, a, offset); 3930 } else { 3931 st_ptr(d, a, offset); 3932 } 3933 } 3934 3935 3936 void MacroAssembler::encode_heap_oop(Register src, Register dst) { 3937 assert (UseCompressedOops, "must be compressed"); 3938 assert (Universe::heap() != NULL, "java heap should be initialized"); 3939 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3940 verify_oop(src); 3941 if (Universe::narrow_oop_base() == NULL) { 3942 srlx(src, LogMinObjAlignmentInBytes, dst); 3943 return; 3944 } 3945 Label done; 3946 if (src == dst) { 3947 // optimize for frequent case src == dst 3948 bpr(rc_nz, true, Assembler::pt, src, done); 3949 delayed() -> sub(src, G6_heapbase, dst); // annulled if not taken 3950 bind(done); 3951 srlx(src, LogMinObjAlignmentInBytes, dst); 3952 } else { 3953 bpr(rc_z, false, Assembler::pn, src, done); 3954 delayed() -> mov(G0, dst); 3955 // could be moved before the branch and annul the delay slot, 3956 // but may add some unneeded work decoding null 3957 sub(src, G6_heapbase, dst); 3958 srlx(dst, LogMinObjAlignmentInBytes, dst); 3959 bind(done); 3960 } 3961 } 3962 3963 3964 void
MacroAssembler::encode_heap_oop_not_null(Register r) { 3965 assert (UseCompressedOops, "must be compressed"); 3966 assert (Universe::heap() != NULL, "java heap should be initialized"); 3967 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3968 verify_oop(r); 3969 if (Universe::narrow_oop_base() != NULL) 3970 sub(r, G6_heapbase, r); 3971 srlx(r, LogMinObjAlignmentInBytes, r); 3972 } 3973 3974 void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) { 3975 assert (UseCompressedOops, "must be compressed"); 3976 assert (Universe::heap() != NULL, "java heap should be initialized"); 3977 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3978 verify_oop(src); 3979 if (Universe::narrow_oop_base() == NULL) { 3980 srlx(src, LogMinObjAlignmentInBytes, dst); 3981 } else { 3982 sub(src, G6_heapbase, dst); 3983 srlx(dst, LogMinObjAlignmentInBytes, dst); 3984 } 3985 } 3986 3987 // Same algorithm as oops.inline.hpp decode_heap_oop. 3988 void MacroAssembler::decode_heap_oop(Register src, Register dst) { 3989 assert (UseCompressedOops, "must be compressed"); 3990 assert (Universe::heap() != NULL, "java heap should be initialized"); 3991 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3992 sllx(src, LogMinObjAlignmentInBytes, dst); 3993 if (Universe::narrow_oop_base() != NULL) { 3994 Label done; 3995 bpr(rc_nz, true, Assembler::pt, dst, done); 3996 delayed() -> add(dst, G6_heapbase, dst); // annulled if not taken 3997 bind(done); 3998 } 3999 verify_oop(dst); 4000 } 4001 4002 void MacroAssembler::decode_heap_oop_not_null(Register r) { 4003 // Do not add assert code to this unless you change vtableStubs_sparc.cpp 4004 // pd_code_size_limit. 4005 // Also do not verify_oop as this is called by verify_oop. 4006 assert (UseCompressedOops, "must be compressed"); 4007 assert (Universe::heap() != NULL, "java heap should be initialized"); 4008 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 4009 sllx(r, LogMinObjAlignmentInBytes, r); 4010 if (Universe::narrow_oop_base() != NULL) 4011 add(r, G6_heapbase, r); 4012 } 4013 4014 void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) { 4015 // Do not add assert code to this unless you change vtableStubs_sparc.cpp 4016 // pd_code_size_limit. 4017 // Also do not verify_oop as this is called by verify_oop.
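// Computes dst = (src << LogMinObjAlignmentInBytes), plus narrow_oop_base
// when one is in use; the caller guarantees src is not the null narrow oop.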
4018 assert (UseCompressedOops, "must be compressed"); 4019 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 4020 sllx(src, LogMinObjAlignmentInBytes, dst); 4021 if (Universe::narrow_oop_base() != NULL) 4022 add(dst, G6_heapbase, dst); 4023 } 4024 4025 void MacroAssembler::encode_klass_not_null(Register r) { 4026 assert (UseCompressedClassPointers, "must be compressed"); 4027 if (Universe::narrow_klass_base() != NULL) { 4028 assert(r != G6_heapbase, "bad register choice"); 4029 set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); 4030 sub(r, G6_heapbase, r); 4031 if (Universe::narrow_klass_shift() != 0) { 4032 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 4033 srlx(r, LogKlassAlignmentInBytes, r); 4034 } 4035 reinit_heapbase(); 4036 } else { 4037 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); 4038 srlx(r, Universe::narrow_klass_shift(), r); 4039 } 4040 } 4041 4042 void MacroAssembler::encode_klass_not_null(Register src, Register dst) { 4043 if (src == dst) { 4044 encode_klass_not_null(src); 4045 } else { 4046 assert (UseCompressedClassPointers, "must be compressed"); 4047 if (Universe::narrow_klass_base() != NULL) { 4048 set((intptr_t)Universe::narrow_klass_base(), dst); 4049 sub(src, dst, dst); 4050 if (Universe::narrow_klass_shift() != 0) { 4051 srlx(dst, LogKlassAlignmentInBytes, dst); 4052 } 4053 } else { 4054 // shift src into dst 4055 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); 4056 srlx(src, Universe::narrow_klass_shift(), dst); 4057 } 4058 } 4059 } 4060 4061 // Function instr_size_for_decode_klass_not_null() counts the instructions 4062 // generated by decode_klass_not_null() and reinit_heapbase(). Hence, if 4063 // the instructions they generate change, then this method needs to be updated. 4064 int MacroAssembler::instr_size_for_decode_klass_not_null() { 4065 assert (UseCompressedClassPointers, "only for compressed klass ptrs"); 4066 int num_instrs = 1; // shift src,dst or add 4067 if (Universe::narrow_klass_base() != NULL) { 4068 // set + add + set 4069 num_instrs += insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + 4070 insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base()); 4071 if (Universe::narrow_klass_shift() != 0) { 4072 num_instrs += 1; // sllx 4073 } 4074 } 4075 return num_instrs * BytesPerInstWord; 4076 } 4077 4078 // !!! If the instructions that get generated here change then function 4079 // instr_size_for_decode_klass_not_null() needs to get updated. 4080 void MacroAssembler::decode_klass_not_null(Register r) { 4081 // Do not add assert code to this unless you change vtableStubs_sparc.cpp 4082 // pd_code_size_limit. 
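// When a klass base is in use, decoding computes
// narrow_klass_base + (r << narrow_klass_shift); G6_heapbase is borrowed
// as a temporary for the base and restored afterwards via reinit_heapbase().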
4083 assert (UseCompressedClassPointers, "must be compressed"); 4084 if (Universe::narrow_klass_base() != NULL) { 4085 assert(r != G6_heapbase, "bad register choice"); 4086 set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); 4087 if (Universe::narrow_klass_shift() != 0) 4088 sllx(r, LogKlassAlignmentInBytes, r); 4089 add(r, G6_heapbase, r); 4090 reinit_heapbase(); 4091 } else { 4092 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); 4093 sllx(r, Universe::narrow_klass_shift(), r); 4094 } 4095 } 4096 4097 void MacroAssembler::decode_klass_not_null(Register src, Register dst) { 4098 if (src == dst) { 4099 decode_klass_not_null(src); 4100 } else { 4101 // Do not add assert code to this unless you change vtableStubs_sparc.cpp 4102 // pd_code_size_limit. 4103 assert (UseCompressedClassPointers, "must be compressed"); 4104 if (Universe::narrow_klass_base() != NULL) { 4105 if (Universe::narrow_klass_shift() != 0) { 4106 assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice"); 4107 set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); 4108 sllx(src, LogKlassAlignmentInBytes, dst); 4109 add(dst, G6_heapbase, dst); 4110 reinit_heapbase(); 4111 } else { 4112 set((intptr_t)Universe::narrow_klass_base(), dst); 4113 add(src, dst, dst); 4114 } 4115 } else { 4116 // shift/mov src into dst. 4117 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); 4118 sllx(src, Universe::narrow_klass_shift(), dst); 4119 } 4120 } 4121 } 4122 4123 void MacroAssembler::reinit_heapbase() { 4124 if (UseCompressedOops || UseCompressedClassPointers) { 4125 if (Universe::heap() != NULL) { 4126 set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase); 4127 } else { 4128 AddressLiteral base(Universe::narrow_ptrs_base_addr()); 4129 load_ptr_contents(base, G6_heapbase); 4130 } 4131 } 4132 } 4133 4134 #ifdef COMPILER2 4135 4136 // Compress char[] to byte[] by compressing 16 bytes at once. Return 0 on failure. 
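// A scalar sketch of what the fast path computes (illustrative only):
//   for (int i = 0; i < cnt; i++) {
//     jchar c = src[i];
//     if (c > 0xff) return 0;     // non-latin1 character: fail
//     dst[i] = (jbyte)c;
//   }
// The VIS3 path tests eight chars at a time against the 0xff00ff00ff00ff00
// mask and packs them with bshuffle.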
4137 void MacroAssembler::string_compress_16(Register src, Register dst, Register cnt, Register result, 4138 Register tmp1, Register tmp2, Register tmp3, Register tmp4, 4139 FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, Label& Ldone) { 4140 Label Lloop, Lslow; 4141 assert(UseVIS >= 3, "VIS3 is required"); 4142 assert_different_registers(src, dst, cnt, tmp1, tmp2, tmp3, tmp4, result); 4143 assert_different_registers(ftmp1, ftmp2, ftmp3); 4144 4145 // Check if cnt >= 8 (= 16 bytes) 4146 cmp(cnt, 8); 4147 br(Assembler::less, false, Assembler::pn, Lslow); 4148 delayed()->mov(cnt, result); // copy count 4149 4150 // Check for 8-byte alignment of src and dst 4151 or3(src, dst, tmp1); 4152 andcc(tmp1, 7, G0); 4153 br(Assembler::notZero, false, Assembler::pn, Lslow); 4154 delayed()->nop(); 4155 4156 // Set mask for bshuffle instruction 4157 Register mask = tmp4; 4158 set(0x13579bdf, mask); 4159 bmask(mask, G0, G0); 4160 4161 // Set mask to 0xff00 ff00 ff00 ff00 to check for non-latin1 characters 4162 Assembler::sethi(0xff00fc00, mask); // mask = 0x0000 0000 ff00 fc00 4163 add(mask, 0x300, mask); // mask = 0x0000 0000 ff00 ff00 4164 sllx(mask, 32, tmp1); // tmp1 = 0xff00 ff00 0000 0000 4165 or3(mask, tmp1, mask); // mask = 0xff00 ff00 ff00 ff00 4166 4167 // Load first 8 bytes 4168 ldx(src, 0, tmp1); 4169 4170 bind(Lloop); 4171 // Load next 8 bytes 4172 ldx(src, 8, tmp2); 4173 4174 // Check for non-latin1 character by testing if the most significant byte of a char is set. 4175 // Although we have to move the data between integer and floating point registers, this is 4176 // still faster than the corresponding VIS instructions (ford/fand/fcmpd). 4177 or3(tmp1, tmp2, tmp3); 4178 btst(tmp3, mask); 4179 // annul zeroing if branch is not taken to preserve original count 4180 brx(Assembler::notZero, true, Assembler::pn, Ldone); 4181 delayed()->mov(G0, result); // 0 - failed 4182 4183 // Move bytes into float register 4184 movxtod(tmp1, ftmp1); 4185 movxtod(tmp2, ftmp2); 4186 4187 // Compress by copying one byte per char from ftmp1 and ftmp2 to ftmp3 4188 bshuffle(ftmp1, ftmp2, ftmp3); 4189 stf(FloatRegisterImpl::D, ftmp3, dst, 0); 4190 4191 // Increment addresses and decrement count 4192 inc(src, 16); 4193 inc(dst, 8); 4194 dec(cnt, 8); 4195 4196 cmp(cnt, 8); 4197 // annul LDX if branch is not taken to prevent access past end of string 4198 br(Assembler::greaterEqual, true, Assembler::pt, Lloop); 4199 delayed()->ldx(src, 0, tmp1); 4200 4201 // Fallback to slow version 4202 bind(Lslow); 4203 } 4204 4205 // Compress char[] to byte[]. Return 0 on failure. 4206 void MacroAssembler::string_compress(Register src, Register dst, Register cnt, Register result, Register tmp, Label& Ldone) { 4207 Label Lloop; 4208 assert_different_registers(src, dst, cnt, tmp, result); 4209 4210 lduh(src, 0, tmp); 4211 4212 bind(Lloop); 4213 inc(src, sizeof(jchar)); 4214 cmp(tmp, 0xff); 4215 // annul zeroing if branch is not taken to preserve original count 4216 br(Assembler::greater, true, Assembler::pn, Ldone); // don't check xcc 4217 delayed()->mov(G0, result); // 0 - failed 4218 deccc(cnt); 4219 stb(tmp, dst, 0); 4220 inc(dst); 4221 // annul LDUH if branch is not taken to prevent access past end of string 4222 br(Assembler::notZero, true, Assembler::pt, Lloop); 4223 delayed()->lduh(src, 0, tmp); // hoisted 4224 } 4225 4226 // Inflate byte[] to char[] by inflating 16 bytes at once. 
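// Scalar sketch of the inflation (illustrative only):
//   for (int i = 0; i < cnt; i++) dst[i] = (jchar)(src[i] & 0xff);
// The VIS3 path below uses fpmerge with a zeroed register to interleave a
// zero byte ahead of each data byte, processing eight source bytes per
// iteration.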
4227 void MacroAssembler::string_inflate_16(Register src, Register dst, Register cnt, Register tmp, 4228 FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, FloatRegister ftmp4, Label& Ldone) { 4229 Label Lloop, Lslow; 4230 assert(UseVIS >= 3, "VIS3 is required"); 4231 assert_different_registers(src, dst, cnt, tmp); 4232 assert_different_registers(ftmp1, ftmp2, ftmp3, ftmp4); 4233 4234 // Check if cnt >= 8 (= 16 bytes) 4235 cmp(cnt, 8); 4236 br(Assembler::less, false, Assembler::pn, Lslow); 4237 delayed()->nop(); 4238 4239 // Check for 8-byte alignment of src and dst 4240 or3(src, dst, tmp); 4241 andcc(tmp, 7, G0); 4242 br(Assembler::notZero, false, Assembler::pn, Lslow); 4243 // Initialize float register to zero 4244 FloatRegister zerof = ftmp4; 4245 delayed()->fzero(FloatRegisterImpl::D, zerof); 4246 4247 // Load first 8 bytes 4248 ldf(FloatRegisterImpl::D, src, 0, ftmp1); 4249 4250 bind(Lloop); 4251 inc(src, 8); 4252 dec(cnt, 8); 4253 4254 // Inflate the string by interleaving each byte from the source array 4255 // with a zero byte and storing the result in the destination array. 4256 fpmerge(zerof, ftmp1->successor(), ftmp2); 4257 stf(FloatRegisterImpl::D, ftmp2, dst, 8); 4258 fpmerge(zerof, ftmp1, ftmp3); 4259 stf(FloatRegisterImpl::D, ftmp3, dst, 0); 4260 4261 inc(dst, 16); 4262 4263 cmp(cnt, 8); 4264 // annul LDF if branch is not taken to prevent access past end of string 4265 br(Assembler::greaterEqual, true, Assembler::pt, Lloop); 4266 delayed()->ldf(FloatRegisterImpl::D, src, 0, ftmp1); 4267 4268 // Fallback to slow version 4269 bind(Lslow); 4270 } 4271 4272 // Inflate byte[] to char[]. 4273 void MacroAssembler::string_inflate(Register src, Register dst, Register cnt, Register tmp, Label& Ldone) { 4274 Label Loop; 4275 assert_different_registers(src, dst, cnt, tmp); 4276 4277 ldub(src, 0, tmp); 4278 bind(Loop); 4279 inc(src); 4280 deccc(cnt); 4281 sth(tmp, dst, 0); 4282 inc(dst, sizeof(jchar)); 4283 // annul LDUB if branch is not taken to prevent access past end of string 4284 br(Assembler::notZero, true, Assembler::pt, Loop); 4285 delayed()->ldub(src, 0, tmp); // hoisted 4286 } 4287 4288 void MacroAssembler::string_compare(Register str1, Register str2, 4289 Register cnt1, Register cnt2, 4290 Register tmp1, Register tmp2, 4291 Register result, int ae) { 4292 Label Ldone, Lloop; 4293 assert_different_registers(str1, str2, cnt1, cnt2, tmp1, result); 4294 int stride1, stride2; 4295 4296 // Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a) 4297 // we interchange str1 and str2 in the UL case and negate the result. 4298 // Like this, str1 is always latin1 encoded, except for the UU case. 4299 4300 if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { 4301 srl(cnt2, 1, cnt2); 4302 } 4303 4304 // See if the lengths are different, and calculate min in cnt1. 4305 // Save diff in case we need it for a tie-breaker.
4306 Label Lskip; 4307 Register diff = tmp1; 4308 subcc(cnt1, cnt2, diff); 4309 br(Assembler::greater, true, Assembler::pt, Lskip); 4310 // cnt2 is shorter, so use its count: 4311 delayed()->mov(cnt2, cnt1); 4312 bind(Lskip); 4313 4314 // Rename registers 4315 Register limit1 = cnt1; 4316 Register limit2 = limit1; 4317 Register chr1 = result; 4318 Register chr2 = cnt2; 4319 if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { 4320 // We need an additional register to keep track of two limits 4321 assert_different_registers(str1, str2, cnt1, cnt2, tmp1, tmp2, result); 4322 limit2 = tmp2; 4323 } 4324 4325 // Is the minimum length zero? 4326 cmp(limit1, (int)0); // use cast to resolve overloading ambiguity 4327 br(Assembler::equal, true, Assembler::pn, Ldone); 4328 // result is difference in lengths 4329 if (ae == StrIntrinsicNode::UU) { 4330 delayed()->sra(diff, 1, result); // Divide by 2 to get number of chars 4331 } else { 4332 delayed()->mov(diff, result); 4333 } 4334 4335 // Load first characters 4336 if (ae == StrIntrinsicNode::LL) { 4337 stride1 = stride2 = sizeof(jbyte); 4338 ldub(str1, 0, chr1); 4339 ldub(str2, 0, chr2); 4340 } else if (ae == StrIntrinsicNode::UU) { 4341 stride1 = stride2 = sizeof(jchar); 4342 lduh(str1, 0, chr1); 4343 lduh(str2, 0, chr2); 4344 } else { 4345 stride1 = sizeof(jbyte); 4346 stride2 = sizeof(jchar); 4347 ldub(str1, 0, chr1); 4348 lduh(str2, 0, chr2); 4349 } 4350 4351 // Compare first characters 4352 subcc(chr1, chr2, chr1); 4353 br(Assembler::notZero, false, Assembler::pt, Ldone); 4354 assert(chr1 == result, "result must be pre-placed"); 4355 delayed()->nop(); 4356 4357 // Check if the strings start at same location 4358 cmp(str1, str2); 4359 brx(Assembler::equal, true, Assembler::pn, Ldone); 4360 delayed()->mov(G0, result); // result is zero 4361 4362 // We have no guarantee that on 64 bit the higher half of limit is 0 4363 signx(limit1); 4364 4365 // Get limit 4366 if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { 4367 sll(limit1, 1, limit2); 4368 subcc(limit2, stride2, chr2); 4369 } 4370 subcc(limit1, stride1, chr1); 4371 br(Assembler::zero, true, Assembler::pn, Ldone); 4372 // result is difference in lengths 4373 if (ae == StrIntrinsicNode::UU) { 4374 delayed()->sra(diff, 1, result); // Divide by 2 to get number of chars 4375 } else { 4376 delayed()->mov(diff, result); 4377 } 4378 4379 // Shift str1 and str2 to the end of the arrays, negate limit 4380 add(str1, limit1, str1); 4381 add(str2, limit2, str2); 4382 neg(chr1, limit1); // limit1 = -(limit1-stride1) 4383 if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { 4384 neg(chr2, limit2); // limit2 = -(limit2-stride2) 4385 } 4386 4387 // Compare the rest of the characters 4388 load_sized_value(Address(str1, limit1), chr1, (ae == StrIntrinsicNode::UU) ? 2 : 1, false); 4389 4390 bind(Lloop); 4391 load_sized_value(Address(str2, limit2), chr2, (ae == StrIntrinsicNode::LL) ? 1 : 2, false); 4392 4393 subcc(chr1, chr2, chr1); 4394 br(Assembler::notZero, false, Assembler::pt, Ldone); 4395 assert(chr1 == result, "result must be pre-placed"); 4396 delayed()->inccc(limit1, stride1); 4397 if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { 4398 inccc(limit2, stride2); 4399 } 4400 4401 // annul LDUB if branch is not taken to prevent access past end of string 4402 br(Assembler::notZero, true, Assembler::pt, Lloop); 4403 delayed()->load_sized_value(Address(str1, limit1), chr1, (ae == StrIntrinsicNode::UU) ? 
2 : 1, false); 4404 4405 // If strings are equal up to min length, return the length difference. 4406 if (ae == StrIntrinsicNode::UU) { 4407 // Divide by 2 to get number of chars 4408 sra(diff, 1, result); 4409 } else { 4410 mov(diff, result); 4411 } 4412 4413 // Otherwise, return the difference between the first mismatched chars. 4414 bind(Ldone); 4415 if(ae == StrIntrinsicNode::UL) { 4416 // Negate result (see note above) 4417 neg(result); 4418 } 4419 } 4420 4421 void MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2, 4422 Register limit, Register tmp, Register result, bool is_byte) { 4423 Label Ldone, Lloop, Lremaining; 4424 assert_different_registers(ary1, ary2, limit, tmp, result); 4425 4426 int length_offset = arrayOopDesc::length_offset_in_bytes(); 4427 int base_offset = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR); 4428 assert(base_offset % 8 == 0, "Base offset must be 8-byte aligned"); 4429 4430 if (is_array_equ) { 4431 // return true if the same array 4432 cmp(ary1, ary2); 4433 brx(Assembler::equal, true, Assembler::pn, Ldone); 4434 delayed()->mov(1, result); // equal 4435 4436 br_null(ary1, true, Assembler::pn, Ldone); 4437 delayed()->clr(result); // not equal 4438 4439 br_null(ary2, true, Assembler::pn, Ldone); 4440 delayed()->clr(result); // not equal 4441 4442 // load the lengths of arrays 4443 ld(Address(ary1, length_offset), limit); 4444 ld(Address(ary2, length_offset), tmp); 4445 4446 // return false if the two arrays are not equal length 4447 cmp(limit, tmp); 4448 br(Assembler::notEqual, true, Assembler::pn, Ldone); 4449 delayed()->clr(result); // not equal 4450 } 4451 4452 cmp_zero_and_br(Assembler::zero, limit, Ldone, true, Assembler::pn); 4453 delayed()->mov(1, result); // zero-length arrays are equal 4454 4455 if (is_array_equ) { 4456 // load array addresses 4457 add(ary1, base_offset, ary1); 4458 add(ary2, base_offset, ary2); 4459 // set byte count 4460 if (!is_byte) { 4461 sll(limit, exact_log2(sizeof(jchar)), limit); 4462 } 4463 } else { 4464 // We have no guarantee that on 64 bit the higher half of limit is 0 4465 signx(limit); 4466 } 4467 4468 #ifdef ASSERT 4469 // Sanity check for doubleword (8-byte) alignment of ary1 and ary2. 4470 // Guaranteed on 64-bit systems (see arrayOopDesc::header_size_in_bytes()). 4471 Label Laligned; 4472 or3(ary1, ary2, tmp); 4473 andcc(tmp, 7, tmp); 4474 br_null_short(tmp, Assembler::pn, Laligned); 4475 STOP("First array element is not 8-byte aligned."); 4476 should_not_reach_here(); 4477 bind(Laligned); 4478 #endif 4479 4480 // Shift ary1 and ary2 to the end of the arrays, negate limit 4481 add(ary1, limit, ary1); 4482 add(ary2, limit, ary2); 4483 neg(limit, limit); 4484 4485 // MAIN LOOP 4486 // Load and compare array elements of size 'byte_width' until the elements are not 4487 // equal or we reached the end of the arrays. If the size of the arrays is not a 4488 // multiple of 'byte_width', we simply read over the end of the array, bail out and 4489 // compare the remaining bytes below by skipping the garbage bytes. 
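// ary1/ary2 were advanced past the end above and 'limit' negated, so
// (aryN + limit) starts at the first element and walks forward as limit
// counts up toward zero.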
4490 ldx(ary1, limit, result); 4491 bind(Lloop); 4492 ldx(ary2, limit, tmp); 4493 inccc(limit, 8); 4494 // Bail out if we reached the end (but still do the comparison) 4495 br(Assembler::positive, false, Assembler::pn, Lremaining); 4496 delayed()->cmp(result, tmp); 4497 // Check equality of elements 4498 brx(Assembler::equal, false, Assembler::pt, target(Lloop)); 4499 delayed()->ldx(ary1, limit, result); 4500 4501 ba(Ldone); 4502 delayed()->clr(result); // not equal 4503 4504 // TAIL COMPARISON 4505 // We got here because we reached the end of the arrays. 'limit' is the number of 4506 // garbage bytes we may have compared by reading over the end of the arrays. Shift 4507 // out the garbage and compare the remaining elements. 4508 bind(Lremaining); 4509 // Optimistic shortcut: elements potentially including garbage are equal 4510 brx(Assembler::equal, true, Assembler::pt, target(Ldone)); 4511 delayed()->mov(1, result); // equal 4512 // Shift 'limit' bytes to the right and compare 4513 sll(limit, 3, limit); // bytes to bits 4514 srlx(result, limit, result); 4515 srlx(tmp, limit, tmp); 4516 cmp(result, tmp); 4517 clr(result); 4518 movcc(Assembler::equal, false, xcc, 1, result); 4519 4520 bind(Ldone); 4521 } 4522 4523 void MacroAssembler::has_negatives(Register inp, Register size, Register result, Register t2, Register t3, Register t4, Register t5) { 4524 4525 // test for negative bytes in input string of a given size 4526 // result 1 if found, 0 otherwise. 4527 4528 Label Lcore, Ltail, Lreturn, Lcore_rpt; 4529 4530 assert_different_registers(inp, size, t2, t3, t4, t5, result); 4531 4532 Register i = result; // result used as integer index i until very end 4533 Register lmask = t2; // t2 is aliased to lmask 4534 4535 // INITIALIZATION 4536 // =========================================================== 4537 // initialize highbits mask -> lmask = 0x8080808080808080 (8B/64b) 4538 // compute unaligned offset -> i 4539 // compute core end index -> t5 4540 Assembler::sethi(0x80808000, t2); //! sethi macro fails to emit optimal 4541 add(t2, 0x80, t2); 4542 sllx(t2, 32, t3); 4543 or3(t3, t2, lmask); // 0x8080808080808080 -> lmask 4544 sra(size,0,size); 4545 andcc(inp, 0x7, i); // unaligned offset -> i 4546 br(Assembler::zero, true, Assembler::pn, Lcore); // starts 8B aligned? 4547 delayed()->add(size, -8, t5); // (annulled) core end index -> t5 4548 4549 // =========================================================== 4550 4551 // UNALIGNED HEAD 4552 // =========================================================== 4553 // * unaligned head handling: grab aligned 8B containing unaligned inp(ut) 4554 // * obliterate (ignore) bytes outside string by shifting off reg ends 4555 // * compare with bitmask, short circuit return true if one or more high 4556 // bits set. 4557 cmp(size, 0); 4558 br(Assembler::zero, true, Assembler::pn, Lreturn); // short-circuit? 4559 delayed()->mov(0,result); // annulled so i not clobbered for following 4560 neg(i, t4); 4561 add(i, size, t5); 4562 ldx(inp, t4, t3); // raw aligned 8B containing unaligned head -> t3 4563 mov(8, t4); 4564 sub(t4, t5, t4); 4565 sra(t4, 31, t5); 4566 andn(t4, t5, t5); 4567 add(i, t5, t4); 4568 sll(t5, 3, t5); 4569 sll(t4, 3, t4); // # bits to shift right, left -> t5,t4 4570 srlx(t3, t5, t3); 4571 sllx(t3, t4, t3); // bytes outside string in 8B header obliterated -> t3 4572 andcc(lmask, t3, G0); 4573 brx(Assembler::notZero, true, Assembler::pn, Lreturn); // short circuit?
4574 delayed()->mov(1,result); // annulled so i not clobbered for following 4575 add(size, -8, t5); // core end index -> t5 4576 mov(8, t4); 4577 sub(t4, i, i); // # bytes examined in unaligned head (<8) -> i 4578 // =========================================================== 4579 4580 // ALIGNED CORE 4581 // =========================================================== 4582 // * iterate index i over aligned 8B sections of core, comparing with 4583 // bitmask, short circuit return true if one or more high bits set 4584 // t5 contains core end index/loop limit which is the index 4585 // of the MSB of last (unaligned) 8B fully contained in the string. 4586 // inp contains address of first byte in string/array 4587 // lmask contains 8B high bit mask for comparison 4588 // i contains next index to be processed (adr. inp+i is on 8B boundary) 4589 bind(Lcore); 4590 cmp_and_br_short(i, t5, Assembler::greater, Assembler::pn, Ltail); 4591 bind(Lcore_rpt); 4592 ldx(inp, i, t3); 4593 andcc(t3, lmask, G0); 4594 brx(Assembler::notZero, true, Assembler::pn, Lreturn); 4595 delayed()->mov(1, result); // annulled so i not clobbered for following 4596 add(i, 8, i); 4597 cmp_and_br_short(i, t5, Assembler::lessEqual, Assembler::pn, Lcore_rpt); 4598 // =========================================================== 4599 4600 // ALIGNED TAIL (<8B) 4601 // =========================================================== 4602 // handle aligned tail of 7B or less as complete 8B, obliterating end of 4603 // string bytes by shifting them off end, compare what's left with bitmask 4604 // inp contains address of first byte in string/array 4605 // lmask contains 8B high bit mask for comparison 4606 // i contains next index to be processed (adr. inp+i is on 8B boundary) 4607 bind(Ltail); 4608 subcc(size, i, t4); // # of remaining bytes in string -> t4 4609 // return 0 if no more remaining bytes 4610 br(Assembler::lessEqual, true, Assembler::pn, Lreturn); 4611 delayed()->mov(0, result); // annulled so i not clobbered for following 4612 ldx(inp, i, t3); // load final 8B (aligned) containing tail -> t3 4613 mov(8, t5); 4614 sub(t5, t4, t4); 4615 mov(0, result); // ** i clobbered at this point 4616 sll(t4, 3, t4); // bits beyond end of string -> t4 4617 srlx(t3, t4, t3); // bytes beyond end now obliterated -> t3 4618 andcc(lmask, t3, G0); 4619 movcc(Assembler::notZero, false, xcc, 1, result); 4620 bind(Lreturn); 4621 } 4622 4623 #endif 4624 4625 4626 // Use BIS for zeroing (count is in bytes). 4627 void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) { 4628 assert(UseBlockZeroing && VM_Version::has_blk_zeroing(), "only works with BIS zeroing"); 4629 Register end = count; 4630 int cache_line_size = VM_Version::prefetch_data_size(); 4631 assert(cache_line_size > 0, "cache line size should be known for this code"); 4632 // Minimum count when BIS zeroing can be used since 4633 // it needs membar which is expensive. 4634 int block_zero_size = MAX2(cache_line_size*3, (int)BlockZeroingLowLimit); 4635 4636 Label small_loop; 4637 // Check if count is negative (dead code) or zero. 4638 // Note, count uses 64bit in 64 bit VM. 4639 cmp_and_brx_short(count, 0, Assembler::lessEqual, Assembler::pn, Ldone); 4640 4641 // Use BIS zeroing only for big arrays since it requires membar.
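// BIS (Block Initializing Store, stxa with an ASI_ST_BLKINIT_* ASI)
// allocates a cache line without fetching it from memory and zero-fills
// the rest of the line, which is why the loop below stores only once per
// cache line; the trailing membar orders these stores.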
4642 if (Assembler::is_simm13(block_zero_size)) { // < 4096 4643 cmp(count, block_zero_size); 4644 } else { 4645 set(block_zero_size, temp); 4646 cmp(count, temp); 4647 } 4648 br(Assembler::lessUnsigned, false, Assembler::pt, small_loop); 4649 delayed()->add(to, count, end); 4650 4651 // Note: size is >= three (32 bytes) cache lines. 4652 4653 // Clean the beginning of space up to next cache line. 4654 for (int offs = 0; offs < cache_line_size; offs += 8) { 4655 stx(G0, to, offs); 4656 } 4657 4658 // align to next cache line 4659 add(to, cache_line_size, to); 4660 and3(to, -cache_line_size, to); 4661 4662 // Note: size left >= two (32 bytes) cache lines. 4663 4664 // BIS should not be used to zero tail (64 bytes) 4665 // to avoid zeroing a header of the following object. 4666 sub(end, (cache_line_size*2)-8, end); 4667 4668 Label bis_loop; 4669 bind(bis_loop); 4670 stxa(G0, to, G0, Assembler::ASI_ST_BLKINIT_PRIMARY); 4671 add(to, cache_line_size, to); 4672 cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, bis_loop); 4673 4674 // BIS needs membar. 4675 membar(Assembler::StoreLoad); 4676 4677 add(end, (cache_line_size*2)-8, end); // restore end 4678 cmp_and_brx_short(to, end, Assembler::greaterEqualUnsigned, Assembler::pn, Ldone); 4679 4680 // Clean the tail. 4681 bind(small_loop); 4682 stx(G0, to, 0); 4683 add(to, 8, to); 4684 cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, small_loop); 4685 nop(); // Separate short branches 4686 } 4687 4688 /** 4689 * Update CRC-32[C] with a byte value according to constants in table 4690 * 4691 * @param [in,out]crc Register containing the crc. 4692 * @param [in]val Register containing the byte to fold into the CRC. 4693 * @param [in]table Register containing the table of crc constants. 
4694 * 4695 * uint32_t crc; 4696 * val = crc_table[(val ^ crc) & 0xFF]; 4697 * crc = val ^ (crc >> 8); 4698 */ 4699 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 4700 xor3(val, crc, val); 4701 and3(val, 0xFF, val); 4702 sllx(val, 2, val); 4703 lduw(table, val, val); 4704 srlx(crc, 8, crc); 4705 xor3(val, crc, crc); 4706 } 4707 4708 // Reverse byte order of lower 32 bits, assuming upper 32 bits all zeros 4709 void MacroAssembler::reverse_bytes_32(Register src, Register dst, Register tmp) { 4710 srlx(src, 24, dst); 4711 4712 sllx(src, 32+8, tmp); 4713 srlx(tmp, 32+24, tmp); 4714 sllx(tmp, 8, tmp); 4715 or3(dst, tmp, dst); 4716 4717 sllx(src, 32+16, tmp); 4718 srlx(tmp, 32+24, tmp); 4719 sllx(tmp, 16, tmp); 4720 or3(dst, tmp, dst); 4721 4722 sllx(src, 32+24, tmp); 4723 srlx(tmp, 32, tmp); 4724 or3(dst, tmp, dst); 4725 } 4726 4727 void MacroAssembler::movitof_revbytes(Register src, FloatRegister dst, Register tmp1, Register tmp2) { 4728 reverse_bytes_32(src, tmp1, tmp2); 4729 movxtod(tmp1, dst); 4730 } 4731 4732 void MacroAssembler::movftoi_revbytes(FloatRegister src, Register dst, Register tmp1, Register tmp2) { 4733 movdtox(src, tmp1); 4734 reverse_bytes_32(tmp1, dst, tmp2); 4735 } 4736 4737 void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register buf, int offset) { 4738 xmulx(xcrc_hi, xK_hi, xtmp_lo); 4739 xmulxhi(xcrc_hi, xK_hi, xtmp_hi); 4740 xmulxhi(xcrc_lo, xK_lo, xcrc_hi); 4741 xmulx(xcrc_lo, xK_lo, xcrc_lo); 4742 xor3(xcrc_lo, xtmp_lo, xcrc_lo); 4743 xor3(xcrc_hi, xtmp_hi, xcrc_hi); 4744 ldxl(buf, G0, xtmp_lo); 4745 inc(buf, 8); 4746 ldxl(buf, G0, xtmp_hi); 4747 inc(buf, 8); 4748 xor3(xcrc_lo, xtmp_lo, xcrc_lo); 4749 xor3(xcrc_hi, xtmp_hi, xcrc_hi); 4750 } 4751 4752 void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register xbuf_hi, Register xbuf_lo) { 4753 mov(xcrc_lo, xtmp_lo); 4754 mov(xcrc_hi, xtmp_hi); 4755 xmulx(xtmp_hi, xK_hi, xtmp_lo); 4756 xmulxhi(xtmp_hi, xK_hi, xtmp_hi); 4757 xmulxhi(xcrc_lo, xK_lo, xcrc_hi); 4758 xmulx(xcrc_lo, xK_lo, xcrc_lo); 4759 xor3(xcrc_lo, xbuf_lo, xcrc_lo); 4760 xor3(xcrc_hi, xbuf_hi, xcrc_hi); 4761 xor3(xcrc_lo, xtmp_lo, xcrc_lo); 4762 xor3(xcrc_hi, xtmp_hi, xcrc_hi); 4763 } 4764 4765 void MacroAssembler::fold_8bit_crc32(Register xcrc, Register table, Register xtmp, Register tmp) { 4766 and3(xcrc, 0xFF, tmp); 4767 sllx(tmp, 2, tmp); 4768 lduw(table, tmp, xtmp); 4769 srlx(xcrc, 8, xcrc); 4770 xor3(xtmp, xcrc, xcrc); 4771 } 4772 4773 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 4774 and3(crc, 0xFF, tmp); 4775 srlx(crc, 8, crc); 4776 sllx(tmp, 2, tmp); 4777 lduw(table, tmp, tmp); 4778 xor3(tmp, crc, crc); 4779 } 4780 4781 #define CRC32_TMP_REG_NUM 18 4782 4783 #define CRC32_CONST_64 0x163cd6124 4784 #define CRC32_CONST_96 0x0ccaa009e 4785 #define CRC32_CONST_160 0x1751997d0 4786 #define CRC32_CONST_480 0x1c6e41596 4787 #define CRC32_CONST_544 0x154442bd4 4788 4789 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table) { 4790 4791 Label L_cleanup_loop, L_cleanup_check, L_align_loop, L_align_check; 4792 Label L_main_loop_prologue; 4793 Label L_fold_512b, L_fold_512b_loop, L_fold_128b; 4794 Label L_fold_tail, L_fold_tail_loop; 4795 Label L_8byte_fold_loop, L_8byte_fold_check; 4796 4797 const Register tmp[CRC32_TMP_REG_NUM] = {L0, L1, L2, L3, L4, L5, L6, G1, 
void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table) {

  Label L_cleanup_loop, L_cleanup_check, L_align_loop, L_align_check;
  Label L_main_loop_prologue;
  Label L_fold_512b, L_fold_512b_loop, L_fold_128b;
  Label L_fold_tail, L_fold_tail_loop;
  Label L_8byte_fold_loop, L_8byte_fold_check;

  const Register tmp[CRC32_TMP_REG_NUM] = {L0, L1, L2, L3, L4, L5, L6, G1,
                                           I0, I1, I2, I3, I4, I5, I7, O4, O5, G3};

  // The constant registers alias the last two entries of tmp[]; only the
  // pair belonging to the current phase is live at any time.
  Register const_64  = tmp[CRC32_TMP_REG_NUM-1];
  Register const_96  = tmp[CRC32_TMP_REG_NUM-1];
  Register const_160 = tmp[CRC32_TMP_REG_NUM-2];
  Register const_480 = tmp[CRC32_TMP_REG_NUM-1];
  Register const_544 = tmp[CRC32_TMP_REG_NUM-2];

  set(ExternalAddress(StubRoutines::crc_table_addr()), table);

  not1(crc);   // ~c
  clruwu(crc); // clear upper 32 bits of crc

  // Check if below the cutoff; if so, proceed directly to the cleanup code
  mov(31, G4);
  cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_check);

  // Align buffer to an 8-byte boundary
  mov(8, O5);
  and3(buf, 0x7, O4);
  sub(O5, O4, O5);
  and3(O5, 0x7, O5);
  sub(len, O5, len);
  ba(L_align_check);
  delayed()->nop();

  // Alignment loop: table look-up method for up to 7 bytes
  bind(L_align_loop);
  ldub(buf, 0, O4);
  inc(buf);
  dec(O5);
  xor3(O4, crc, O4);
  and3(O4, 0xFF, O4);
  sllx(O4, 2, O4);
  lduw(table, O4, O4);
  srlx(crc, 8, crc);
  xor3(O4, crc, crc);
  bind(L_align_check);
  nop();
  cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_align_loop);

  // Aligned on a 64-bit (8-byte) boundary at this point
  // Check if still above the cutoff (31 bytes)
  mov(31, G4);
  cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_check);
  // At least 32 bytes left to process

  // Free up registers by storing them to FP registers
  for (int i = 0; i < CRC32_TMP_REG_NUM; i++) {
    movxtod(tmp[i], as_FloatRegister(2*i));
  }

  // Determine which loop to enter
  // Shared prologue
  ldxl(buf, G0, tmp[0]);
  inc(buf, 8);
  ldxl(buf, G0, tmp[1]);
  inc(buf, 8);
  xor3(tmp[0], crc, tmp[0]); // Fold CRC into first few bytes
  and3(crc, 0, crc);         // Clear out the crc register
  // Main loop needs at least 128 bytes
  mov(128, G4);
  mov(64, tmp[2]);
  cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_main_loop_prologue);
  // Less than 64 bytes
  nop();
  cmp_and_br_short(len, tmp[2], Assembler::lessUnsigned, Assembler::pt, L_fold_tail);
  // Between 64 and 127 bytes
  set64(CRC32_CONST_96, const_96, tmp[8]);
  set64(CRC32_CONST_160, const_160, tmp[9]);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[2], tmp[3], buf, 0);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[4], tmp[5], buf, 16);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[6], tmp[7], buf, 32);
  dec(len, 48);
  ba(L_fold_tail);
  delayed()->nop();

  bind(L_main_loop_prologue);
  for (int i = 2; i < 8; i++) {
    ldxl(buf, G0, tmp[i]);
    inc(buf, 8);
  }

  // Fold a total of 512 bits of polynomial on each iteration,
  // 128 bits per each of 4 parallel streams
  set64(CRC32_CONST_480, const_480, tmp[8]);
  set64(CRC32_CONST_544, const_544, tmp[9]);

  mov(128, G4);
  bind(L_fold_512b_loop);
  fold_128bit_crc32(tmp[1], tmp[0], const_480, const_544, tmp[9],  tmp[8],  buf,  0);
  fold_128bit_crc32(tmp[3], tmp[2], const_480, const_544, tmp[11], tmp[10], buf, 16);
  fold_128bit_crc32(tmp[5], tmp[4], const_480, const_544, tmp[13], tmp[12], buf, 32);
  fold_128bit_crc32(tmp[7], tmp[6], const_480, const_544, tmp[15], tmp[14], buf, 64);
  dec(len, 64);
  cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_fold_512b_loop);
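
  // The loop above maintains four independent 128-bit remainders (lanes),
  // one per 16-byte column of the 64-byte stride. A hedged C sketch of the
  // reduction performed next, with fold128() standing in for the register
  // variant of fold_128bit_crc32:
  //
  //   // lane0 = tmp[1]:tmp[0], lane1 = tmp[3]:tmp[2], ...
  //   lane0 = fold128(lane0, K96, K160) ^ lane1;
  //   lane0 = fold128(lane0, K96, K160) ^ lane2;
  //   lane0 = fold128(lane0, K96, K160) ^ lane3;
  //
  // Folding lane0 forward by 128 bits before xoring in the next lane
  // re-aligns the streams, leaving a single 128-bit remainder.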

  // Fold 512 bits down to 128 bits
  bind(L_fold_512b);
  set64(CRC32_CONST_96, const_96, tmp[8]);
  set64(CRC32_CONST_160, const_160, tmp[9]);

  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[3], tmp[2]);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[5], tmp[4]);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[7], tmp[6]);
  dec(len, 48);

  // Fold the remaining 128-bit data chunks
  bind(L_fold_tail);
  mov(32, G4);
  cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_fold_128b);

  set64(CRC32_CONST_96, const_96, tmp[8]);
  set64(CRC32_CONST_160, const_160, tmp[9]);

  bind(L_fold_tail_loop);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[2], tmp[3], buf, 0);
  sub(len, 16, len);
  cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_fold_tail_loop);

  // Fold the 128 bits in tmp[0] and tmp[1] into tmp[1]
  bind(L_fold_128b);

  set64(CRC32_CONST_64, const_64, tmp[4]);

  xmulx(const_64, tmp[0], tmp[2]);
  xmulxhi(const_64, tmp[0], tmp[3]);

  srl(tmp[2], G0, tmp[4]); // zero-extend lower 32 bits
  xmulx(const_64, tmp[4], tmp[4]);

  srlx(tmp[2], 32, tmp[2]);
  sllx(tmp[3], 32, tmp[3]);
  or3(tmp[2], tmp[3], tmp[2]);

  xor3(tmp[4], tmp[1], tmp[4]);
  xor3(tmp[4], tmp[2], tmp[1]);
  dec(len, 8);

  // Use table look-up for the 8 bytes left in tmp[1]
  dec(len, 8);

  // 8 8-bit folds to compute the 32-bit CRC.
  for (int j = 0; j < 4; j++) {
    fold_8bit_crc32(tmp[1], table, tmp[2], tmp[3]);
  }
  srl(tmp[1], G0, crc); // move 32 bits to general register
  for (int j = 0; j < 4; j++) {
    fold_8bit_crc32(crc, table, tmp[3]);
  }

  bind(L_8byte_fold_check);

  // Restore int registers saved in FP registers
  for (int i = 0; i < CRC32_TMP_REG_NUM; i++) {
    movdtox(as_FloatRegister(2*i), tmp[i]);
  }

  ba(L_cleanup_check);
  delayed()->nop();

  // Table look-up method for the remaining few bytes
  bind(L_cleanup_loop);
  ldub(buf, 0, O4);
  inc(buf);
  dec(len);
  xor3(O4, crc, O4);
  and3(O4, 0xFF, O4);
  sllx(O4, 2, O4);
  lduw(table, O4, O4);
  srlx(crc, 8, crc);
  xor3(O4, crc, crc);
  bind(L_cleanup_check);
  nop();
  cmp_and_br_short(len, 0, Assembler::greaterUnsigned, Assembler::pt, L_cleanup_loop);

  not1(crc);
}

#define CHUNK_LEN 128          /* 128 x 8B = 1KB */
#define CHUNK_K1  0x1307a0206  /* reverseBits(pow(x, CHUNK_LEN*8*8*3 - 32) mod P(x)) << 1 */
#define CHUNK_K2  0x1a0f717c4  /* reverseBits(pow(x, CHUNK_LEN*8*8*2 - 32) mod P(x)) << 1 */
#define CHUNK_K3  0x0170076fa  /* reverseBits(pow(x, CHUNK_LEN*8*8*1 - 32) mod P(x)) << 1 */

void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, Register table) {

  Label L_crc32c_head, L_crc32c_aligned;
  Label L_crc32c_parallel, L_crc32c_parallel_loop;
  Label L_crc32c_serial, L_crc32c_x32_loop, L_crc32c_x8, L_crc32c_x8_loop;
  Label L_crc32c_done, L_crc32c_tail, L_crc32c_return;

  set(ExternalAddress(StubRoutines::crc32c_table_addr()), table);

  cmp_and_br_short(len, 0, Assembler::lessEqual, Assembler::pn, L_crc32c_return);

  // clear upper 32 bits of crc
  clruwu(crc);

  and3(buf, 7, G4);
  cmp_and_brx_short(G4, 0, Assembler::equal, Assembler::pt, L_crc32c_aligned);
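
  // What the head processing below computes, as a hedged C sketch (names
  // are illustrative; in the code G4 holds the remaining head count and G1
  // the loaded byte):
  //
  //   size_t head = 8 - ((uintptr_t)buf & 7); // 1..7, buf is known unaligned
  //   while (head > 0 && len > 0) {
  //     crc = (crc >> 8) ^ byteTable[(crc ^ *buf++) & 0xFF];
  //     head--; len--;
  //   }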

  mov(8, G1);
  sub(G1, G4, G4); // G4 = 8 - (buf & 7), bytes needed to reach alignment

  // ------ process the misaligned head (7 bytes or less) ------
  bind(L_crc32c_head);

  // crc = (crc >>> 8) ^ byteTable[(crc ^ b) & 0xFF];
  ldub(buf, 0, G1);
  update_byte_crc32(crc, G1, table);

  inc(buf);
  dec(len);
  cmp_and_br_short(len, 0, Assembler::equal, Assembler::pn, L_crc32c_return);
  dec(G4);
  cmp_and_br_short(G4, 0, Assembler::greater, Assembler::pt, L_crc32c_head);

  // ------ process the 8-byte-aligned body ------
  bind(L_crc32c_aligned);
  nop();
  cmp_and_br_short(len, 8, Assembler::less, Assembler::pn, L_crc32c_tail);

  // reverse the byte order of the lower 32 bits to big endian, and move to the FP side
  movitof_revbytes(crc, F0, G1, G3);

  set(CHUNK_LEN*8*4, G4);
  cmp_and_br_short(len, G4, Assembler::less, Assembler::pt, L_crc32c_serial);

  // ------ process four 1KB chunks in parallel ------
  bind(L_crc32c_parallel);

  fzero(FloatRegisterImpl::D, F2);
  fzero(FloatRegisterImpl::D, F4);
  fzero(FloatRegisterImpl::D, F6);

  mov(CHUNK_LEN - 1, G4);
  bind(L_crc32c_parallel_loop);
  // schedule ldf's ahead of crc32c's to hide the load-use latency
  ldf(FloatRegisterImpl::D, buf, 0,            F8);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*8,  F10);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*16, F12);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*24, F14);
  crc32c(F0, F8,  F0);
  crc32c(F2, F10, F2);
  crc32c(F4, F12, F4);
  crc32c(F6, F14, F6);
  inc(buf, 8);
  dec(G4);
  cmp_and_br_short(G4, 0, Assembler::greater, Assembler::pt, L_crc32c_parallel_loop);

  ldf(FloatRegisterImpl::D, buf, 0,            F8);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*8,  F10);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*16, F12);
  crc32c(F0, F8,  F0);
  crc32c(F2, F10, F2);
  crc32c(F4, F12, F4);

  inc(buf, CHUNK_LEN*24);
  ldfl(FloatRegisterImpl::D, buf, G0, F14); // load in little endian
  inc(buf, 8);

  prefetch(buf, 0,            Assembler::severalReads);
  prefetch(buf, CHUNK_LEN*8,  Assembler::severalReads);
  prefetch(buf, CHUNK_LEN*16, Assembler::severalReads);
  prefetch(buf, CHUNK_LEN*24, Assembler::severalReads);

  // move to the INT side, and reverse the byte order of the lower 32 bits to little endian
  movftoi_revbytes(F0, O4, G1, G4);
  movftoi_revbytes(F2, O5, G1, G4);
  movftoi_revbytes(F4, G5, G1, G4);

  // combine the results of the 4 chunks: each partial CRC is carry-less
  // multiplied by the CHUNK_K* constant that shifts it past the data
  // processed by the chunks that follow it, then all four are xored
  set64(CHUNK_K1, G3, G1);
  xmulx(O4, G3, O4);
  set64(CHUNK_K2, G3, G1);
  xmulx(O5, G3, O5);
  set64(CHUNK_K3, G3, G1);
  xmulx(G5, G3, G5);

  movdtox(F14, G4);
  xor3(O4, O5, O5);
  xor3(G5, O5, O5);
  xor3(G4, O5, O5);

  // reverse the byte order to big endian, via the stack, and move to the FP side
  // TODO: use new revb instruction
  add(SP, -8, G1);
  srlx(G1, 3, G1);
  sllx(G1, 3, G1); // align the scratch slot to 8 bytes
  stx(O5, G1, G0);
  ldfl(FloatRegisterImpl::D, G1, G0, F2); // load in little endian

  crc32c(F6, F2, F0);

  set(CHUNK_LEN*8*4, G4);
  sub(len, G4, len);
  cmp_and_br_short(len, G4, Assembler::greaterEqual, Assembler::pt, L_crc32c_parallel);
  nop();
  cmp_and_br_short(len, 0, Assembler::equal, Assembler::pt, L_crc32c_done);

  bind(L_crc32c_serial);

  mov(32, G4);
  cmp_and_br_short(len, G4, Assembler::less, Assembler::pn, L_crc32c_x8);
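
  // The two loops below, as a hedged C sketch (CRC32C8() and load8() are
  // illustrative stand-ins for one crc32c instruction applied to an 8-byte
  // value kept on the FP side):
  //
  //   while (len >= 32) {                  // 32B chunks, 4x unrolled
  //     for (int i = 0; i < 4; i++) {
  //       acc = CRC32C8(acc, load8(buf)); buf += 8;
  //     }
  //     len -= 32;
  //   }
  //   while (len >= 8) {                   // 8B chunks
  //     acc = CRC32C8(acc, load8(buf)); buf += 8; len -= 8;
  //   }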
  // ------ process 32B chunks ------
  bind(L_crc32c_x32_loop);
  ldf(FloatRegisterImpl::D, buf, 0, F2);
  crc32c(F0, F2, F0);
  ldf(FloatRegisterImpl::D, buf, 8, F2);
  crc32c(F0, F2, F0);
  ldf(FloatRegisterImpl::D, buf, 16, F2);
  crc32c(F0, F2, F0);
  ldf(FloatRegisterImpl::D, buf, 24, F2);
  inc(buf, 32);
  crc32c(F0, F2, F0);
  dec(len, 32);
  cmp_and_br_short(len, G4, Assembler::greaterEqual, Assembler::pt, L_crc32c_x32_loop);

  bind(L_crc32c_x8);
  nop();
  cmp_and_br_short(len, 8, Assembler::less, Assembler::pt, L_crc32c_done);

  // ------ process 8B chunks ------
  bind(L_crc32c_x8_loop);
  ldf(FloatRegisterImpl::D, buf, 0, F2);
  inc(buf, 8);
  crc32c(F0, F2, F0);
  dec(len, 8);
  cmp_and_br_short(len, 8, Assembler::greaterEqual, Assembler::pt, L_crc32c_x8_loop);

  bind(L_crc32c_done);

  // move to the INT side, and reverse the byte order of the lower 32 bits to little endian
  movftoi_revbytes(F0, crc, G1, G3);

  cmp_and_br_short(len, 0, Assembler::equal, Assembler::pt, L_crc32c_return);

  // ------ process the misaligned tail (7 bytes or less) ------
  bind(L_crc32c_tail);

  // crc = (crc >>> 8) ^ byteTable[(crc ^ b) & 0xFF];
  ldub(buf, 0, G1);
  update_byte_crc32(crc, G1, table);

  inc(buf);
  dec(len);
  cmp_and_br_short(len, 0, Assembler::greater, Assembler::pt, L_crc32c_tail);

  bind(L_crc32c_return);
  nop();
}
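
// Note on the parallel path in kernel_crc32c (a hedged sketch of the
// underlying identity, not VM code): if A, B, C, D are the four 1KB chunks
// and crcA..crcD their independently accumulated stream states, then, up to
// bit-reflection details,
//
//   crc(A||B||C||D) = clmul(crcA, CHUNK_K1) ^ clmul(crcB, CHUNK_K2)
//                   ^ clmul(crcC, CHUNK_K3) ^ crcD
//
// where clmul() is the carry-less multiply provided by xmulx. Each CHUNK_Ki
// shifts a chunk's CRC past the data processed after that chunk; the
// exponents in the CHUNK_K* comments (CHUNK_LEN*8*8*i - 32) encode exactly
// that distance.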