/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef COMPILER2
#include "opto/intrinsicnode.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

// Convert the raw encoding form into the form expected by the
// constructor for Address.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  assert(scale == 0, "not supported");
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }

  Register rindex = as_Register(index);
  if (rindex != G0) {
    Address madr(as_Register(base), rindex);
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), disp);
    madr._rspec = rspec;
    return madr;
  }
}

Address Argument::address_in_frame() const {
  // Warning: In LP64 mode disp will occupy more than 10 bits, but
  //          opcodes such as ld or ldx only access disp() to get
  //          their simm13 argument.
  int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
  if (is_in())
    return Address(FP, disp); // In argument.
  else
    return Address(SP, disp); // Out argument.
}

static const char* argumentNames[][2] = {
  {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
  {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
  {"A(n>9)","P(n>9)"}
};

const char* Argument::name() const {
  int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
  int num = number();
  if (num >= nofArgs)  num = nofArgs - 1;
  return argumentNames[num][is_in() ? 1 : 0];
}

#ifdef ASSERT
// On RISC, there's no benefit to verifying instruction boundaries.
bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

// Patch instruction inst at offset inst_pos to refer to dest_pos
// and return the resulting instruction.
// We should have pcs, not offsets, but since all is relative, it will work out
// OK.
int MacroAssembler::patched_branch(int dest_pos, int inst, int inst_pos) {
  int m; // mask for displacement field
  int v; // new value for displacement field
  const int word_aligned_ones = -4;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:    m = wdisp(word_aligned_ones, 0, 30);  v = wdisp(dest_pos, inst_pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:  m = wdisp(  word_aligned_ones, 0, 19);  v = wdisp(  dest_pos, inst_pos, 19); break;
      case bp_op2:   m = wdisp(  word_aligned_ones, 0, 19);  v = wdisp(  dest_pos, inst_pos, 19); break;
      case fb_op2:   m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
      case br_op2:   m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          m = wdisp10(word_aligned_ones, 0);
          v = wdisp10(dest_pos, inst_pos);
        } else {
          m = wdisp16(word_aligned_ones, 0);
          v = wdisp16(dest_pos, inst_pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return (inst & ~m) | v;
}

// Return the offset of the branch destination of instruction inst
// at offset pos.
// Should have pcs, but since all is relative, it works out.
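// For instance (a worked example, assuming the standard SPARC
// word-displacement encoding): a bp_op2 branch at inst_pos 0x100 patched by
// patched_branch to dest_pos 0x180 carries a 19-bit displacement of
// (0x180 - 0x100) >> 2 = 0x20 words; branch_destination recovers 0x180 from it.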
int MacroAssembler::branch_destination(int inst, int pos) {
  int r;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:        r = inv_wdisp(inst, pos, 30);  break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:    r = inv_wdisp(  inst, pos, 19);  break;
      case bp_op2:     r = inv_wdisp(  inst, pos, 19);  break;
      case fb_op2:     r = inv_wdisp(  inst, pos, 22);  break;
      case br_op2:     r = inv_wdisp(  inst, pos, 22);  break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          r = inv_wdisp10(inst, pos);
        } else {
          r = inv_wdisp16(inst, pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return r;
}

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check((intptr_t)offset)) {
    // provoke OS NULL exception if reg == NULL by
    // accessing M[reg] w/o changing any registers
    ld_ptr(reg, 0, G0);
  }
  else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg == NULL
  }
}

// Ring buffer jumps


void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line ) {
  assert_not_delayed();
  jmpl(r1, r2, G0);
}
void MacroAssembler::jmp(Register r1, int offset, const char* file, int line ) {
  assert_not_delayed();
  jmp(r1, offset);
}

// This code sequence is relocatable to any address, even on LP64.
void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  Address a(temp, addrlit.low10() + offset);  // Add the offset to the displacement.
  jmpl(a.base(), a.disp(), d);
}

void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
  jumpl(addrlit, temp, G0, offset, file, line);
}


// Conditional breakpoint (for assertion checks in assembly code)
void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
  trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
}

// We want to use ST_BREAKPOINT here, but the debugger is confused by it.
void MacroAssembler::breakpoint_trap() {
  trap(ST_RESERVED_FOR_USER_0);
}

// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread-specific
// offset to write to within the page.  This minimizes bus traffic
// due to cache line collision.
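// In effect (a sketch of the computation below, not emitted code):
//   offset = (thread >> get_serialize_page_shift_count()) & (vm_page_size() - sizeof(int))
//   *(int*)(get_memory_serialize_page() + offset) = 0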
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  srl(thread, os::get_serialize_page_shift_count(), tmp2);
  if (Assembler::is_simm13(os::vm_page_size())) {
    and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
  }
  else {
    set((os::vm_page_size() - sizeof(int)), tmp1);
    and3(tmp2, tmp1, tmp2);
  }
  set(os::get_memory_serialize_page(), tmp1);
  st(G0, tmp1, tmp2);
}



void MacroAssembler::enter() {
  Unimplemented();
}

void MacroAssembler::leave() {
  Unimplemented();
}

// Calls to C land

#ifdef ASSERT
// a hook for debugging
static Thread* reinitialize_thread() {
  return Thread::current();
}
#else
#define reinitialize_thread Thread::current
#endif

#ifdef ASSERT
address last_get_thread = NULL;
#endif

// call this when G2_thread is not known to be valid
void MacroAssembler::get_thread() {
  save_frame(0);                // to avoid clobbering O0
  mov(G1, L0);                  // avoid clobbering G1
  mov(G5_method, L1);           // avoid clobbering G5
  mov(G3, L2);                  // avoid clobbering G3 also
  mov(G4, L5);                  // avoid clobbering G4
#ifdef ASSERT
  AddressLiteral last_get_thread_addrlit(&last_get_thread);
  set(last_get_thread_addrlit, L3);
  rdpc(L4);
  inc(L4, 3 * BytesPerInstWord); // skip rdpc + inc + st_ptr to point L4 at call
  st_ptr(L4, L3, 0);
#endif
  call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
  delayed()->nop();
  mov(L0, G1);
  mov(L1, G5_method);
  mov(L2, G3);
  mov(L5, G4);
  restore(O0, 0, G2_thread);
}

static Thread* verify_thread_subroutine(Thread* gthread_value) {
  Thread* correct_value = Thread::current();
  guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
  return correct_value;
}

void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    // NOTE: this chops off the heads of the 64-bit O registers.
    // make sure G2_thread contains the right value
    save_frame_and_mov(0, Lmethod, Lmethod);   // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
    mov(G1, L1);                // avoid clobbering G1
    // G2 saved below
    mov(G3, L3);                // avoid clobbering G3
    mov(G4, L4);                // avoid clobbering G4
    mov(G5_method, L5);         // avoid clobbering G5_method
#if defined(COMPILER2) && !defined(_LP64)
    // Save & restore possible 64-bit Long arguments in G-regs
    srlx(G1,32,L0);
    srlx(G4,32,L6);
#endif
    call(CAST_FROM_FN_PTR(address,verify_thread_subroutine), relocInfo::runtime_call_type);
    delayed()->mov(G2_thread, O0);

    mov(L1, G1);                // Restore G1
    // G2 restored below
    mov(L3, G3);                // restore G3
    mov(L4, G4);                // restore G4
    mov(L5, G5_method);         // restore G5_method
#if defined(COMPILER2) && !defined(_LP64)
    // Save & restore possible 64-bit Long arguments in G-regs
    sllx(L0,32,G2);             // Move old high G1 bits high in G2
    srl(G1, 0,G1);              // Clear current high G1 bits
    or3 (G1,G2,G1);             // Recover 64-bit G1
    sllx(L6,32,G2);             // Move old high G4 bits high in G2
    srl(G4, 0,G4);              // Clear current high G4 bits
    or3 (G4,G2,G4);             // Recover 64-bit G4
#endif
    restore(O0, 0, G2_thread);
  }
}


void MacroAssembler::save_thread(const Register thread_cache) {
  verify_thread();
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(G2_thread, thread_cache);
  }
  if (VerifyThread) {
    // smash G2_thread, as if the VM were about to anyway
    set(0x67676767, G2_thread);
  }
}


void MacroAssembler::restore_thread(const Register thread_cache) {
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(thread_cache, G2_thread);
    verify_thread();
  } else {
    // do it the slow way
    get_thread();
  }
}


// %%% maybe get rid of [re]set_last_Java_frame
void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
  assert_not_delayed();
  Address flags(G2_thread, JavaThread::frame_anchor_offset() +
                           JavaFrameAnchor::flags_offset());
  Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset());

  // Always set last_Java_pc and flags first because once last_Java_sp is
  // visible, has_last_Java_frame is true and users will look at the rest
  // of the fields. (Note: flags should always be zero before we get here,
  // so it doesn't need to be set.)

#ifdef ASSERT
  // Verify that last_Java_pc was zeroed on return to Java
  Label PcOk;
  save_frame(0);                // to avoid clobbering O0
  ld_ptr(pc_addr, L0);
  br_null_short(L0, Assembler::pt, PcOk);
  STOP("last_Java_pc not zeroed before leaving Java");
  bind(PcOk);

  // Verify that flags was zeroed on return to Java
  Label FlagsOk;
  ld(flags, L0);
  tst(L0);
  br(Assembler::zero, false, Assembler::pt, FlagsOk);
  delayed()->restore();
  STOP("flags not zeroed before leaving Java");
  bind(FlagsOk);
#endif /* ASSERT */
  //
  // When returning from calling out from Java mode the frame anchor's
  // last_Java_pc will always be set to NULL. It is set here so that if we
  // are doing a call to native (not VM) we capture the known pc and don't
  // have to rely on the native call having a standard frame linkage where
  // we can find the pc.

  if (last_Java_pc->is_valid()) {
    st_ptr(last_Java_pc, pc_addr);
  }

#ifdef _LP64
#ifdef ASSERT
  // Make sure that we have an odd stack
  Label StackOk;
  andcc(last_java_sp, 0x01, G0);
  br(Assembler::notZero, false, Assembler::pt, StackOk);
  delayed()->nop();
  STOP("Stack Not Biased in set_last_Java_frame");
  bind(StackOk);
#endif // ASSERT
  assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
  add( last_java_sp, STACK_BIAS, G4_scratch );
  st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
#else
  st_ptr(last_java_sp, G2_thread, JavaThread::last_Java_sp_offset());
#endif // _LP64
}

void MacroAssembler::reset_last_Java_frame(void) {
  assert_not_delayed();

  Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset());
  Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
  Address flags  (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

#ifdef ASSERT
  // check that it WAS previously set
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod to helper frame for -Xprof
  ld_ptr(sp_addr, L0);
  tst(L0);
  breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
  restore();
#endif // ASSERT

  st_ptr(G0, sp_addr);
  // Always return last_Java_pc to zero
  st_ptr(G0, pc_addr);
  // Always null flags after return to Java
  st(G0, flags);
}


void MacroAssembler::call_VM_base(
  Register        oop_result,
  Register        thread_cache,
  Register        last_java_sp,
  address         entry_point,
  int             number_of_arguments,
  bool            check_exceptions)
{
  assert_not_delayed();

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");

  // 64-bit last_java_sp is biased!
  set_last_Java_frame(last_java_sp, noreg);
  if (VerifyThread)  mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread)
    delayed()->mov(G2_thread, O0);  // pass thread as first argument
  else
    delayed()->nop();               // (thread already passed)
  restore_thread(thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions. use Gtemp as scratch register.
  if (check_exceptions) {
    check_and_forward_exception(Gtemp);
  }

#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }
}

void MacroAssembler::check_and_forward_exception(Register scratch_reg)
{
  Label L;

  check_and_handle_popframe(scratch_reg);
  check_and_handle_earlyret(scratch_reg);

  Address exception_addr(G2_thread, Thread::pending_exception_offset());
  ld_ptr(exception_addr, scratch_reg);
  br_null_short(scratch_reg, pt, L);
  // we use O7 linkage so that forward_exception_entry has the issuing PC
  call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  delayed()->nop();
  bind(L);
}


void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
}


void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, entry_point, 3, check_exceptions);
}



// Note: The following call_VM overloadings are useful when a "save"
// has already been performed by a stub, and the last Java frame is
// the previous one.  In that case, last_java_sp must be passed as FP
// instead of SP.
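// A hypothetical use from such a stub (sketch only; the entry point name
// is illustrative, not code from this file):
//   __ save_frame(0);
//   __ call_VM(noreg, FP, CAST_FROM_FN_PTR(address, some_runtime_entry), O1);
// Passing FP makes the anchor record the frame that existed before the
// stub's own save.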


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}



void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
  assert_not_delayed();
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  delayed()->nop();
  restore_thread(thread_cache);
#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
  call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
  mov(arg_1, O0);
  call_VM_leaf(thread_cache, entry_point, 1);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 2);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 3);
}


void MacroAssembler::get_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  ld_ptr(    vm_result_addr, oop_result);
  st_ptr(G0, vm_result_addr);
  verify_oop(oop_result);
}


void MacroAssembler::get_vm_result_2(Register metadata_result) {
  verify_thread();
  Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
  ld_ptr(vm_result_addr_2, metadata_result);
  st_ptr(G0, vm_result_addr_2);
}


// We require that C code which does not return a value in vm_result will
// leave it undisturbed.
void MacroAssembler::set_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  verify_oop(oop_result);

# ifdef ASSERT
    // Check that we are not overwriting any other oop.
    save_frame_and_mov(0, Lmethod, Lmethod); // Propagate Lmethod for -Xprof
    ld_ptr(vm_result_addr, L0);
    tst(L0);
    restore();
    breakpoint_trap(notZero, Assembler::ptr_cc);
# endif

  st_ptr(oop_result, vm_result_addr);
}


void MacroAssembler::ic_call(address entry, bool emit_delay, jint method_index) {
  RelocationHolder rspec = virtual_call_Relocation::spec(pc(), method_index);
  patchable_set((intptr_t)Universe::non_oop_word(), G5_inline_cache_reg);
  relocate(rspec);
  call(entry, relocInfo::none);
  if (emit_delay) {
    delayed()->nop();
  }
}

void MacroAssembler::card_table_write(jbyte* byte_map_base,
                                      Register tmp, Register obj) {
#ifdef _LP64
  srlx(obj, CardTableModRefBS::card_shift, obj);
#else
  srl(obj, CardTableModRefBS::card_shift, obj);
#endif
  assert(tmp != obj, "need separate temp reg");
  set((address) byte_map_base, tmp);
  stb(G0, tmp, obj);
}


void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  address save_pc;
  int shiftcnt;
#ifdef _LP64
# ifdef CHECK_DELAY
  assert_not_delayed((char*) "cannot put two instructions in delay slot");
# endif
  v9_dep();
  save_pc = pc();

  int msb32 = (int) (addrlit.value() >> 32);
  int lsb32 = (int) (addrlit.value());

  if (msb32 == 0 && lsb32 >= 0) {
    Assembler::sethi(lsb32, d, addrlit.rspec());
  }
  else if (msb32 == -1) {
    Assembler::sethi(~lsb32, d, addrlit.rspec());
    xor3(d, ~low10(~0), d);
  }
  else {
    Assembler::sethi(msb32, d, addrlit.rspec());  // msb 22-bits
    if (msb32 & 0x3ff)                            // Any bits?
      or3(d, msb32 & 0x3ff, d);                   // msb 32-bits are now in lsb 32
    if (lsb32 & 0xFFFFFC00) {                     // done?
      if ((lsb32 >> 20) & 0xfff) {                // Any bits set?
        sllx(d, 12, d);                           // Make room for next 12 bits
        or3(d, (lsb32 >> 20) & 0xfff, d);         // Or in next 12
        shiftcnt = 0;                             // We already shifted
      }
      else
        shiftcnt = 12;
      if ((lsb32 >> 10) & 0x3ff) {
        sllx(d, shiftcnt + 10, d);                // Make room for last 10 bits
        or3(d, (lsb32 >> 10) & 0x3ff, d);         // Or in next 10
        shiftcnt = 0;
      }
      else
        shiftcnt = 10;
      sllx(d, shiftcnt + 10, d);                  // Shift leaving disp field 0'd
    }
    else
      sllx(d, 32, d);
  }
  // Pad out the instruction sequence so it can be patched later.
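  // The worst case on LP64 is seven instructions (kept in sync with
  // insts_for_sethi(a, true) below), so nops are emitted up to
  // save_pc + 7 * BytesPerInstWord.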
  if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
                           addrlit.rtype() != relocInfo::runtime_call_type)) {
    while (pc() < (save_pc + (7 * BytesPerInstWord)))
      nop();
  }
#else
  Assembler::sethi(addrlit.value(), d, addrlit.rspec());
#endif
}


void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, false);
}


void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, true);
}


int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
#ifdef _LP64
  if (worst_case)  return 7;
  intptr_t iaddr = (intptr_t) a;
  int msb32 = (int) (iaddr >> 32);
  int lsb32 = (int) (iaddr);
  int count;
  if (msb32 == 0 && lsb32 >= 0)
    count = 1;
  else if (msb32 == -1)
    count = 2;
  else {
    count = 2;
    if (msb32 & 0x3ff)
      count++;
    if (lsb32 & 0xFFFFFC00 ) {
      if ((lsb32 >> 20) & 0xfff)  count += 2;
      if ((lsb32 >> 10) & 0x3ff)  count += 2;
    }
  }
  return count;
#else
  return 1;
#endif
}

int MacroAssembler::worst_case_insts_for_set() {
  return insts_for_sethi(NULL, true) + 1;
}


// Keep in sync with MacroAssembler::insts_for_internal_set
void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  intptr_t value = addrlit.value();

  if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) {
    // can optimize
    if (-4096 <= value && value <= 4095) {
      or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
      return;
    }
    if (inv_hi22(hi22(value)) == value) {
      sethi(addrlit, d);
      return;
    }
  }
  assert_not_delayed((char*) "cannot put two instructions in delay slot");
  internal_sethi(addrlit, d, ForceRelocatable);
  if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
    add(d, addrlit.low10(), d, addrlit.rspec());
  }
}

// Keep in sync with MacroAssembler::internal_set
int MacroAssembler::insts_for_internal_set(intptr_t value) {
  // can optimize
  if (-4096 <= value && value <= 4095) {
    return 1;
  }
  if (inv_hi22(hi22(value)) == value) {
    return insts_for_sethi((address) value);
  }
  int count = insts_for_sethi((address) value);
  AddressLiteral al(value);
  if (al.low10() != 0) {
    count++;
  }
  return count;
}

void MacroAssembler::set(const AddressLiteral& al, Register d) {
  internal_set(al, d, false);
}

void MacroAssembler::set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, false);
}

void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) {
  AddressLiteral al(addr, rspec);
  internal_set(al, d, false);
}

void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) {
  internal_set(al, d, true);
}

void MacroAssembler::patchable_set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, true);
}


void MacroAssembler::set64(jlong value, Register d, Register tmp) {
  assert_not_delayed();
  v9_dep();

  int hi = (int)(value >> 32);
  int lo = (int)(value & ~0);
  int bits_33to2 = (int)((value >> 2) & ~0);
  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
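  // Cases below, in order: simm13 immediate; zero-extended 32-bit value
  // (sethi + optional or3); zero-extended 34-bit value (sethi of bits 33..2,
  // then sllx by 2); sign-extended 32-bit value (sethi + xor3); values with
  // a zero low word (built low, then shifted up by 32); and the general
  // case, which splices the two halves together through tmp.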
  if (Assembler::is_simm13(lo) && value == lo) {
    or3(G0, lo, d);
  } else if (hi == 0) {
    Assembler::sethi(lo, d);   // hardware version zero-extends to upper 32
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
  }
  else if ((hi >> 2) == 0) {
    Assembler::sethi(bits_33to2, d);  // hardware version zero-extends to upper 32
    sllx(d, 2, d);
    if (low12(lo) != 0)
      or3(d, low12(lo), d);
  }
  else if (hi == -1) {
    Assembler::sethi(~lo, d);  // hardware version zero-extends to upper 32
    xor3(d, low10(lo) ^ ~low10(~0), d);
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      or3(G0, hi, d);
    } else {
      Assembler::sethi(hi, d);   // hardware version zero-extends to upper 32
      if (low10(hi) != 0)
        or3(d, low10(hi), d);
    }
    sllx(d, 32, d);
  }
  else {
    Assembler::sethi(hi, tmp);
    Assembler::sethi(lo, d); // macro assembler version sign-extends
    if (low10(hi) != 0)
      or3 (tmp, low10(hi), tmp);
    if (low10(lo) != 0)
      or3 (  d, low10(lo),   d);
    sllx(tmp, 32, tmp);
    or3 (d, tmp, d);
  }
}

int MacroAssembler::insts_for_set64(jlong value) {
  v9_dep();

  int hi = (int) (value >> 32);
  int lo = (int) (value & ~0);
  int count = 0;

  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    count++;
  } else if (hi == 0) {
    count++;
    if (low10(lo) != 0)
      count++;
  }
  else if (hi == -1) {
    count += 2;
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      count++;
    } else {
      count++;
      if (low10(hi) != 0)
        count++;
    }
    count++;
  }
  else {
    count += 2;
    if (low10(hi) != 0)
      count++;
    if (low10(lo) != 0)
      count++;
    count += 2;
  }
  return count;
}

// compute size in bytes of sparc frame, given
// number of extraWords
int MacroAssembler::total_frame_size_in_bytes(int extraWords) {

  int nWords = frame::memory_parameter_word_sp_offset;

  nWords += extraWords;

  if (nWords & 1) ++nWords; // round up to double-word

  return nWords * BytesPerWord;
}


// save_frame: given number of "extra" words in frame,
// issue approp. save instruction (p 200, v8 manual)

void MacroAssembler::save_frame(int extraWords) {
  int delta = -total_frame_size_in_bytes(extraWords);
  if (is_simm13(delta)) {
    save(SP, delta, SP);
  } else {
    set(delta, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_c1(int size_in_bytes) {
  if (is_simm13(-size_in_bytes)) {
    save(SP, -size_in_bytes, SP);
  } else {
    set(-size_in_bytes, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_and_mov(int extraWords,
                                        Register s1, Register d1,
                                        Register s2, Register d2) {
  assert_not_delayed();

  // The trick here is to use precisely the same memory word
  // that trap handlers also use to save the register.
  // This word cannot be used for any other purpose, but
  // it works fine to save the register's value, whether or not
  // an interrupt flushes register windows at any given moment!
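  // In outline: store s1/s2 into their own register-window save slots,
  // issue the save, then recover the values into d1/d2 via after_save() --
  // either by reloading the stack word or by renaming the register across
  // the window shift.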
  Address s1_addr;
  if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
    s1_addr = s1->address_in_saved_window();
    st_ptr(s1, s1_addr);
  }

  Address s2_addr;
  if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
    s2_addr = s2->address_in_saved_window();
    st_ptr(s2, s2_addr);
  }

  save_frame(extraWords);

  if (s1_addr.base() == SP) {
    ld_ptr(s1_addr.after_save(), d1);
  } else if (s1->is_valid()) {
    mov(s1->after_save(), d1);
  }

  if (s2_addr.base() == SP) {
    ld_ptr(s2_addr.after_save(), d2);
  } else if (s2->is_valid()) {
    mov(s2->after_save(), d2);
  }
}


AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}


AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(obj, oop_Relocation::spec(oop_index));
}

void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(0x3fffff, d);
  emit_int32( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff) );
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, 0x3ff, d);
}

void MacroAssembler::set_narrow_klass(Klass* k, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  narrowOop encoded_k = Klass::encode_klass(k);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(encoded_k, d);
  emit_int32( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(encoded_k) );
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, low10(encoded_k), d);
}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) nop();
}

void RegistersForDebugging::print(outputStream* s) {
  FlagSetting fs(Debugging, true);
  int j;
  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("i%d = ", j); os::print_location(s, i[j]); }
    else        { s->print( "fp = "   ); os::print_location(s, i[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("l%d = ", j); os::print_location(s, l[j]);
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("o%d = ", j); os::print_location(s, o[j]); }
    else        { s->print( "sp = "   ); os::print_location(s, o[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("g%d = ", j); os::print_location(s, g[j]);
  }
  s->cr();

  // print out floats with compression
  for (j = 0; j < 32; ) {
    jfloat val = f[j];
    int last = j;
    for ( ;  last+1 < 32;  ++last ) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", f[last+1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("f%d", j);
    if ( j != last )  s->print(" - f%d", last);
    s->print(" = %f", val);
    s->fill_to(25);
    s->print_cr(" (0x%x)", *(int*)&val);
    j = last + 1;
  }
  s->cr();

  // and doubles (evens only)
  for (j = 0; j < 32; ) {
    jdouble val = d[j];
    int last = j;
    for ( ;  last+1 < 32;  ++last ) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", d[last+1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("d%d", 2 * j);
    if ( j != last )  s->print(" - d%d", 2 * last); // d regs are even-numbered
    s->print(" = %f", val);
    s->fill_to(30);
    s->print("(0x%x)", *(int*)&val);
    s->fill_to(42);
    s->print_cr("(0x%x)", *(1 + (int*)&val));
    j = last + 1;
  }
  s->cr();
}

void RegistersForDebugging::save_registers(MacroAssembler* a) {
  a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
  a->flushw();
  int i;
  for (i = 0; i < 8; ++i) {
    a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr( L1, O0, i_offset(i));
    a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr( L1, O0, l_offset(i));
    a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
    a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
  }
  for (i = 0;  i < 32;  ++i) {
    a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
  }
  for (i = 0; i < 64; i += 2) {
    a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
  }
}

void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
  for (int i = 1; i < 8; ++i) {
    a->ld_ptr(r, g_offset(i), as_gRegister(i));
  }
  for (int j = 0; j < 32; ++j) {
    a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
  }
  for (int k = 0; k < 64; k += 2) {
    a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
  }
}


// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
void MacroAssembler::push_fTOS() {
  // %%%%%% need to implement this
}

// pops double TOS element from CPU stack and pushes on FPU stack
void MacroAssembler::pop_fTOS() {
  // %%%%%% need to implement this
}

void
MacroAssembler::empty_FPU_stack() {
  // %%%%%% need to implement this
}

void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  if (reg == G0)  return;       // always NULL, which is always an oop

  BLOCK_COMMENT("verify_oop {");
  char buffer[64];
#ifdef COMPILER1
  if (CommentedAssembly) {
    snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
    block_comment(buffer);
  }
#endif

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at offset %d (%s:%d)", msg, offset(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP,-8*8,SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8);
  mov(reg,O0); // Move arg into O0; arg might be in O7 which is about to be crushed
  stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8,SP);
  BLOCK_COMMENT("} verify_oop");
}

void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char * file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP,-8*8,SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8);
  ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
  stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8,SP);
}

// side-door communication with signalHandler in os_solaris.cpp
address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };

// This macro is expanded just once; it creates shared code.  Contract:
// receives an oop in O0.  Must restore O0 & O7 from TLS.
// Must not smash ANY registers, including flags.  May not use a register
// 'save', as this blows the high bits of the O-regs if they contain Long
// values.  Acts as a 'leaf' call.
void MacroAssembler::verify_oop_subroutine() {
  // Leaf call; no frame.
  Label succeed, fail, null_or_fail;

  // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home).
  // O0 is now the oop to be checked.  O7 is the return address.
  Register O0_obj = O0;

  // Save some more registers for temps.
  stx(O2,SP,frame::register_save_words*wordSize+STACK_BIAS+2*8);
  stx(O3,SP,frame::register_save_words*wordSize+STACK_BIAS+3*8);
  stx(O4,SP,frame::register_save_words*wordSize+STACK_BIAS+4*8);
  stx(O5,SP,frame::register_save_words*wordSize+STACK_BIAS+5*8);

  // Save flags
  Register O5_save_flags = O5;
  rdccr( O5_save_flags );

  { // count number of verifies
    Register O2_adr   = O2;
    Register O3_accum = O3;
    inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum);
  }

  Register O2_mask = O2;
  Register O3_bits = O3;
  Register O4_temp = O4;

  // mark lower end of faulting range
  assert(_verify_oop_implicit_branch[0] == NULL, "set once");
  _verify_oop_implicit_branch[0] = pc();

  // We can't check the mark oop because it could be in the process of
  // locking or unlocking while this is running.
  set(Universe::verify_oop_mask (), O2_mask);
  set(Universe::verify_oop_bits (), O3_bits);

  // assert((obj & oop_mask) == oop_bits);
  and3(O0_obj, O2_mask, O4_temp);
  cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, null_or_fail);

  if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) {
    // the null_or_fail case is useless; must test for null separately
    br_null_short(O0_obj, pn, succeed);
  }

  // Check the Klass* of this object for being in the right area of memory.
  // Cannot do the load in the delay slot above in case O0 is null
  load_klass(O0_obj, O0_obj);
  // assert(klass != NULL)
  br_null_short(O0_obj, pn, fail);

  wrccr( O5_save_flags ); // Restore CCR's

  // mark upper end of faulting range
  _verify_oop_implicit_branch[1] = pc();

  //-----------------------
  // all tests pass
  bind(succeed);

  // Restore prior 64-bit registers
  ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+0*8,O0);
  ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+1*8,O1);
  ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+2*8,O2);
  ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+3*8,O3);
  ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+4*8,O4);
  ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+5*8,O5);

  retl();                       // Leaf return; restore prior O7 in delay slot
  delayed()->ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+7*8,O7);

  //-----------------------
  bind(null_or_fail);           // nulls are less common but OK
  br_null(O0_obj, false, pt, succeed);
  delayed()->wrccr( O5_save_flags ); // Restore CCR's

  //-----------------------
  // report failure:
  bind(fail);
  _verify_oop_implicit_branch[2] = pc();

  wrccr( O5_save_flags ); // Restore CCR's

  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  mov(I1, O1);

  // Restore prior 64-bit registers
  ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+0*8,I0);
  ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+1*8,I1);
  ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+2*8,I2);
  ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+3*8,I3);
  ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+4*8,I4);
  ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+5*8,I5);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(al, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();
}


void MacroAssembler::stop(const char* msg) {
  // save frame first to get O7 for return address
  // add one word to size in case struct is odd number of words long
  // It must be doubleword-aligned for storing doubles into it.

  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O1);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(a, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();

  breakpoint_trap();   // make stop actually stop rather than writing
                       // unnoticeable results in the output files.

  // restore(); done in callee to save space!
}


void MacroAssembler::warn(const char* msg) {
  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
  RegistersForDebugging::save_registers(this);
  mov(O0, L0);
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O0);
  call( CAST_FROM_FN_PTR(address, warning) );
  delayed()->nop();
//  ret();
//  delayed()->restore();
  RegistersForDebugging::restore_registers(this, L0);
  restore();
}


void MacroAssembler::untested(const char* what) {
  // We must be able to turn interactive prompting off
  // in order to run automated test scripts on the VM.
  // Use the flag ShowMessageBoxOnError.

  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("untested: %s", what);
    b = code_string(ss.as_string());
  }
  if (ShowMessageBoxOnError) { STOP(b); }
  else                       { warn(b); }
}


void MacroAssembler::stop_subroutine() {
  RegistersForDebugging::save_registers(this);

  // for the sake of the debugger, stick a PC on the current frame
  // (this assumes that the caller has performed an extra "save")
  mov(I7, L7);
  add(O7, -7 * BytesPerInt, I7);

  save_frame(); // one more save to free up another O7 register
  mov(I0, O1); // addr of reg save area

  // We expect pointer to message in I1.
  // Caller must set it up in O1
  mov(I1, O0); // get msg
  call (CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
  delayed()->nop();

  restore();

  RegistersForDebugging::restore_registers(this, O0);

  save_frame(0);
  call(CAST_FROM_FN_PTR(address,breakpoint));
  delayed()->nop();
  restore();

  mov(L7, I7);
  retl();
  delayed()->restore(); // see stop above
}


void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
  if ( ShowMessageBoxOnError ) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
      {
        // In order to make locks work, we need to fake an in_VM state
        ttyLocker ttyl;
        ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
        if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
          BytecodeCounter::print();
        }
        if (os::message_box(msg, "Execution stopped, print registers?"))
          regs->print(::tty);
      }
    BREAKPOINT;
    ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
  }
  else {
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
  }
  assert(false, "DEBUG MESSAGE: %s", msg);
}


void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) {
  subcc( Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
  Label no_extras;
  br( negative, true, pt, no_extras ); // if neg, clear reg
  delayed()->set(0, Rresult);          // annulled, so only if taken
  bind( no_extras );
}


void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
#ifdef _LP64
  add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
#else
  add(Rextra_words, frame::memory_parameter_word_sp_offset + 1, Rresult);
#endif
  bclr(1, Rresult);
  sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes
}


void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) {
  calc_frame_size(Rextra_words, Rresult);
  neg(Rresult);
  save(SP, Rresult, SP);
}


// ---------------------------------------------------------
Assembler::RCondition cond2rcond(Assembler::Condition c) {
  switch (c) {
    /*case zero: */
    case Assembler::equal:        return Assembler::rc_z;
    case Assembler::lessEqual:    return Assembler::rc_lez;
    case Assembler::less:         return Assembler::rc_lz;
    /*case notZero:*/
    case Assembler::notEqual:     return Assembler::rc_nz;
    case Assembler::greater:      return Assembler::rc_gz;
    case Assembler::greaterEqual: return Assembler::rc_gez;
  }
  ShouldNotReachHere();
  return Assembler::rc_z;
}

// compares (32 bit) register with zero and branches.  NOT FOR USE WITH 64-bit POINTERS
void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a, Predict p) {
  tst(s1);
  br (c, a, p, L);
}

// Compares a pointer register with zero and branches on null.
// Does a test & branch on 32-bit systems and a register-branch on 64-bit.
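// (A note on the 64-bit path, per the V9 architecture: bpr tests the named
// register directly and leaves the condition codes untouched, so no tst()
// is needed before it.)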
void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L ) {
  assert_not_delayed();
#ifdef _LP64
  bpr( rc_z, a, p, s1, L );
#else
  tst(s1);
  br ( zero, a, p, L );
#endif
}

void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L ) {
  assert_not_delayed();
#ifdef _LP64
  bpr( rc_nz, a, p, s1, L );
#else
  tst(s1);
  br ( notZero, a, p, L );
#endif
}

// Compare registers and branch with nop in delay slot or cbcond without delay slot.

// Compare integer (32 bit) values (icc only).
void MacroAssembler::cmp_and_br_short(Register s1, Register s2, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, s2, L);
  } else {
    cmp(s1, s2);
    br(c, false, p, L);
    delayed()->nop();
  }
}

// Compare integer (32 bit) values (icc only).
void MacroAssembler::cmp_and_br_short(Register s1, int simm13a, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (is_simm(simm13a,5) && use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, simm13a, L);
  } else {
    cmp(s1, simm13a);
    br(c, false, p, L);
    delayed()->nop();
  }
}

// Branch that tests xcc in LP64 and icc in !LP64
void MacroAssembler::cmp_and_brx_short(Register s1, Register s2, Condition c,
                                       Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, ptr_cc, s1, s2, L);
  } else {
    cmp(s1, s2);
    brx(c, false, p, L);
    delayed()->nop();
  }
}

// Branch that tests xcc in LP64 and icc in !LP64
void MacroAssembler::cmp_and_brx_short(Register s1, int simm13a, Condition c,
                                       Predict p, Label& L) {
  assert_not_delayed();
  if (is_simm(simm13a,5) && use_cbcond(L)) {
    Assembler::cbcond(c, ptr_cc, s1, simm13a, L);
  } else {
    cmp(s1, simm13a);
    brx(c, false, p, L);
    delayed()->nop();
  }
}

// Short branch versions for comparing a pointer with zero.

void MacroAssembler::br_null_short(Register s1, Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(zero, ptr_cc, s1, 0, L);
    return;
  }
  br_null(s1, false, p, L);
  delayed()->nop();
}

void MacroAssembler::br_notnull_short(Register s1, Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(notZero, ptr_cc, s1, 0, L);
    return;
  }
  br_notnull(s1, false, p, L);
  delayed()->nop();
}

// Unconditional short branch
void MacroAssembler::ba_short(Label& L) {
  if (use_cbcond(L)) {
    Assembler::cbcond(equal, icc, G0, G0, L);
    return;
  }
  br(always, false, pt, L);
  delayed()->nop();
}

// instruction sequences factored across compiler & interpreter


void MacroAssembler::lcmp( Register Ra_hi, Register Ra_low,
                           Register Rb_hi, Register Rb_low,
                           Register Rresult) {

  Label check_low_parts, done;

  cmp(Ra_hi, Rb_hi );  // compare hi parts
  br(equal, true, pt, check_low_parts);
  delayed()->cmp(Ra_low, Rb_low); // test low parts

  // And, with an unsigned comparison, it does not matter if the numbers
  // are negative or not.
  // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff.
  // The second one is bigger (unsignedly).

  // Other notes:  The first move in each triplet can be unconditional
  // (and therefore probably prefetchable).
  // And the equals case for the high part does not need testing,
  // since that triplet is reached only after finding the high halves differ.

  mov(-1, Rresult);
  ba(done);
  delayed()->movcc(greater, false, icc,  1, Rresult);

  bind(check_low_parts);

  mov(                               -1, Rresult);
  movcc(equal,           false, icc,  0, Rresult);
  movcc(greaterUnsigned, false, icc,  1, Rresult);

  bind(done);
}

void MacroAssembler::lneg( Register Rhi, Register Rlow ) {
  subcc(G0, Rlow, Rlow);
  subc( G0, Rhi,  Rhi);
}

void MacroAssembler::lshl( Register Rin_high,  Register Rin_low,
                           Register Rcount,
                           Register Rout_high, Register Rout_low,
                           Register Rtemp ) {


  Register Ralt_count = Rtemp;
  Register Rxfer_bits = Rtemp;

  assert( Ralt_count != Rin_high
      &&  Ralt_count != Rin_low
      &&  Ralt_count != Rcount
      &&  Rxfer_bits != Rin_low
      &&  Rxfer_bits != Rin_high
      &&  Rxfer_bits != Rcount
      &&  Rxfer_bits != Rout_low
      &&  Rout_low   != Rin_high,
        "register alias checks");

  Label big_shift, done;

  // This code can be optimized to use the 64 bit shifts in V9.
  // Here we use the 32 bit shifts.

  and3( Rcount, 0x3f, Rcount);     // take least significant 6 bits
  subcc(Rcount,   31, Ralt_count);
  br(greater, true, pn, big_shift);
  delayed()->dec(Ralt_count);

  // shift < 32 bits, Ralt_count = Rcount-31

  // We get the transfer bits by shifting right by 32-count the low
  // register. This is done by shifting right by 31-count and then by one
  // more to take care of the special (rare) case where count is zero
  // (shifting by 32 would not work).

  neg(Ralt_count);

  // The order of the next two instructions is critical in the case where
  // Rin and Rout are the same and should not be reversed.

  srl(Rin_low, Ralt_count, Rxfer_bits); // shift right by 31-count
  if (Rcount != Rout_low) {
    sll(Rin_low, Rcount, Rout_low); // low half
  }
  sll(Rin_high, Rcount, Rout_high);
  if (Rcount == Rout_low) {
    sll(Rin_low, Rcount, Rout_low); // low half
  }
  srl(Rxfer_bits, 1, Rxfer_bits );  // shift right by one more
  ba(done);
  delayed()->or3(Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low

  // shift >= 32 bits, Ralt_count = Rcount-32
  bind(big_shift);
  sll(Rin_low, Ralt_count, Rout_high);
  clr(Rout_low);

  bind(done);
}


void MacroAssembler::lshr( Register Rin_high,  Register Rin_low,
                           Register Rcount,
                           Register Rout_high, Register Rout_low,
                           Register Rtemp ) {

  Register Ralt_count = Rtemp;
  Register Rxfer_bits = Rtemp;

  assert( Ralt_count != Rin_high
      &&  Ralt_count != Rin_low
      &&  Ralt_count != Rcount
      &&  Rxfer_bits != Rin_low
      &&  Rxfer_bits != Rin_high
      &&  Rxfer_bits != Rcount
      &&  Rxfer_bits != Rout_high
      &&  Rout_high  != Rin_low,
        "register alias checks");

  Label big_shift, done;

  // This code can be optimized to use the 64 bit shifts in V9.
  // Here we use the 32 bit shifts.
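  // In outline, for count < 32 (a sketch of the sequence below):
  //   Rout_high = Rin_high >> count                      (arithmetic)
  //   Rout_low  = (Rin_low >>> count) | (Rin_high << (32 - count))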
1763 1764 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits 1765 subcc(Rcount, 31, Ralt_count); 1766 br(greater, true, pn, big_shift); 1767 delayed()->dec(Ralt_count); 1768 1769 // shift < 32 bits, Ralt_count = Rcount-31 1770 1771 // We get the transfer bits by shifting left by 32-count the high 1772 // register. This is done by shifting left by 31-count and then by one 1773 // more to take care of the special (rare) case where count is zero 1774 // (shifting by 32 would not work). 1775 1776 neg(Ralt_count); 1777 if (Rcount != Rout_low) { 1778 srl(Rin_low, Rcount, Rout_low); 1779 } 1780 1781 // The order of the next two instructions is critical in the case where 1782 // Rin and Rout are the same and should not be reversed. 1783 1784 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count 1785 sra(Rin_high, Rcount, Rout_high ); // high half 1786 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more 1787 if (Rcount == Rout_low) { 1788 srl(Rin_low, Rcount, Rout_low); 1789 } 1790 ba(done); 1791 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high 1792 1793 // shift >= 32 bits, Ralt_count = Rcount-32 1794 bind(big_shift); 1795 1796 sra(Rin_high, Ralt_count, Rout_low); 1797 sra(Rin_high, 31, Rout_high); // sign into hi 1798 1799 bind( done ); 1800 } 1801 1802 1803 1804 void MacroAssembler::lushr( Register Rin_high, Register Rin_low, 1805 Register Rcount, 1806 Register Rout_high, Register Rout_low, 1807 Register Rtemp ) { 1808 1809 Register Ralt_count = Rtemp; 1810 Register Rxfer_bits = Rtemp; 1811 1812 assert( Ralt_count != Rin_high 1813 && Ralt_count != Rin_low 1814 && Ralt_count != Rcount 1815 && Rxfer_bits != Rin_low 1816 && Rxfer_bits != Rin_high 1817 && Rxfer_bits != Rcount 1818 && Rxfer_bits != Rout_high 1819 && Rout_high != Rin_low, 1820 "register alias checks"); 1821 1822 Label big_shift, done; 1823 1824 // This code can be optimized to use the 64 bit shifts in V9. 1825 // Here we use the 32 bit shifts. 1826 1827 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits 1828 subcc(Rcount, 31, Ralt_count); 1829 br(greater, true, pn, big_shift); 1830 delayed()->dec(Ralt_count); 1831 1832 // shift < 32 bits, Ralt_count = Rcount-31 1833 1834 // We get the transfer bits by shifting left by 32-count the high 1835 // register. This is done by shifting left by 31-count and then by one 1836 // more to take care of the special (rare) case where count is zero 1837 // (shifting by 32 would not work). 1838 1839 neg(Ralt_count); 1840 if (Rcount != Rout_low) { 1841 srl(Rin_low, Rcount, Rout_low); 1842 } 1843 1844 // The order of the next two instructions is critical in the case where 1845 // Rin and Rout are the same and should not be reversed. 
1846 1847 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count 1848 srl(Rin_high, Rcount, Rout_high ); // high half 1849 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more 1850 if (Rcount == Rout_low) { 1851 srl(Rin_low, Rcount, Rout_low); 1852 } 1853 ba(done); 1854 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high 1855 1856 // shift >= 32 bits, Ralt_count = Rcount-32 1857 bind(big_shift); 1858 1859 srl(Rin_high, Ralt_count, Rout_low); 1860 clr(Rout_high); 1861 1862 bind( done ); 1863 } 1864 1865 #ifdef _LP64 1866 void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) { 1867 cmp(Ra, Rb); 1868 mov(-1, Rresult); 1869 movcc(equal, false, xcc, 0, Rresult); 1870 movcc(greater, false, xcc, 1, Rresult); 1871 } 1872 #endif 1873 1874 1875 void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) { 1876 switch (size_in_bytes) { 1877 case 8: ld_long(src, dst); break; 1878 case 4: ld( src, dst); break; 1879 case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break; 1880 case 1: is_signed ? ldsb(src, dst) : ldub(src, dst); break; 1881 default: ShouldNotReachHere(); 1882 } 1883 } 1884 1885 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) { 1886 switch (size_in_bytes) { 1887 case 8: st_long(src, dst); break; 1888 case 4: st( src, dst); break; 1889 case 2: sth( src, dst); break; 1890 case 1: stb( src, dst); break; 1891 default: ShouldNotReachHere(); 1892 } 1893 } 1894 1895 1896 void MacroAssembler::float_cmp( bool is_float, int unordered_result, 1897 FloatRegister Fa, FloatRegister Fb, 1898 Register Rresult) { 1899 if (is_float) { 1900 fcmp(FloatRegisterImpl::S, fcc0, Fa, Fb); 1901 } else { 1902 fcmp(FloatRegisterImpl::D, fcc0, Fa, Fb); 1903 } 1904 1905 if (unordered_result == 1) { 1906 mov( -1, Rresult); 1907 movcc(f_equal, true, fcc0, 0, Rresult); 1908 movcc(f_unorderedOrGreater, true, fcc0, 1, Rresult); 1909 } else { 1910 mov( -1, Rresult); 1911 movcc(f_equal, true, fcc0, 0, Rresult); 1912 movcc(f_greater, true, fcc0, 1, Rresult); 1913 } 1914 } 1915 1916 1917 void MacroAssembler::save_all_globals_into_locals() { 1918 mov(G1,L1); 1919 mov(G2,L2); 1920 mov(G3,L3); 1921 mov(G4,L4); 1922 mov(G5,L5); 1923 mov(G6,L6); 1924 mov(G7,L7); 1925 } 1926 1927 void MacroAssembler::restore_globals_from_locals() { 1928 mov(L1,G1); 1929 mov(L2,G2); 1930 mov(L3,G3); 1931 mov(L4,G4); 1932 mov(L5,G5); 1933 mov(L6,G6); 1934 mov(L7,G7); 1935 } 1936 1937 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, 1938 Register tmp, 1939 int offset) { 1940 intptr_t value = *delayed_value_addr; 1941 if (value != 0) 1942 return RegisterOrConstant(value + offset); 1943 1944 // load indirectly to solve generation ordering problem 1945 AddressLiteral a(delayed_value_addr); 1946 load_ptr_contents(a, tmp); 1947 1948 #ifdef ASSERT 1949 tst(tmp); 1950 breakpoint_trap(zero, xcc); 1951 #endif 1952 1953 if (offset != 0) 1954 add(tmp, offset, tmp); 1955 1956 return RegisterOrConstant(tmp); 1957 } 1958 1959 1960 RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 1961 assert(d.register_or_noreg() != G0, "lost side effect"); 1962 if ((s2.is_constant() && s2.as_constant() == 0) || 1963 (s2.is_register() && s2.as_register() == G0)) { 1964 // Do nothing, just move value. 
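// (Illustrative note: with s2 == 0 the result is s1 & ~0 == s1 -- and
// likewise s1 + 0 and s1 << 0 in the sibling helpers below -- so at most a
// register-to-register move is needed, and a constant s1 passes through.)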
1965 if (s1.is_register()) { 1966 if (d.is_constant()) d = temp; 1967 mov(s1.as_register(), d.as_register()); 1968 return d; 1969 } else { 1970 return s1; 1971 } 1972 } 1973 1974 if (s1.is_register()) { 1975 assert_different_registers(s1.as_register(), temp); 1976 if (d.is_constant()) d = temp; 1977 andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 1978 return d; 1979 } else { 1980 if (s2.is_register()) { 1981 assert_different_registers(s2.as_register(), temp); 1982 if (d.is_constant()) d = temp; 1983 set(s1.as_constant(), temp); 1984 andn(temp, s2.as_register(), d.as_register()); 1985 return d; 1986 } else { 1987 intptr_t res = s1.as_constant() & ~s2.as_constant(); 1988 return res; 1989 } 1990 } 1991 } 1992 1993 RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 1994 assert(d.register_or_noreg() != G0, "lost side effect"); 1995 if ((s2.is_constant() && s2.as_constant() == 0) || 1996 (s2.is_register() && s2.as_register() == G0)) { 1997 // Do nothing, just move value. 1998 if (s1.is_register()) { 1999 if (d.is_constant()) d = temp; 2000 mov(s1.as_register(), d.as_register()); 2001 return d; 2002 } else { 2003 return s1; 2004 } 2005 } 2006 2007 if (s1.is_register()) { 2008 assert_different_registers(s1.as_register(), temp); 2009 if (d.is_constant()) d = temp; 2010 add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2011 return d; 2012 } else { 2013 if (s2.is_register()) { 2014 assert_different_registers(s2.as_register(), temp); 2015 if (d.is_constant()) d = temp; 2016 add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register()); 2017 return d; 2018 } else { 2019 intptr_t res = s1.as_constant() + s2.as_constant(); 2020 return res; 2021 } 2022 } 2023 } 2024 2025 RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 2026 assert(d.register_or_noreg() != G0, "lost side effect"); 2027 if (!is_simm13(s2.constant_or_zero())) 2028 s2 = (s2.as_constant() & 0xFF); 2029 if ((s2.is_constant() && s2.as_constant() == 0) || 2030 (s2.is_register() && s2.as_register() == G0)) { 2031 // Do nothing, just move value. 2032 if (s1.is_register()) { 2033 if (d.is_constant()) d = temp; 2034 mov(s1.as_register(), d.as_register()); 2035 return d; 2036 } else { 2037 return s1; 2038 } 2039 } 2040 2041 if (s1.is_register()) { 2042 assert_different_registers(s1.as_register(), temp); 2043 if (d.is_constant()) d = temp; 2044 sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2045 return d; 2046 } else { 2047 if (s2.is_register()) { 2048 assert_different_registers(s2.as_register(), temp); 2049 if (d.is_constant()) d = temp; 2050 set(s1.as_constant(), temp); 2051 sll_ptr(temp, s2.as_register(), d.as_register()); 2052 return d; 2053 } else { 2054 intptr_t res = s1.as_constant() << s2.as_constant(); 2055 return res; 2056 } 2057 } 2058 } 2059 2060 2061 // Look up the method for a megamorphic invokeinterface call. 2062 // The target method is determined by <intf_klass, itable_index>. 2063 // The receiver klass is in recv_klass. 2064 // On success, the result will be in method_result, and execution falls through. 2065 // On failure, execution transfers to the given label. 
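// In outline, the emitted scan behaves like this sketch (illustrative
// pseudocode; the names are descriptive, not exact HotSpot accessors):
//   scan = recv_klass + vtable_start + vtable_length * vte_size;  // first itableOffsetEntry
//   recv_klass += itable_index * itableMethodEntry_size + method_offset;
//   for (; ; scan += scan_step) {
//     if (scan->interface() == NULL)       goto L_no_such_interface;
//     if (scan->interface() == intf_klass) break;
//   }
//   method_result = *(Method**)(recv_klass + scan->offset());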
2066 void MacroAssembler::lookup_interface_method(Register recv_klass, 2067 Register intf_klass, 2068 RegisterOrConstant itable_index, 2069 Register method_result, 2070 Register scan_temp, 2071 Register sethi_temp, 2072 Label& L_no_such_interface) { 2073 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp); 2074 assert(itable_index.is_constant() || itable_index.as_register() == method_result, 2075 "caller must use same register for non-constant itable index as for method"); 2076 2077 Label L_no_such_interface_restore; 2078 bool did_save = false; 2079 if (scan_temp == noreg || sethi_temp == noreg) { 2080 Register recv_2 = recv_klass->is_global() ? recv_klass : L0; 2081 Register intf_2 = intf_klass->is_global() ? intf_klass : L1; 2082 assert(method_result->is_global(), "must be able to return value"); 2083 scan_temp = L2; 2084 sethi_temp = L3; 2085 save_frame_and_mov(0, recv_klass, recv_2, intf_klass, intf_2); 2086 recv_klass = recv_2; 2087 intf_klass = intf_2; 2088 did_save = true; 2089 } 2090 2091 // Compute start of first itableOffsetEntry (which is at the end of the vtable) 2092 int vtable_base = in_bytes(Klass::vtable_start_offset()); 2093 int scan_step = itableOffsetEntry::size() * wordSize; 2094 int vte_size = vtableEntry::size_in_bytes(); 2095 2096 lduw(recv_klass, in_bytes(Klass::vtable_length_offset()), scan_temp); 2097 // %%% We should store the aligned, prescaled offset in the klassoop. 2098 // Then the next several instructions would fold away. 2099 2100 int itb_offset = vtable_base; 2101 int itb_scale = exact_log2(vtableEntry::size_in_bytes()); 2102 sll(scan_temp, itb_scale, scan_temp); 2103 add(scan_temp, itb_offset, scan_temp); 2104 add(recv_klass, scan_temp, scan_temp); 2105 2106 // Adjust recv_klass by scaled itable_index, so we can free itable_index. 2107 RegisterOrConstant itable_offset = itable_index; 2108 itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset); 2109 itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset); 2110 add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass); 2111 2112 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) { 2113 // if (scan->interface() == intf) { 2114 // result = (klass + scan->offset() + itable_index); 2115 // } 2116 // } 2117 Label L_search, L_found_method; 2118 2119 for (int peel = 1; peel >= 0; peel--) { 2120 // %%%% Could load both offset and interface in one ldx, if they were 2121 // in the opposite order. This would save a load. 2122 ld_ptr(scan_temp, itableOffsetEntry::interface_offset_in_bytes(), method_result); 2123 2124 // Check that this entry is non-null. A null entry means that 2125 // the receiver class doesn't implement the interface, and wasn't the 2126 // same as when the caller was compiled. 2127 bpr(Assembler::rc_z, false, Assembler::pn, method_result, did_save ? L_no_such_interface_restore : L_no_such_interface); 2128 delayed()->cmp(method_result, intf_klass); 2129 2130 if (peel) { 2131 brx(Assembler::equal, false, Assembler::pt, L_found_method); 2132 } else { 2133 brx(Assembler::notEqual, false, Assembler::pn, L_search); 2134 // (invert the test to fall through to found_method...) 2135 } 2136 delayed()->add(scan_temp, scan_step, scan_temp); 2137 2138 if (!peel) break; 2139 2140 bind(L_search); 2141 } 2142 2143 bind(L_found_method); 2144 2145 // Got a hit. 
2146 int ito_offset = itableOffsetEntry::offset_offset_in_bytes(); 2147 // scan_temp[-scan_step] points to the vtable offset we need 2148 ito_offset -= scan_step; 2149 lduw(scan_temp, ito_offset, scan_temp); 2150 ld_ptr(recv_klass, scan_temp, method_result); 2151 2152 if (did_save) { 2153 Label L_done; 2154 ba(L_done); 2155 delayed()->restore(); 2156 2157 bind(L_no_such_interface_restore); 2158 ba(L_no_such_interface); 2159 delayed()->restore(); 2160 2161 bind(L_done); 2162 } 2163 } 2164 2165 2166 // virtual method calling 2167 void MacroAssembler::lookup_virtual_method(Register recv_klass, 2168 RegisterOrConstant vtable_index, 2169 Register method_result) { 2170 assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg()); 2171 Register sethi_temp = method_result; 2172 const int base = in_bytes(Klass::vtable_start_offset()) + 2173 // method pointer offset within the vtable entry: 2174 vtableEntry::method_offset_in_bytes(); 2175 RegisterOrConstant vtable_offset = vtable_index; 2176 // Each of the following three lines potentially generates an instruction. 2177 // But the total number of address formation instructions will always be 2178 // at most two, and will often be zero. In any case, it will be optimal. 2179 // If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x). 2180 // If vtable_index is a constant, we will have at most (set B+X<<N,t; ld_ptr k,t). 2181 vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size_in_bytes()), vtable_offset); 2182 vtable_offset = regcon_inc_ptr(vtable_offset, base, vtable_offset, sethi_temp); 2183 Address vtable_entry_addr(recv_klass, ensure_simm13_or_reg(vtable_offset, sethi_temp)); 2184 ld_ptr(vtable_entry_addr, method_result); 2185 } 2186 2187 2188 void MacroAssembler::check_klass_subtype(Register sub_klass, 2189 Register super_klass, 2190 Register temp_reg, 2191 Register temp2_reg, 2192 Label& L_success) { 2193 Register sub_2 = sub_klass; 2194 Register sup_2 = super_klass; 2195 if (!sub_2->is_global()) sub_2 = L0; 2196 if (!sup_2->is_global()) sup_2 = L1; 2197 bool did_save = false; 2198 if (temp_reg == noreg || temp2_reg == noreg) { 2199 temp_reg = L2; 2200 temp2_reg = L3; 2201 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 2202 sub_klass = sub_2; 2203 super_klass = sup_2; 2204 did_save = true; 2205 } 2206 Label L_failure, L_pop_to_failure, L_pop_to_success; 2207 check_klass_subtype_fast_path(sub_klass, super_klass, 2208 temp_reg, temp2_reg, 2209 (did_save ? &L_pop_to_success : &L_success), 2210 (did_save ? 
&L_pop_to_failure : &L_failure), NULL); 2211 2212 if (!did_save) 2213 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 2214 check_klass_subtype_slow_path(sub_2, sup_2, 2215 L2, L3, L4, L5, 2216 NULL, &L_pop_to_failure); 2217 2218 // on success: 2219 bind(L_pop_to_success); 2220 restore(); 2221 ba_short(L_success); 2222 2223 // on failure: 2224 bind(L_pop_to_failure); 2225 restore(); 2226 bind(L_failure); 2227 } 2228 2229 2230 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 2231 Register super_klass, 2232 Register temp_reg, 2233 Register temp2_reg, 2234 Label* L_success, 2235 Label* L_failure, 2236 Label* L_slow_path, 2237 RegisterOrConstant super_check_offset) { 2238 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 2239 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2240 2241 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 2242 bool need_slow_path = (must_load_sco || 2243 super_check_offset.constant_or_zero() == sco_offset); 2244 2245 assert_different_registers(sub_klass, super_klass, temp_reg); 2246 if (super_check_offset.is_register()) { 2247 assert_different_registers(sub_klass, super_klass, temp_reg, 2248 super_check_offset.as_register()); 2249 } else if (must_load_sco) { 2250 assert(temp2_reg != noreg, "supply either a temp or a register offset"); 2251 } 2252 2253 Label L_fallthrough; 2254 int label_nulls = 0; 2255 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 2256 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 2257 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } 2258 assert(label_nulls <= 1 || 2259 (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path), 2260 "at most one NULL in the batch, usually"); 2261 2262 // If the pointers are equal, we are done (e.g., String[] elements). 2263 // This self-check enables sharing of secondary supertype arrays among 2264 // non-primary types such as array-of-interface. Otherwise, each such 2265 // type would need its own customized SSA. 2266 // We move this check to the front of the fast path because many 2267 // type checks are in fact trivially successful in this manner, 2268 // so we get a nicely predicted branch right at the start of the check. 2269 cmp(super_klass, sub_klass); 2270 brx(Assembler::equal, false, Assembler::pn, *L_success); 2271 delayed()->nop(); 2272 2273 // Check the supertype display: 2274 if (must_load_sco) { 2275 // The super check offset is always positive... 2276 lduw(super_klass, sco_offset, temp2_reg); 2277 super_check_offset = RegisterOrConstant(temp2_reg); 2278 // super_check_offset is register. 2279 assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register()); 2280 } 2281 ld_ptr(sub_klass, super_check_offset, temp_reg); 2282 cmp(super_klass, temp_reg); 2283 2284 // This check has worked decisively for primary supers. 2285 // Secondary supers are sought in the super_cache ('super_cache_addr'). 2286 // (Secondary supers are interfaces and very deeply nested subtypes.) 2287 // This works in the same check above because of a tricky aliasing 2288 // between the super_cache and the primary super display elements. 2289 // (The 'super_check_addr' can address either, as the case requires.) 2290 // Note that the cache is updated below if it does not help us find 2291 // what we need immediately. 2292 // So if it was a primary super, we can just fail immediately. 
2293 // Otherwise, it's the slow path for us (no success at this point). 2294 2295 // Hacked ba(), which may only be used just before L_fallthrough. 2296 #define FINAL_JUMP(label) \ 2297 if (&(label) != &L_fallthrough) { \ 2298 ba(label); delayed()->nop(); \ 2299 } 2300 2301 if (super_check_offset.is_register()) { 2302 brx(Assembler::equal, false, Assembler::pn, *L_success); 2303 delayed()->cmp(super_check_offset.as_register(), sc_offset); 2304 2305 if (L_failure == &L_fallthrough) { 2306 brx(Assembler::equal, false, Assembler::pt, *L_slow_path); 2307 delayed()->nop(); 2308 } else { 2309 brx(Assembler::notEqual, false, Assembler::pn, *L_failure); 2310 delayed()->nop(); 2311 FINAL_JUMP(*L_slow_path); 2312 } 2313 } else if (super_check_offset.as_constant() == sc_offset) { 2314 // Need a slow path; fast failure is impossible. 2315 if (L_slow_path == &L_fallthrough) { 2316 brx(Assembler::equal, false, Assembler::pt, *L_success); 2317 delayed()->nop(); 2318 } else { 2319 brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path); 2320 delayed()->nop(); 2321 FINAL_JUMP(*L_success); 2322 } 2323 } else { 2324 // No slow path; it's a fast decision. 2325 if (L_failure == &L_fallthrough) { 2326 brx(Assembler::equal, false, Assembler::pt, *L_success); 2327 delayed()->nop(); 2328 } else { 2329 brx(Assembler::notEqual, false, Assembler::pn, *L_failure); 2330 delayed()->nop(); 2331 FINAL_JUMP(*L_success); 2332 } 2333 } 2334 2335 bind(L_fallthrough); 2336 2337 #undef FINAL_JUMP 2338 } 2339 2340 2341 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 2342 Register super_klass, 2343 Register count_temp, 2344 Register scan_temp, 2345 Register scratch_reg, 2346 Register coop_reg, 2347 Label* L_success, 2348 Label* L_failure) { 2349 assert_different_registers(sub_klass, super_klass, 2350 count_temp, scan_temp, scratch_reg, coop_reg); 2351 2352 Label L_fallthrough, L_loop; 2353 int label_nulls = 0; 2354 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 2355 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 2356 assert(label_nulls <= 1, "at most one NULL in the batch"); 2357 2358 // a couple of useful fields in sub_klass: 2359 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 2360 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 2361 2362 // Do a linear scan of the secondary super-klass chain. 2363 // This code is rarely used, so simplicity is a virtue here. 2364 2365 #ifndef PRODUCT 2366 int* pst_counter = &SharedRuntime::_partial_subtype_ctr; 2367 inc_counter((address) pst_counter, count_temp, scan_temp); 2368 #endif 2369 2370 // We will consult the secondary-super array. 2371 ld_ptr(sub_klass, ss_offset, scan_temp); 2372 2373 Register search_key = super_klass; 2374 2375 // Load the array length. (Positive movl does right thing on LP64.) 2376 lduw(scan_temp, Array<Klass*>::length_offset_in_bytes(), count_temp); 2377 2378 // Check for empty secondary super list 2379 tst(count_temp); 2380 2381 // In the array of super classes elements are pointer sized. 2382 int element_size = wordSize; 2383 2384 // Top of search loop 2385 bind(L_loop); 2386 br(Assembler::equal, false, Assembler::pn, *L_failure); 2387 delayed()->add(scan_temp, element_size, scan_temp); 2388 2389 // Skip the array header in all array accesses. 
2390 int elem_offset = Array<Klass*>::base_offset_in_bytes(); 2391 elem_offset -= element_size; // the scan pointer was pre-incremented also 2392 2393 // Load next super to check 2394 ld_ptr( scan_temp, elem_offset, scratch_reg ); 2395 2396 // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list 2397 cmp(scratch_reg, search_key); 2398 2399 // A miss means we are NOT a subtype and need to keep looping 2400 brx(Assembler::notEqual, false, Assembler::pn, L_loop); 2401 delayed()->deccc(count_temp); // decrement trip counter in delay slot 2402 2403 // Success. Cache the super we found and proceed in triumph. 2404 st_ptr(super_klass, sub_klass, sc_offset); 2405 2406 if (L_success != &L_fallthrough) { 2407 ba(*L_success); 2408 delayed()->nop(); 2409 } 2410 2411 bind(L_fallthrough); 2412 } 2413 2414 2415 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot, 2416 Register temp_reg, 2417 int extra_slot_offset) { 2418 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 2419 int stackElementSize = Interpreter::stackElementSize; 2420 int offset = extra_slot_offset * stackElementSize; 2421 if (arg_slot.is_constant()) { 2422 offset += arg_slot.as_constant() * stackElementSize; 2423 return offset; 2424 } else { 2425 assert(temp_reg != noreg, "must specify"); 2426 sll_ptr(arg_slot.as_register(), exact_log2(stackElementSize), temp_reg); 2427 if (offset != 0) 2428 add(temp_reg, offset, temp_reg); 2429 return temp_reg; 2430 } 2431 } 2432 2433 2434 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 2435 Register temp_reg, 2436 int extra_slot_offset) { 2437 return Address(Gargs, argument_offset(arg_slot, temp_reg, extra_slot_offset)); 2438 } 2439 2440 2441 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, 2442 Register temp_reg, 2443 Label& done, Label* slow_case, 2444 BiasedLockingCounters* counters) { 2445 assert(UseBiasedLocking, "why call this otherwise?"); 2446 2447 if (PrintBiasedLockingStatistics) { 2448 assert_different_registers(obj_reg, mark_reg, temp_reg, O7); 2449 if (counters == NULL) 2450 counters = BiasedLocking::counters(); 2451 } 2452 2453 Label cas_label; 2454 2455 // Biased locking 2456 // See whether the lock is currently biased toward our thread and 2457 // whether the epoch is still valid 2458 // Note that the runtime guarantees sufficient alignment of JavaThread 2459 // pointers to allow age to be placed into low bits 2460 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout"); 2461 and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); 2462 cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label); 2463 2464 load_klass(obj_reg, temp_reg); 2465 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2466 or3(G2_thread, temp_reg, temp_reg); 2467 xor3(mark_reg, temp_reg, temp_reg); 2468 andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg); 2469 if (counters != NULL) { 2470 cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg); 2471 // Reload mark_reg as we may need it later 2472 ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg); 2473 } 2474 brx(Assembler::equal, true, Assembler::pt, done); 2475 delayed()->nop(); 2476 2477 Label try_revoke_bias; 2478 Label try_rebias; 2479 Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes()); 2480 
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
2481
2482 // At this point we know that the header has the bias pattern and
2483 // that we are not the bias owner in the current epoch. We need to
2484 // figure out more details about the state of the header in order to
2485 // know what operations can be legally performed on the object's
2486 // header.
2487
2488 // If the low three bits in the xor result aren't clear, that means
2489 // the prototype header is no longer biased and we have to revoke
2490 // the bias on this object.
2491 btst(markOopDesc::biased_lock_mask_in_place, temp_reg);
2492 brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias);
2493
2494 // Biasing is still enabled for this data type. See whether the
2495 // epoch of the current bias is still valid, meaning that the epoch
2496 // bits of the mark word are equal to the epoch bits of the
2497 // prototype header. (Note that the prototype header's epoch bits
2498 // only change at a safepoint.) If not, attempt to rebias the object
2499 // toward the current thread. Note that we must be absolutely sure
2500 // that the current epoch is invalid in order to do this because
2501 // otherwise the manipulations it performs on the mark word are
2502 // illegal.
2503 delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg);
2504 brx(Assembler::notZero, false, Assembler::pn, try_rebias);
2505
2506 // The epoch of the current bias is still valid but we know nothing
2507 // about the owner; it might be set or it might be clear. Try to
2508 // acquire the bias of the object using an atomic operation. If this
2509 // fails we will go into the runtime to revoke the object's bias.
2510 // Note that we first construct the presumed unbiased header so we
2511 // don't accidentally blow away another thread's valid bias.
2512 delayed()->and3(mark_reg,
2513 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
2514 mark_reg);
2515 or3(G2_thread, mark_reg, temp_reg);
2516 cas_ptr(mark_addr.base(), mark_reg, temp_reg);
2517 // If the biasing toward our thread failed, this means that
2518 // another thread succeeded in biasing it toward itself and we
2519 // need to revoke that bias. The revocation will occur in the
2520 // interpreter runtime in the slow case.
2521 cmp(mark_reg, temp_reg);
2522 if (counters != NULL) {
2523 cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg);
2524 }
2525 if (slow_case != NULL) {
2526 brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
2527 delayed()->nop();
2528 }
2529 ba_short(done);
2530
2531 bind(try_rebias);
2532 // At this point we know the epoch has expired, meaning that the
2533 // current "bias owner", if any, is actually invalid. Under these
2534 // circumstances _only_, we are allowed to use the current header's
2535 // value as the comparison value when doing the cas to acquire the
2536 // bias in the current epoch. In other words, we allow transfer of
2537 // the bias from one thread to another directly in this situation.
2538 //
2539 // FIXME: due to a lack of registers we currently blow away the age
2540 // bits in this situation. Should attempt to preserve them.
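// For illustration, the rebias attempt below amounts to:
//   expected = mark_reg;                            // stale bias owner/epoch
//   desired  = klass->prototype_header() | thread;  // fresh epoch, biased to us
//   cas(&obj->mark, expected, desired);             // succeeds iff mark unchanged
// A failed CAS means another thread rebiased the object first; that bias is
// then revoked in the runtime (the slow case).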
2541 load_klass(obj_reg, temp_reg); 2542 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2543 or3(G2_thread, temp_reg, temp_reg); 2544 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2545 // If the biasing toward our thread failed, this means that 2546 // another thread succeeded in biasing it toward itself and we 2547 // need to revoke that bias. The revocation will occur in the 2548 // interpreter runtime in the slow case. 2549 cmp(mark_reg, temp_reg); 2550 if (counters != NULL) { 2551 cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg); 2552 } 2553 if (slow_case != NULL) { 2554 brx(Assembler::notEqual, true, Assembler::pn, *slow_case); 2555 delayed()->nop(); 2556 } 2557 ba_short(done); 2558 2559 bind(try_revoke_bias); 2560 // The prototype mark in the klass doesn't have the bias bit set any 2561 // more, indicating that objects of this data type are not supposed 2562 // to be biased any more. We are going to try to reset the mark of 2563 // this object to the prototype value and fall through to the 2564 // CAS-based locking scheme. Note that if our CAS fails, it means 2565 // that another thread raced us for the privilege of revoking the 2566 // bias of this particular object, so it's okay to continue in the 2567 // normal locking code. 2568 // 2569 // FIXME: due to a lack of registers we currently blow away the age 2570 // bits in this situation. Should attempt to preserve them. 2571 load_klass(obj_reg, temp_reg); 2572 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2573 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2574 // Fall through to the normal CAS-based lock, because no matter what 2575 // the result of the above CAS, some thread must have succeeded in 2576 // removing the bias bit from the object's header. 2577 if (counters != NULL) { 2578 cmp(mark_reg, temp_reg); 2579 cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg); 2580 } 2581 2582 bind(cas_label); 2583 } 2584 2585 void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done, 2586 bool allow_delay_slot_filling) { 2587 // Check for biased locking unlock case, which is a no-op 2588 // Note: we do not have to check the thread ID for two reasons. 2589 // First, the interpreter checks for IllegalMonitorStateException at 2590 // a higher level. Second, if the bias was revoked while we held the 2591 // lock, the object could not be rebiased toward another thread, so 2592 // the bias bit would be clear. 2593 ld_ptr(mark_addr, temp_reg); 2594 and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); 2595 cmp(temp_reg, markOopDesc::biased_lock_pattern); 2596 brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done); 2597 delayed(); 2598 if (!allow_delay_slot_filling) { 2599 nop(); 2600 } 2601 } 2602 2603 2604 // compiler_lock_object() and compiler_unlock_object() are direct transliterations 2605 // of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments. 2606 // The code could be tightened up considerably. 2607 // 2608 // box->dhw disposition - post-conditions at DONE_LABEL. 2609 // - Successful inflated lock: box->dhw != 0. 2610 // Any non-zero value suffices. 2611 // Consider G2_thread, rsp, boxReg, or markOopDesc::unused_mark() 2612 // - Successful Stack-lock: box->dhw == mark. 2613 // box->dhw must contain the displaced mark word value 2614 // - Failure -- icc.ZFlag == 0 and box->dhw is undefined. 
2615 // The slow-path fast_enter() and slow_enter() operators
2616 // are responsible for setting box->dhw = NonZero (typically markOopDesc::unused_mark()).
2617 // - Biased: box->dhw is undefined
2618 //
2619 // SPARC refworkload performance - specifically jetstream and scimark - is
2620 // extremely sensitive to the size of the code emitted by compiler_lock_object
2621 // and compiler_unlock_object. Critically, the key factor is code size, not path
2622 // length. (Simple experiments to pad CLO with unexecuted NOPs demonstrate the
2623 // effect).
2624
2625
2626 void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
2627 Register Rbox, Register Rscratch,
2628 BiasedLockingCounters* counters,
2629 bool try_bias) {
2630 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
2631
2632 verify_oop(Roop);
2633 Label done ;
2634
2635 if (counters != NULL) {
2636 inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch);
2637 }
2638
2639 if (EmitSync & 1) {
2640 mov(3, Rscratch);
2641 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2642 cmp(SP, G0);
2643 return ;
2644 }
2645
2646 if (EmitSync & 2) {
2647
2648 // Fetch object's markword
2649 ld_ptr(mark_addr, Rmark);
2650
2651 if (try_bias) {
2652 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
2653 }
2654
2655 // Save Rbox in Rscratch to be used for the cas operation
2656 mov(Rbox, Rscratch);
2657
2658 // set Rmark to markOop | markOopDesc::unlocked_value
2659 or3(Rmark, markOopDesc::unlocked_value, Rmark);
2660
2661 // Initialize the box. (Must happen before we update the object mark!)
2662 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
2663
2664 // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
2665 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
2666 cas_ptr(mark_addr.base(), Rmark, Rscratch);
2667
2668 // if compare/exchange succeeded we found an unlocked object and we now have locked it,
2669 // hence we are done
2670 cmp(Rmark, Rscratch);
2671 #ifdef _LP64
2672 sub(Rscratch, STACK_BIAS, Rscratch);
2673 #endif
2674 brx(Assembler::equal, false, Assembler::pt, done);
2675 delayed()->sub(Rscratch, SP, Rscratch); // pull next instruction into delay slot
2676
2677 // we did not find an unlocked object so see if this is a recursive case
2678 // sub(Rscratch, SP, Rscratch);
2679 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
2680 andcc(Rscratch, 0xfffff003, Rscratch);
2681 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2682 bind (done);
2683 return ;
2684 }
2685
2686 Label Egress ;
2687
2688 if (EmitSync & 256) {
2689 Label IsInflated ;
2690
2691 ld_ptr(mark_addr, Rmark); // fetch obj->mark
2692 // Triage: biased, stack-locked, neutral, inflated
2693 if (try_bias) {
2694 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
2695 // Invariant: if control reaches this point in the emitted stream
2696 // then Rmark has not been modified.
2697 }
2698
2699 // Store mark into displaced mark field in the on-stack basic-lock "box"
2700 // Critically, this must happen before the CAS
2701 // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty.
2702 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
2703 andcc(Rmark, 2, G0);
2704 brx(Assembler::notZero, false, Assembler::pn, IsInflated);
2705 delayed()->
2706
2707 // Try stack-lock acquisition.
2708 // Beware: the 1st instruction is in a delay slot 2709 mov(Rbox, Rscratch); 2710 or3(Rmark, markOopDesc::unlocked_value, Rmark); 2711 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2712 cas_ptr(mark_addr.base(), Rmark, Rscratch); 2713 cmp(Rmark, Rscratch); 2714 brx(Assembler::equal, false, Assembler::pt, done); 2715 delayed()->sub(Rscratch, SP, Rscratch); 2716 2717 // Stack-lock attempt failed - check for recursive stack-lock. 2718 // See the comments below about how we might remove this case. 2719 #ifdef _LP64 2720 sub(Rscratch, STACK_BIAS, Rscratch); 2721 #endif 2722 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); 2723 andcc(Rscratch, 0xfffff003, Rscratch); 2724 br(Assembler::always, false, Assembler::pt, done); 2725 delayed()-> st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2726 2727 bind(IsInflated); 2728 if (EmitSync & 64) { 2729 // If m->owner != null goto IsLocked 2730 // Pessimistic form: Test-and-CAS vs CAS 2731 // The optimistic form avoids RTS->RTO cache line upgrades. 2732 ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rscratch); 2733 andcc(Rscratch, Rscratch, G0); 2734 brx(Assembler::notZero, false, Assembler::pn, done); 2735 delayed()->nop(); 2736 // m->owner == null : it's unlocked. 2737 } 2738 2739 // Try to CAS m->owner from null to Self 2740 // Invariant: if we acquire the lock then _recursions should be 0. 2741 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark); 2742 mov(G2_thread, Rscratch); 2743 cas_ptr(Rmark, G0, Rscratch); 2744 cmp(Rscratch, G0); 2745 // Intentional fall-through into done 2746 } else { 2747 // Aggressively avoid the Store-before-CAS penalty 2748 // Defer the store into box->dhw until after the CAS 2749 Label IsInflated, Recursive ; 2750 2751 // Anticipate CAS -- Avoid RTS->RTO upgrade 2752 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); 2753 2754 ld_ptr(mark_addr, Rmark); // fetch obj->mark 2755 // Triage: biased, stack-locked, neutral, inflated 2756 2757 if (try_bias) { 2758 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters); 2759 // Invariant: if control reaches this point in the emitted stream 2760 // then Rmark has not been modified. 2761 } 2762 andcc(Rmark, 2, G0); 2763 brx(Assembler::notZero, false, Assembler::pn, IsInflated); 2764 delayed()-> // Beware - dangling delay-slot 2765 2766 // Try stack-lock acquisition. 2767 // Transiently install BUSY (0) encoding in the mark word. 2768 // if the CAS of 0 into the mark was successful then we execute: 2769 // ST box->dhw = mark -- save fetched mark in on-stack basiclock box 2770 // ST obj->mark = box -- overwrite transient 0 value 2771 // This presumes TSO, of course. 2772 2773 mov(0, Rscratch); 2774 or3(Rmark, markOopDesc::unlocked_value, Rmark); 2775 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2776 cas_ptr(mark_addr.base(), Rmark, Rscratch); 2777 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); 2778 cmp(Rscratch, Rmark); 2779 brx(Assembler::notZero, false, Assembler::pn, Recursive); 2780 delayed()->st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2781 if (counters != NULL) { 2782 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch); 2783 } 2784 ba(done); 2785 delayed()->st_ptr(Rbox, mark_addr); 2786 2787 bind(Recursive); 2788 // Stack-lock attempt failed - check for recursive stack-lock. 
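// For illustration, the recursive test emitted below behaves like
// (Rscratch holds the mark fetched by the failed CAS; the STACK_BIAS
// adjustment applies only on _LP64):
//   delta    = (fetched_mark - STACK_BIAS) - SP;
//   box->dhw = delta & 0xfffff003;  // 0 iff the mark points into our own
//                                   //  frame within a page of SP => recursive;
//                                   //  icc.zf == 1 then signals success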
2789 // Tests show that we can remove the recursive case with no impact
2790 // on refworkload 0.83. If we need to reduce the size of the code
2791 // emitted by compiler_lock_object() the recursive case is a perfect
2792 // candidate.
2793 //
2794 // A more extreme idea is to always inflate on stack-lock recursion.
2795 // This lets us eliminate the recursive checks in compiler_lock_object
2796 // and compiler_unlock_object and the (box->dhw == 0) encoding.
2797 // A brief experiment - requiring changes to synchronizer.cpp and the
2798 // interpreter - showed a performance *increase*. In the same experiment I eliminated
2799 // the fast-path stack-lock code from the interpreter and always passed
2800 // control to the "slow" operators in synchronizer.cpp.
2801
2802 // Rscratch contains the fetched obj->mark value from the failed CAS.
2803 #ifdef _LP64
2804 sub(Rscratch, STACK_BIAS, Rscratch);
2805 #endif
2806 sub(Rscratch, SP, Rscratch);
2807 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
2808 andcc(Rscratch, 0xfffff003, Rscratch);
2809 if (counters != NULL) {
2810 // Accounting needs the Rscratch register
2811 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2812 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
2813 ba_short(done);
2814 } else {
2815 ba(done);
2816 delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2817 }
2818
2819 bind (IsInflated);
2820
2821 // Try to CAS m->owner from null to Self
2822 // Invariant: if we acquire the lock then _recursions should be 0.
2823 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
2824 mov(G2_thread, Rscratch);
2825 cas_ptr(Rmark, G0, Rscratch);
2826 andcc(Rscratch, Rscratch, G0); // set ICCs for done: icc.zf iff success
2827 // set icc.zf : 1=success 0=failure
2828 // ST box->displaced_header = NonZero.
2829 // Any non-zero value suffices:
2830 // markOopDesc::unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
2831 st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
2832 // Intentional fall-through into done
2833 }
2834
2835 bind (done);
2836 }
2837
2838 void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
2839 Register Rbox, Register Rscratch,
2840 bool try_bias) {
2841 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
2842
2843 Label done ;
2844
2845 if (EmitSync & 4) {
2846 cmp(SP, G0);
2847 return ;
2848 }
2849
2850 if (EmitSync & 8) {
2851 if (try_bias) {
2852 biased_locking_exit(mark_addr, Rscratch, done);
2853 }
2854
2855 // Test first if it is a fast recursive unlock
2856 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
2857 br_null_short(Rmark, Assembler::pt, done);
2858
2859 // Check if it is still a lightweight lock; this is true if we see
2860 // the stack address of the basicLock in the markOop of the object
2861 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
2862 cas_ptr(mark_addr.base(), Rbox, Rmark);
2863 ba(done);
2864 delayed()->cmp(Rbox, Rmark);
2865 bind(done);
2866 return ;
2867 }
2868
2869 // Beware ... If the aggregate size of the code emitted by CLO and CUO
2870 // is too large, performance rolls abruptly off a cliff.
2871 // This could be related to inlining policies, code cache management, or
2872 // I$ effects.
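// An illustrative outline of the fast-path unlock that follows (not the
// exact instruction sequence):
//   if (box->dhw == 0)         goto Done      // recursive stack-lock: nothing to undo
//   if ((obj->mark & 2) != 0)  goto Inflated  // monitor exit, 1-0 protocol below
//   goto LStacked                             // CAS the displaced mark back into obj->mark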
2873 Label LStacked ; 2874 2875 if (try_bias) { 2876 // TODO: eliminate redundant LDs of obj->mark 2877 biased_locking_exit(mark_addr, Rscratch, done); 2878 } 2879 2880 ld_ptr(Roop, oopDesc::mark_offset_in_bytes(), Rmark); 2881 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch); 2882 andcc(Rscratch, Rscratch, G0); 2883 brx(Assembler::zero, false, Assembler::pn, done); 2884 delayed()->nop(); // consider: relocate fetch of mark, above, into this DS 2885 andcc(Rmark, 2, G0); 2886 brx(Assembler::zero, false, Assembler::pt, LStacked); 2887 delayed()->nop(); 2888 2889 // It's inflated 2890 // Conceptually we need a #loadstore|#storestore "release" MEMBAR before 2891 // the ST of 0 into _owner which releases the lock. This prevents loads 2892 // and stores within the critical section from reordering (floating) 2893 // past the store that releases the lock. But TSO is a strong memory model 2894 // and that particular flavor of barrier is a noop, so we can safely elide it. 2895 // Note that we use 1-0 locking by default for the inflated case. We 2896 // close the resultant (and rare) race by having contended threads in 2897 // monitorenter periodically poll _owner. 2898 2899 if (EmitSync & 1024) { 2900 // Emit code to check that _owner == Self 2901 // We could fold the _owner test into subsequent code more efficiently 2902 // than using a stand-alone check, but since _owner checking is off by 2903 // default we don't bother. We also might consider predicating the 2904 // _owner==Self check on Xcheck:jni or running on a debug build. 2905 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), Rscratch); 2906 orcc(Rscratch, G0, G0); 2907 brx(Assembler::notZero, false, Assembler::pn, done); 2908 delayed()->nop(); 2909 } 2910 2911 if (EmitSync & 512) { 2912 // classic lock release code absent 1-0 locking 2913 // m->Owner = null; 2914 // membar #storeload 2915 // if (m->cxq|m->EntryList) == null goto Success 2916 // if (m->succ != null) goto Success 2917 // if CAS (&m->Owner,0,Self) != 0 goto Success 2918 // goto SlowPath 2919 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), Rbox); 2920 orcc(Rbox, G0, G0); 2921 brx(Assembler::notZero, false, Assembler::pn, done); 2922 delayed()->nop(); 2923 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 2924 if (os::is_MP()) { membar(StoreLoad); } 2925 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)), Rscratch); 2926 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)), Rbox); 2927 orcc(Rbox, Rscratch, G0); 2928 brx(Assembler::zero, false, Assembler::pt, done); 2929 delayed()-> 2930 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch); 2931 andcc(Rscratch, Rscratch, G0); 2932 brx(Assembler::notZero, false, Assembler::pt, done); 2933 delayed()->andcc(G0, G0, G0); 2934 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark); 2935 mov(G2_thread, Rscratch); 2936 cas_ptr(Rmark, G0, Rscratch); 2937 cmp(Rscratch, G0); 2938 // invert icc.zf and goto done 2939 brx(Assembler::notZero, false, Assembler::pt, done); 2940 delayed()->cmp(G0, G0); 2941 br(Assembler::always, false, Assembler::pt, done); 2942 delayed()->cmp(G0, 1); 2943 } else { 2944 // 1-0 form : avoids CAS and MEMBAR in the common case 2945 // Do not bother to ratify that m->Owner == Self. 
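// In outline, the default 1-0 exit below behaves like this sketch (the
// EmitSync & 16384 variant reorders the Owner=NULL store):
//   if (m->recursions != 0)              goto SlowPath
//   m->Owner = NULL;                     // release; done in the delay slot
//   if ((m->cxq | m->EntryList) == 0)    goto Success
//   membar #storeload
//   if (m->succ != NULL)                 goto Success  // an heir presumptive exists
//   if (CAS(&m->Owner, NULL, Self) != 0) goto Success  // another thread took the lock
//   goto SlowPath                        // we reacquired it and must wake a successor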
2946 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), Rbox);
2947 orcc(Rbox, G0, G0);
2948 brx(Assembler::notZero, false, Assembler::pn, done);
2949 delayed()->
2950 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)), Rscratch);
2951 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)), Rbox);
2952 orcc(Rbox, Rscratch, G0);
2953 if (EmitSync & 16384) {
2954 // As an optional optimization, if (EntryList|cxq) != null and _succ is null then
2955 // we should transfer control directly to the slow-path.
2956 // This test makes the reacquire operation below very infrequent.
2957 // The logic is equivalent to:
2958 // if (cxq|EntryList) == null : Owner=null; goto Success
2959 // if succ == null : goto SlowPath
2960 // Owner=null; membar #storeload
2961 // if succ != null : goto Success
2962 // if CAS(&Owner,null,Self) != null goto Success
2963 // goto SlowPath
2964 brx(Assembler::zero, true, Assembler::pt, done);
2965 delayed()->
2966 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2967 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch);
2968 andcc(Rscratch, Rscratch, G0);
2969 brx(Assembler::zero, false, Assembler::pt, done);
2970 delayed()->orcc(G0, 1, G0);
2971 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2972 } else {
2973 brx(Assembler::zero, false, Assembler::pt, done);
2974 delayed()->
2975 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2976 }
2977 if (os::is_MP()) { membar(StoreLoad); }
2978 // Check that _succ is (or remains) non-zero
2979 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch);
2980 andcc(Rscratch, Rscratch, G0);
2981 brx(Assembler::notZero, false, Assembler::pt, done);
2982 delayed()->andcc(G0, G0, G0);
2983 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
2984 mov(G2_thread, Rscratch);
2985 cas_ptr(Rmark, G0, Rscratch);
2986 cmp(Rscratch, G0);
2987 // invert icc.zf and goto done
2988 // A slightly better v8+/v9 idiom would be the following:
2989 // movrnz Rscratch,1,Rscratch
2990 // ba done
2991 // xorcc Rscratch,1,G0
2992 // In v8+ mode the idiom would be valid IFF Rscratch was a G or O register
2993 brx(Assembler::notZero, false, Assembler::pt, done);
2994 delayed()->cmp(G0, G0);
2995 br(Assembler::always, false, Assembler::pt, done);
2996 delayed()->cmp(G0, 1);
2997 }
2998
2999 bind (LStacked);
3000 // Consider: we could replace the expensive CAS in the exit
3001 // path with a simple ST of the displaced mark value fetched from
3002 // the on-stack basiclock box. That admits a race where a thread T2
3003 // in the slow lock path -- inflating with monitor M -- could race a
3004 // thread T1 in the fast unlock path, resulting in a missed wakeup for T2.
3005 // More precisely T1 in the stack-lock unlock path could "stomp" the
3006 // inflated mark value M installed by T2, resulting in an orphan
3007 // object monitor M and T2 becoming stranded. We can remedy that situation
3008 // by having T2 periodically poll the object's mark word using timed wait
3009 // operations. If T2 discovers that a stomp has occurred it vacates
3010 // the monitor M and wakes any other threads stranded on the now-orphan M.
3011 // In addition the monitor scavenger, which performs deflation,
3012 // would also need to check for orphan monitors and stranded threads.
3013 //
3014 // Finally, inflation is also used when T2 needs to assign a hashCode
3015 // to O and O is stack-locked by T1.
The "stomp" race could cause 3016 // an assigned hashCode value to be lost. We can avoid that condition 3017 // and provide the necessary hashCode stability invariants by ensuring 3018 // that hashCode generation is idempotent between copying GCs. 3019 // For example we could compute the hashCode of an object O as 3020 // O's heap address XOR some high quality RNG value that is refreshed 3021 // at GC-time. The monitor scavenger would install the hashCode 3022 // found in any orphan monitors. Again, the mechanism admits a 3023 // lost-update "stomp" WAW race but detects and recovers as needed. 3024 // 3025 // A prototype implementation showed excellent results, although 3026 // the scavenger and timeout code was rather involved. 3027 3028 cas_ptr(mark_addr.base(), Rbox, Rscratch); 3029 cmp(Rbox, Rscratch); 3030 // Intentional fall through into done ... 3031 3032 bind(done); 3033 } 3034 3035 3036 3037 void MacroAssembler::print_CPU_state() { 3038 // %%%%% need to implement this 3039 } 3040 3041 void MacroAssembler::verify_FPU(int stack_depth, const char* s) { 3042 // %%%%% need to implement this 3043 } 3044 3045 void MacroAssembler::push_IU_state() { 3046 // %%%%% need to implement this 3047 } 3048 3049 3050 void MacroAssembler::pop_IU_state() { 3051 // %%%%% need to implement this 3052 } 3053 3054 3055 void MacroAssembler::push_FPU_state() { 3056 // %%%%% need to implement this 3057 } 3058 3059 3060 void MacroAssembler::pop_FPU_state() { 3061 // %%%%% need to implement this 3062 } 3063 3064 3065 void MacroAssembler::push_CPU_state() { 3066 // %%%%% need to implement this 3067 } 3068 3069 3070 void MacroAssembler::pop_CPU_state() { 3071 // %%%%% need to implement this 3072 } 3073 3074 3075 3076 void MacroAssembler::verify_tlab() { 3077 #ifdef ASSERT 3078 if (UseTLAB && VerifyOops) { 3079 Label next, next2, ok; 3080 Register t1 = L0; 3081 Register t2 = L1; 3082 Register t3 = L2; 3083 3084 save_frame(0); 3085 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 3086 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2); 3087 or3(t1, t2, t3); 3088 cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next); 3089 STOP("assert(top >= start)"); 3090 should_not_reach_here(); 3091 3092 bind(next); 3093 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 3094 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2); 3095 or3(t3, t2, t3); 3096 cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2); 3097 STOP("assert(top <= end)"); 3098 should_not_reach_here(); 3099 3100 bind(next2); 3101 and3(t3, MinObjAlignmentInBytesMask, t3); 3102 cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok); 3103 STOP("assert(aligned)"); 3104 should_not_reach_here(); 3105 3106 bind(ok); 3107 restore(); 3108 } 3109 #endif 3110 } 3111 3112 3113 void MacroAssembler::eden_allocate( 3114 Register obj, // result: pointer to object after successful allocation 3115 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 3116 int con_size_in_bytes, // object size in bytes if known at compile time 3117 Register t1, // temp register 3118 Register t2, // temp register 3119 Label& slow_case // continuation point if fast allocation fails 3120 ){ 3121 // make sure arguments make sense 3122 assert_different_registers(obj, var_size_in_bytes, t1, t2); 3123 assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size"); 3124 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, 
"object size is not multiple of alignment"); 3125 3126 if (!Universe::heap()->supports_inline_contig_alloc()) { 3127 // No allocation in the shared eden. 3128 ba(slow_case); 3129 delayed()->nop(); 3130 } else { 3131 // get eden boundaries 3132 // note: we need both top & top_addr! 3133 const Register top_addr = t1; 3134 const Register end = t2; 3135 3136 CollectedHeap* ch = Universe::heap(); 3137 set((intx)ch->top_addr(), top_addr); 3138 intx delta = (intx)ch->end_addr() - (intx)ch->top_addr(); 3139 ld_ptr(top_addr, delta, end); 3140 ld_ptr(top_addr, 0, obj); 3141 3142 // try to allocate 3143 Label retry; 3144 bind(retry); 3145 #ifdef ASSERT 3146 // make sure eden top is properly aligned 3147 { 3148 Label L; 3149 btst(MinObjAlignmentInBytesMask, obj); 3150 br(Assembler::zero, false, Assembler::pt, L); 3151 delayed()->nop(); 3152 STOP("eden top is not properly aligned"); 3153 bind(L); 3154 } 3155 #endif // ASSERT 3156 const Register free = end; 3157 sub(end, obj, free); // compute amount of free space 3158 if (var_size_in_bytes->is_valid()) { 3159 // size is unknown at compile time 3160 cmp(free, var_size_in_bytes); 3161 brx(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case 3162 delayed()->add(obj, var_size_in_bytes, end); 3163 } else { 3164 // size is known at compile time 3165 cmp(free, con_size_in_bytes); 3166 brx(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case 3167 delayed()->add(obj, con_size_in_bytes, end); 3168 } 3169 // Compare obj with the value at top_addr; if still equal, swap the value of 3170 // end with the value at top_addr. If not equal, read the value at top_addr 3171 // into end. 3172 cas_ptr(top_addr, obj, end); 3173 // if someone beat us on the allocation, try again, otherwise continue 3174 cmp(obj, end); 3175 brx(Assembler::notEqual, false, Assembler::pn, retry); 3176 delayed()->mov(end, obj); // nop if successfull since obj == end 3177 3178 #ifdef ASSERT 3179 // make sure eden top is properly aligned 3180 { 3181 Label L; 3182 const Register top_addr = t1; 3183 3184 set((intx)ch->top_addr(), top_addr); 3185 ld_ptr(top_addr, 0, top_addr); 3186 btst(MinObjAlignmentInBytesMask, top_addr); 3187 br(Assembler::zero, false, Assembler::pt, L); 3188 delayed()->nop(); 3189 STOP("eden top is not properly aligned"); 3190 bind(L); 3191 } 3192 #endif // ASSERT 3193 } 3194 } 3195 3196 3197 void MacroAssembler::tlab_allocate( 3198 Register obj, // result: pointer to object after successful allocation 3199 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 3200 int con_size_in_bytes, // object size in bytes if known at compile time 3201 Register t1, // temp register 3202 Label& slow_case // continuation point if fast allocation fails 3203 ){ 3204 // make sure arguments make sense 3205 assert_different_registers(obj, var_size_in_bytes, t1); 3206 assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size"); 3207 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment"); 3208 3209 const Register free = t1; 3210 3211 verify_tlab(); 3212 3213 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj); 3214 3215 // calculate amount of free space 3216 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free); 3217 sub(free, obj, free); 3218 3219 Label done; 3220 if (var_size_in_bytes == noreg) { 3221 cmp(free, con_size_in_bytes); 3222 } else { 3223 
    cmp(free, var_size_in_bytes);
  }
  br(Assembler::less, false, Assembler::pn, slow_case);
  // calculate the new top pointer
  if (var_size_in_bytes == noreg) {
    delayed()->add(obj, con_size_in_bytes, free);
  } else {
    delayed()->add(obj, var_size_in_bytes, free);
  }

  bind(done);

#ifdef ASSERT
  // make sure new free pointer is properly aligned
  {
    Label L;
    btst(MinObjAlignmentInBytesMask, free);
    br(Assembler::zero, false, Assembler::pt, L);
    delayed()->nop();
    STOP("updated TLAB free is not properly aligned");
    bind(L);
  }
#endif // ASSERT

  // update the tlab top pointer
  st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
  verify_tlab();
}


void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) {
  Register top = O0;
  Register t1 = G1;
  Register t2 = G3;
  Register t3 = O1;
  assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */);
  Label do_refill, discard_tlab;

  if (!Universe::heap()->supports_inline_contig_alloc()) {
    // No allocation in the shared eden.
    ba(slow_case);
    delayed()->nop();
  }

  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top);
  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1);
  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2);

  // calculate amount of free space
  sub(t1, top, t1);
  srl_ptr(t1, LogHeapWordSize, t1);

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  cmp(t1, t2);

  brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab);
  // increment waste limit to prevent getting stuck on this slow path
  if (Assembler::is_simm13(ThreadLocalAllocBuffer::refill_waste_limit_increment())) {
    delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2);
  } else {
    delayed()->nop();
    // set64 does not use the temp register if the given constant is 32 bit, so
    // we can pass any register here; if G0 were actually used as the temp, the
    // upper 32 bits of the value would be silently dropped.
    set64(ThreadLocalAllocBuffer::refill_waste_limit_increment(), t3, G0);
    add(t2, t3, t2);
  }

  st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
  if (TLABStats) {
    // increment number of slow_allocations
    ld(G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()), t2);
    add(t2, 1, t2);
    stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()));
  }
  ba(try_eden);
  delayed()->nop();

  bind(discard_tlab);
  if (TLABStats) {
    // increment number of refills
    ld(G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()), t2);
    add(t2, 1, t2);
    stw(t2, G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()));
    // accumulate wastage
    ld(G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()), t2);
    add(t2, t1, t2);
    stw(t2, G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()));
  }

  // if tlab is currently allocated (top or end != null) then
  // fill [top, end + alignment_reserve) with array object
  br_null_short(top, Assembler::pn, do_refill);

  set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2);
  st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word
  // set klass to intArrayKlass
  sub(t1, typeArrayOopDesc::header_size(T_INT), t1);
  add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1);
  sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1);
  st(t1, top, arrayOopDesc::length_offset_in_bytes());
  set((intptr_t)Universe::intArrayKlassObj_addr(), t2);
  ld_ptr(t2, 0, t2);
  // Store the klass last: concurrent GCs assume the length is valid if the
  // klass field is not null.
  store_klass(t2, top);
  verify_oop(top);

  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t1);
  sub(top, t1, t1); // size of tlab's allocated portion
  incr_allocated_bytes(t1, t2, t3);

  // refill the tlab with an eden allocation
  bind(do_refill);
  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1);
  sll_ptr(t1, LogHeapWordSize, t1);
  // allocate new tlab, address returned in top
  eden_allocate(top, t1, 0, t2, t3, slow_case);

  st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset()));
  st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
#ifdef ASSERT
  // check that tlab_size (t1) is still valid
  {
    Label ok;
    ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2);
    sll_ptr(t2, LogHeapWordSize, t2);
    cmp_and_br_short(t1, t2, Assembler::equal, Assembler::pt, ok);
    STOP("assert(t1 == tlab_size)");
    should_not_reach_here();

    bind(ok);
  }
#endif // ASSERT
  add(top, t1, top); // t1 is tlab_size
  sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top);
  st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset()));

  if (ZeroTLAB) {
    // This is a fast TLAB refill, therefore the GC is not notified of it.
    // So compiled code must fill the new TLAB with zeroes.
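    // t1 still holds the TLAB size in bytes; zero_memory counts it down to zero.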
    ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2);
    zero_memory(t2, t1);
  }
  verify_tlab();
  ba(retry);
  delayed()->nop();
}

void MacroAssembler::zero_memory(Register base, Register index) {
  assert_different_registers(base, index);
  Label loop;
  bind(loop);
  subcc(index, HeapWordSize, index);
  brx(Assembler::greaterEqual, true, Assembler::pt, loop);
  delayed()->st_ptr(G0, base, index);
}

void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes,
                                          Register t1, Register t2) {
  // Bump total bytes allocated by this thread
  assert(t1->is_global(), "must be global reg"); // so all 64 bits are saved on a context switch
  assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2);
  // v8 support has gone the way of the dodo
  ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1);
  add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1);
  stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset()));
}

Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
  switch (cond) {
    // Note some conditions are synonyms for others
    case Assembler::never:                return Assembler::always;
    case Assembler::zero:                 return Assembler::notZero;
    case Assembler::lessEqual:            return Assembler::greater;
    case Assembler::less:                 return Assembler::greaterEqual;
    case Assembler::lessEqualUnsigned:    return Assembler::greaterUnsigned;
    case Assembler::lessUnsigned:         return Assembler::greaterEqualUnsigned;
    case Assembler::negative:             return Assembler::positive;
    case Assembler::overflowSet:          return Assembler::overflowClear;
    case Assembler::always:               return Assembler::never;
    case Assembler::notZero:              return Assembler::zero;
    case Assembler::greater:              return Assembler::lessEqual;
    case Assembler::greaterEqual:         return Assembler::less;
    case Assembler::greaterUnsigned:      return Assembler::lessEqualUnsigned;
    case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned;
    case Assembler::positive:             return Assembler::negative;
    case Assembler::overflowClear:        return Assembler::overflowSet;
  }

  ShouldNotReachHere(); return Assembler::overflowClear;
}

void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr,
                              Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) {
  Condition negated_cond = negate_condition(cond);
  Label L;
  brx(negated_cond, false, Assembler::pt, L);
  delayed()->nop();
  inc_counter(counter_ptr, Rtmp1, Rtmp2);
  bind(L);
}

void MacroAssembler::inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2) {
  AddressLiteral addrlit(counter_addr);
  sethi(addrlit, Rtmp1);                 // Move hi22 bits into temporary register.
  Address addr(Rtmp1, addrlit.low10()); // Build an address with low10 bits.
  ld(addr, Rtmp2);
  inc(Rtmp2);
  st(Rtmp2, addr);
}

void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) {
  inc_counter((address) counter_addr, Rtmp1, Rtmp2);
}

SkipIfEqual::SkipIfEqual(
    MacroAssembler* masm, Register temp, const bool* flag_addr,
    Assembler::Condition condition) {
  _masm = masm;
  AddressLiteral flag(flag_addr);
  _masm->sethi(flag, temp);
  _masm->ldub(temp, flag.low10(), temp);
  _masm->tst(temp);
  _masm->br(condition, false, Assembler::pt, _label);
  _masm->delayed()->nop();
}

SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}


// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages.  This clobbers tsp and scratch.
void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
                                     Register Rscratch) {
  // Use stack pointer in temp stack pointer
  mov(SP, Rtsp);

  // Bang stack for total size given plus stack shadow page size.
  // Bang one page at a time because a large size can overflow yellow and
  // red zones (the bang will fail but stack overflow handling can't tell that
  // it was a stack overflow bang vs a regular segv).
  int offset = os::vm_page_size();
  Register Roffset = Rscratch;

  Label loop;
  bind(loop);
  set((-offset)+STACK_BIAS, Rscratch);
  st(G0, Rtsp, Rscratch);
  set(offset, Roffset);
  sub(Rsize, Roffset, Rsize);
  cmp(Rsize, G0);
  br(Assembler::greater, false, Assembler::pn, loop);
  delayed()->sub(Rtsp, Roffset, Rtsp);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again.  (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.)  Skip this address by starting at i=1, and
  // touch a few more pages below.  N.B.  It is important to touch all
  // the way down to and including i=StackShadowPages.
  for (int i = 1; i < JavaThread::stack_shadow_zone_size() / os::vm_page_size(); i++) {
    set((-i*offset)+STACK_BIAS, Rscratch);
    st(G0, Rtsp, Rscratch);
  }
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;

  ld_ptr(G2_thread, JavaThread::reserved_stack_activation_offset(), G4_scratch);
  cmp_and_brx_short(SP, G4_scratch, Assembler::lessUnsigned, Assembler::pt, no_reserved_zone_enabling);

  call_VM_leaf(L0, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), G2_thread);

  AddressLiteral stub(StubRoutines::throw_delayed_StackOverflowError_entry());
  jump_to(stub, G4_scratch);
  delayed()->restore();

  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

///////////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

static address satb_log_enqueue_with_frame = NULL;
static u_char* satb_log_enqueue_with_frame_end = NULL;

static address satb_log_enqueue_frameless = NULL;
static u_char* satb_log_enqueue_frameless_end = NULL;

static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions?
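
// The stub generated below implements the G1 SATB pre-barrier enqueue slow
// path.  In rough C-like pseudocode (a sketch only; field names abbreviated):
//
//   restart:
//     index = thread->satb_mark_queue._index;
//     if (index == 0) {
//       SATBMarkQueueSet::handle_zero_index_for_thread(thread);  // refill
//       goto restart;
//     }
//     index -= oopSize;
//     *(address)(thread->satb_mark_queue._buf + index) = pre_val;
//     thread->satb_mark_queue._index = index;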
static void generate_satb_log_enqueue(bool with_frame) {
  BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize);
  CodeBuffer buf(bb);
  MacroAssembler masm(&buf);

#define __ masm.

  address start = __ pc();
  Register pre_val;

  Label refill, restart;
  if (with_frame) {
    __ save_frame(0);
    pre_val = I0;  // Was O0 before the save.
  } else {
    pre_val = O0;
  }

  int satb_q_index_byte_offset =
    in_bytes(JavaThread::satb_mark_queue_offset() +
             SATBMarkQueue::byte_offset_of_index());

  int satb_q_buf_byte_offset =
    in_bytes(JavaThread::satb_mark_queue_offset() +
             SATBMarkQueue::byte_offset_of_buf());

  assert(in_bytes(SATBMarkQueue::byte_width_of_index()) == sizeof(intptr_t) &&
         in_bytes(SATBMarkQueue::byte_width_of_buf()) == sizeof(intptr_t),
         "check sizes in assembly below");

  __ bind(restart);

  // Load the index into the SATB buffer. SATBMarkQueue::_index is a size_t
  // so ld_ptr is appropriate.
  __ ld_ptr(G2_thread, satb_q_index_byte_offset, L0);

  // index == 0?
  __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill);

  __ ld_ptr(G2_thread, satb_q_buf_byte_offset, L1);
  __ sub(L0, oopSize, L0);

  __ st_ptr(pre_val, L1, L0);  // [_buf + index] := pre_val
  if (!with_frame) {
    // Use return-from-leaf
    __ retl();
    __ delayed()->st_ptr(L0, G2_thread, satb_q_index_byte_offset);
  } else {
    // Not delayed.
    __ st_ptr(L0, G2_thread, satb_q_index_byte_offset);
  }
  if (with_frame) {
    __ ret();
    __ delayed()->restore();
  }
  __ bind(refill);

  address handle_zero =
    CAST_FROM_FN_PTR(address,
                     &SATBMarkQueueSet::handle_zero_index_for_thread);
  // This should be rare enough that we can afford to save all the
  // scratch registers that the calling context might be using.
  __ mov(G1_scratch, L0);
  __ mov(G3_scratch, L1);
  __ mov(G4, L2);
  // We need the value of O0 above (for the write into the buffer), so we
  // save and restore it.
  __ mov(O0, L3);
  // Since the call will overwrite O7, we save and restore that, as well.
  __ mov(O7, L4);
  __ call_VM_leaf(L5, handle_zero, G2_thread);
  __ mov(L0, G1_scratch);
  __ mov(L1, G3_scratch);
  __ mov(L2, G4);
  __ mov(L3, O0);
  __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
  __ delayed()->mov(L4, O7);

  if (with_frame) {
    satb_log_enqueue_with_frame = start;
    satb_log_enqueue_with_frame_end = __ pc();
  } else {
    satb_log_enqueue_frameless = start;
    satb_log_enqueue_frameless_end = __ pc();
  }

#undef __
}

static inline void generate_satb_log_enqueue_if_necessary(bool with_frame) {
  if (with_frame) {
    if (satb_log_enqueue_with_frame == 0) {
      generate_satb_log_enqueue(with_frame);
      assert(satb_log_enqueue_with_frame != 0, "postcondition.");
    }
  } else {
    if (satb_log_enqueue_frameless == 0) {
      generate_satb_log_enqueue(with_frame);
      assert(satb_log_enqueue_frameless != 0, "postcondition.");
    }
  }
}

void MacroAssembler::g1_write_barrier_pre(Register obj,
                                          Register index,
                                          int offset,
                                          Register pre_val,
                                          Register tmp,
                                          bool preserve_o_regs) {
  Label filtered;

  if (obj == noreg) {
    // We are not loading the previous value so make
    // sure that we don't trash the value in pre_val
    // with the code below.
    assert_different_registers(pre_val, tmp);
  } else {
    // We will be loading the previous value
    // in this code so...
    assert(offset == 0 || index == noreg, "choose one");
    assert(pre_val == noreg, "check this code");
  }

  // Load the marking-active flag.
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    ld(G2,
       in_bytes(JavaThread::satb_mark_queue_offset() +
                SATBMarkQueue::byte_offset_of_active()),
       tmp);
  } else {
    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
              "Assumption");
    ldsb(G2,
         in_bytes(JavaThread::satb_mark_queue_offset() +
                  SATBMarkQueue::byte_offset_of_active()),
         tmp);
  }

  // If marking is not active, skip the barrier.
  cmp_and_br_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);

  // Do we need to load the previous value?
  if (obj != noreg) {
    // Load the previous value...
    if (index == noreg) {
      if (Assembler::is_simm13(offset)) {
        load_heap_oop(obj, offset, tmp);
      } else {
        set(offset, tmp);
        load_heap_oop(obj, tmp, tmp);
      }
    } else {
      load_heap_oop(obj, index, tmp);
    }
    // Previous value has been loaded into tmp
    pre_val = tmp;
  }

  assert(pre_val != noreg, "must have a real register");

  // Is the previous value null?
  cmp_and_brx_short(pre_val, G0, Assembler::equal, Assembler::pt, filtered);

  // OK, it's not filtered, so we'll need to call enqueue.  In the normal
  // case, pre_val will be a scratch G-reg, but there are some cases in
  // which it's an O-reg.  In the first case, do a normal call.  In the
  // latter, do a save here and call the frameless version.

  guarantee(pre_val->is_global() || pre_val->is_out(),
            "Or we need to think harder.");

  if (pre_val->is_global() && !preserve_o_regs) {
    generate_satb_log_enqueue_if_necessary(true); // with frame

    call(satb_log_enqueue_with_frame);
    delayed()->mov(pre_val, O0);
  } else {
    generate_satb_log_enqueue_if_necessary(false); // frameless

    save_frame(0);
    call(satb_log_enqueue_frameless);
    delayed()->mov(pre_val->after_save(), O0);
    restore();
  }

  bind(filtered);
}

static address dirty_card_log_enqueue = 0;
static u_char* dirty_card_log_enqueue_end = 0;

// This gets to assume that O0 contains the object address.
static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
  BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2);
  CodeBuffer buf(bb);
  MacroAssembler masm(&buf);
#define __ masm.
  address start = __ pc();

  Label not_already_dirty, restart, refill, young_card;

#ifdef _LP64
  __ srlx(O0, CardTableModRefBS::card_shift, O0);
#else
  __ srl(O0, CardTableModRefBS::card_shift, O0);
#endif
  AddressLiteral addrlit(byte_map_base);
  __ set(addrlit, O1); // O1 := <card table base>
  __ ldub(O0, O1, O2); // O2 := [O0 + O1]

  __ cmp_and_br_short(O2, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);

  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
  __ ldub(O0, O1, O2); // O2 := [O0 + O1]

  assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
  __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);

  __ bind(young_card);
  // We didn't take the branch, so we're already dirty: return.
  // Use return-from-leaf
  __ retl();
  __ delayed()->nop();

  // Not dirty.
  __ bind(not_already_dirty);

  // Get O0 + O1 into a reg by itself
  __ add(O0, O1, O3);

  // First, dirty it.
  __ stb(G0, O3, G0);  // [cardPtr] := 0 (i.e., dirty).

  int dirty_card_q_index_byte_offset =
    in_bytes(JavaThread::dirty_card_queue_offset() +
             DirtyCardQueue::byte_offset_of_index());
  int dirty_card_q_buf_byte_offset =
    in_bytes(JavaThread::dirty_card_queue_offset() +
             DirtyCardQueue::byte_offset_of_buf());
  __ bind(restart);

  // Load the index into the update buffer. DirtyCardQueue::_index is
  // a size_t so ld_ptr is appropriate here.
  __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, L0);

  // index == 0?
  __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill);

  __ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, L1);
  __ sub(L0, oopSize, L0);

  __ st_ptr(O3, L1, L0);  // [_buf + index] := O3 (the card address)
  // Use return-from-leaf
  __ retl();
  __ delayed()->st_ptr(L0, G2_thread, dirty_card_q_index_byte_offset);

  __ bind(refill);
  address handle_zero =
    CAST_FROM_FN_PTR(address,
                     &DirtyCardQueueSet::handle_zero_index_for_thread);
  // This should be rare enough that we can afford to save all the
  // scratch registers that the calling context might be using.
  __ mov(G1_scratch, L3);
  __ mov(G3_scratch, L5);
  // We need the value of O3 above (for the write into the buffer), so we
  // save and restore it.
  __ mov(O3, L6);
  // Since the call will overwrite O7, we save and restore that, as well.
  __ mov(O7, L4);

  __ call_VM_leaf(L7_thread_cache, handle_zero, G2_thread);
  __ mov(L3, G1_scratch);
  __ mov(L5, G3_scratch);
  __ mov(L6, O3);
  __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
  __ delayed()->mov(L4, O7);

  dirty_card_log_enqueue = start;
  dirty_card_log_enqueue_end = __ pc();
  // XXX Should have a guarantee here about not going off the end!
  // Does it already do so?  Do an experiment...

#undef __

}

static inline void
generate_dirty_card_log_enqueue_if_necessary(jbyte* byte_map_base) {
  if (dirty_card_log_enqueue == 0) {
    generate_dirty_card_log_enqueue(byte_map_base);
    assert(dirty_card_log_enqueue != 0, "postcondition.");
  }
}


void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val, Register tmp) {

  Label filtered;
  MacroAssembler* post_filter_masm = this;

  if (new_val == G0) return;

  G1SATBCardTableLoggingModRefBS* bs =
    barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set());

  if (G1RSBarrierRegionFilter) {
    xor3(store_addr, new_val, tmp);
#ifdef _LP64
    srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
#else
    srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
#endif

    // XXX Should I predict this taken or not?  Does it matter?
    cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
  }

  // If the "store_addr" register is an "in" or "local" register, move it to
  // a scratch reg so we can pass it as an argument.
  bool use_scr = !(store_addr->is_global() || store_addr->is_out());
  // Pick a scratch register different from "tmp".
  Register scr = (tmp == G1_scratch ? G3_scratch : G1_scratch);
  // Make sure we use up the delay slot!
  if (use_scr) {
    post_filter_masm->mov(store_addr, scr);
  } else {
    post_filter_masm->nop();
  }
  generate_dirty_card_log_enqueue_if_necessary(bs->byte_map_base);
  save_frame(0);
  call(dirty_card_log_enqueue);
  if (use_scr) {
    delayed()->mov(scr, O0);
  } else {
    delayed()->mov(store_addr->after_save(), O0);
  }
  restore();

  bind(filtered);
}

#endif // INCLUDE_ALL_GCS
///////////////////////////////////////////////////////////////////////////////////

void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
  // If we're writing constant NULL, we can skip the write barrier.
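  // Conceptually this emits the classic card-table barrier (a sketch):
  //   card_table[store_addr >> card_shift] = dirty_card_val();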
  if (new_val == G0) return;
  CardTableModRefBS* bs =
    barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
  assert(bs->kind() == BarrierSet::CardTableForRS ||
         bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
  card_table_write(bs->byte_map_base, tmp, store_addr);
}

void MacroAssembler::load_mirror(Register mirror, Register method) {
  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  ld_ptr(method, in_bytes(Method::const_offset()), mirror);
  ld_ptr(mirror, in_bytes(ConstMethod::constants_offset()), mirror);
  ld_ptr(mirror, ConstantPool::pool_holder_offset_in_bytes(), mirror);
  ld_ptr(mirror, mirror_offset, mirror);
}

void MacroAssembler::load_klass(Register src_oop, Register klass) {
  // The number of bytes in this code is used by
  // MachCallDynamicJavaNode::ret_addr_offset();
  // if this changes, change that.
  if (UseCompressedClassPointers) {
    lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
    decode_klass_not_null(klass);
  } else {
    ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass);
  }
}

void MacroAssembler::store_klass(Register klass, Register dst_oop) {
  if (UseCompressedClassPointers) {
    assert(dst_oop != klass, "not enough registers");
    encode_klass_not_null(klass);
    st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
  } else {
    st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes());
  }
}

void MacroAssembler::store_klass_gap(Register s, Register d) {
  if (UseCompressedClassPointers) {
    assert(s != d, "not enough registers");
    st(s, d, oopDesc::klass_gap_offset_in_bytes());
  }
}

void MacroAssembler::load_heap_oop(const Address& s, Register d) {
  if (UseCompressedOops) {
    lduw(s, d);
    decode_heap_oop(d);
  } else {
    ld_ptr(s, d);
  }
}

void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) {
  if (UseCompressedOops) {
    lduw(s1, s2, d);
    decode_heap_oop(d, d);
  } else {
    ld_ptr(s1, s2, d);
  }
}

void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) {
  if (UseCompressedOops) {
    lduw(s1, simm13a, d);
    decode_heap_oop(d, d);
  } else {
    ld_ptr(s1, simm13a, d);
  }
}

void MacroAssembler::load_heap_oop(Register s1, RegisterOrConstant s2, Register d) {
  if (s2.is_constant())  load_heap_oop(s1, s2.as_constant(), d);
  else                   load_heap_oop(s1, s2.as_register(), d);
}

void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) {
  if (UseCompressedOops) {
    assert(s1 != d && s2 != d, "not enough registers");
    encode_heap_oop(d);
    st(d, s1, s2);
  } else {
    st_ptr(d, s1, s2);
  }
}

void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) {
  if (UseCompressedOops) {
    assert(s1 != d, "not enough registers");
    encode_heap_oop(d);
    st(d, s1, simm13a);
  } else {
    st_ptr(d, s1, simm13a);
  }
}

void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) {
  if (UseCompressedOops) {
    assert(a.base() != d, "not enough registers");
    encode_heap_oop(d);
    st(d, a, offset);
  } else {
    st_ptr(d, a, offset);
  }
}


void MacroAssembler::encode_heap_oop(Register src, Register dst) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  verify_oop(src);
  if (Universe::narrow_oop_base() == NULL) {
    srlx(src, LogMinObjAlignmentInBytes, dst);
    return;
  }
  Label done;
  if (src == dst) {
    // optimize for frequent case src == dst
    bpr(rc_nz, true, Assembler::pt, src, done);
    delayed()->sub(src, G6_heapbase, dst); // annulled if not taken
    bind(done);
    srlx(src, LogMinObjAlignmentInBytes, dst);
  } else {
    bpr(rc_z, false, Assembler::pn, src, done);
    delayed()->mov(G0, dst);
    // The subtraction could be moved before the branch with an annulled
    // delay slot, but that may add some unneeded work decoding null.
    sub(src, G6_heapbase, dst);
    srlx(dst, LogMinObjAlignmentInBytes, dst);
    bind(done);
  }
}


void MacroAssembler::encode_heap_oop_not_null(Register r) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  verify_oop(r);
  if (Universe::narrow_oop_base() != NULL)
    sub(r, G6_heapbase, r);
  srlx(r, LogMinObjAlignmentInBytes, r);
}

void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  verify_oop(src);
  if (Universe::narrow_oop_base() == NULL) {
    srlx(src, LogMinObjAlignmentInBytes, dst);
  } else {
    sub(src, G6_heapbase, dst);
    srlx(dst, LogMinObjAlignmentInBytes, dst);
  }
}

// Same algorithm as oops.inline.hpp decode_heap_oop.
void MacroAssembler::decode_heap_oop(Register src, Register dst) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  sllx(src, LogMinObjAlignmentInBytes, dst);
  if (Universe::narrow_oop_base() != NULL) {
    Label done;
    bpr(rc_nz, true, Assembler::pt, dst, done);
    delayed()->add(dst, G6_heapbase, dst); // annulled if not taken
    bind(done);
  }
  verify_oop(dst);
}

void MacroAssembler::decode_heap_oop_not_null(Register r) {
  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
  // pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  sllx(r, LogMinObjAlignmentInBytes, r);
  if (Universe::narrow_oop_base() != NULL)
    add(r, G6_heapbase, r);
}

void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
  // pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  assert (UseCompressedOops, "must be compressed");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  sllx(src, LogMinObjAlignmentInBytes, dst);
  if (Universe::narrow_oop_base() != NULL)
    add(dst, G6_heapbase, dst);
}

void MacroAssembler::encode_klass_not_null(Register r) {
  assert (UseCompressedClassPointers, "must be compressed");
  if (Universe::narrow_klass_base() != NULL) {
    assert(r != G6_heapbase, "bad register choice");
    set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
    sub(r, G6_heapbase, r);
    if (Universe::narrow_klass_shift() != 0) {
      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
      srlx(r, LogKlassAlignmentInBytes, r);
    }
    reinit_heapbase();
  } else {
    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
    srlx(r, Universe::narrow_klass_shift(), r);
  }
}

void MacroAssembler::encode_klass_not_null(Register src, Register dst) {
  if (src == dst) {
    encode_klass_not_null(src);
  } else {
    assert (UseCompressedClassPointers, "must be compressed");
    if (Universe::narrow_klass_base() != NULL) {
      set((intptr_t)Universe::narrow_klass_base(), dst);
      sub(src, dst, dst);
      if (Universe::narrow_klass_shift() != 0) {
        srlx(dst, LogKlassAlignmentInBytes, dst);
      }
    } else {
      // shift src into dst
      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
      srlx(src, Universe::narrow_klass_shift(), dst);
    }
  }
}

// Function instr_size_for_decode_klass_not_null() counts the instructions
// generated by decode_klass_not_null() and reinit_heapbase().  Hence, if
// the instructions they generate change, then this method needs to be updated.
int MacroAssembler::instr_size_for_decode_klass_not_null() {
  assert (UseCompressedClassPointers, "only for compressed klass ptrs");
  int num_instrs = 1;  // shift src,dst or add
  if (Universe::narrow_klass_base() != NULL) {
    // set + add + set
    num_instrs += insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) +
                  insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base());
    if (Universe::narrow_klass_shift() != 0) {
      num_instrs += 1;  // sllx
    }
  }
  return num_instrs * BytesPerInstWord;
}

// !!!  If the instructions that get generated here change then function
// instr_size_for_decode_klass_not_null() needs to get updated.
void MacroAssembler::decode_klass_not_null(Register r) {
  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
  // pd_code_size_limit.
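  // r = (narrow_klass << shift) + narrow_klass_base; G6_heapbase is borrowed
  // to hold the base and is restored afterwards via reinit_heapbase().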
  assert (UseCompressedClassPointers, "must be compressed");
  if (Universe::narrow_klass_base() != NULL) {
    assert(r != G6_heapbase, "bad register choice");
    set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
    if (Universe::narrow_klass_shift() != 0)
      sllx(r, LogKlassAlignmentInBytes, r);
    add(r, G6_heapbase, r);
    reinit_heapbase();
  } else {
    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
    sllx(r, Universe::narrow_klass_shift(), r);
  }
}

void MacroAssembler::decode_klass_not_null(Register src, Register dst) {
  if (src == dst) {
    decode_klass_not_null(src);
  } else {
    // Do not add assert code to this unless you change vtableStubs_sparc.cpp
    // pd_code_size_limit.
    assert (UseCompressedClassPointers, "must be compressed");
    if (Universe::narrow_klass_base() != NULL) {
      if (Universe::narrow_klass_shift() != 0) {
        assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice");
        set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
        sllx(src, LogKlassAlignmentInBytes, dst);
        add(dst, G6_heapbase, dst);
        reinit_heapbase();
      } else {
        set((intptr_t)Universe::narrow_klass_base(), dst);
        add(src, dst, dst);
      }
    } else {
      // shift/mov src into dst.
      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
      sllx(src, Universe::narrow_klass_shift(), dst);
    }
  }
}

void MacroAssembler::reinit_heapbase() {
  if (UseCompressedOops || UseCompressedClassPointers) {
    if (Universe::heap() != NULL) {
      set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase);
    } else {
      AddressLiteral base(Universe::narrow_ptrs_base_addr());
      load_ptr_contents(base, G6_heapbase);
    }
  }
}

#ifdef COMPILER2

// Compress char[] to byte[] by compressing 16 bytes at once. Return 0 on failure.
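//
// Fast-path sketch, per iteration (assumes VIS3 and 8-byte aligned src/dst):
//   load 8 chars as two 8-byte words w1, w2;
//   if (((w1 | w2) & 0xff00ff00ff00ff00) != 0), some char is > 0xff:
//     give up (result = 0);
//   bshuffle packs the low byte of each char into one 8-byte word;
//   store 8 bytes; src += 16, dst += 8, cnt -= 8.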
void MacroAssembler::string_compress_16(Register src, Register dst, Register cnt, Register result,
                                        Register tmp1, Register tmp2, Register tmp3, Register tmp4,
                                        FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, Label& Ldone) {
  Label Lloop, Lslow;
  assert(UseVIS >= 3, "VIS3 is required");
  assert_different_registers(src, dst, cnt, tmp1, tmp2, tmp3, tmp4, result);
  assert_different_registers(ftmp1, ftmp2, ftmp3);

  // Check if cnt >= 8 (= 16 bytes)
  cmp(cnt, 8);
  br(Assembler::less, false, Assembler::pn, Lslow);
  delayed()->mov(cnt, result); // copy count

  // Check for 8-byte alignment of src and dst
  or3(src, dst, tmp1);
  andcc(tmp1, 7, G0);
  br(Assembler::notZero, false, Assembler::pn, Lslow);
  delayed()->nop();

  // Set mask for bshuffle instruction
  Register mask = tmp4;
  set(0x13579bdf, mask);
  bmask(mask, G0, G0);

  // Set mask to 0xff00 ff00 ff00 ff00 to check for non-latin1 characters
  Assembler::sethi(0xff00fc00, mask); // mask = 0x0000 0000 ff00 fc00
  add(mask, 0x300, mask);             // mask = 0x0000 0000 ff00 ff00
  sllx(mask, 32, tmp1);               // tmp1 = 0xff00 ff00 0000 0000
  or3(mask, tmp1, mask);              // mask = 0xff00 ff00 ff00 ff00

  // Load first 8 bytes
  ldx(src, 0, tmp1);

  bind(Lloop);
  // Load next 8 bytes
  ldx(src, 8, tmp2);

  // Check for non-latin1 character by testing if the most significant byte of a char is set.
  // Although we have to move the data between integer and floating point registers, this is
  // still faster than the corresponding VIS instructions (ford/fand/fcmpd).
  or3(tmp1, tmp2, tmp3);
  btst(tmp3, mask);
  // annul zeroing if branch is not taken to preserve original count
  brx(Assembler::notZero, true, Assembler::pn, Ldone);
  delayed()->mov(G0, result); // 0 - failed

  // Move bytes into float register
  movxtod(tmp1, ftmp1);
  movxtod(tmp2, ftmp2);

  // Compress by copying one byte per char from ftmp1 and ftmp2 to ftmp3
  bshuffle(ftmp1, ftmp2, ftmp3);
  stf(FloatRegisterImpl::D, ftmp3, dst, 0);

  // Increment addresses and decrement count
  inc(src, 16);
  inc(dst, 8);
  dec(cnt, 8);

  cmp(cnt, 8);
  // annul LDX if branch is not taken to prevent access past end of string
  br(Assembler::greaterEqual, true, Assembler::pt, Lloop);
  delayed()->ldx(src, 0, tmp1);

  // Fallback to slow version
  bind(Lslow);
}

// Compress char[] to byte[]. Return 0 on failure.
void MacroAssembler::string_compress(Register src, Register dst, Register cnt, Register result, Register tmp, Label& Ldone) {
  Label Lloop;
  assert_different_registers(src, dst, cnt, tmp, result);

  lduh(src, 0, tmp);

  bind(Lloop);
  inc(src, sizeof(jchar));
  cmp(tmp, 0xff);
  // annul zeroing if branch is not taken to preserve original count
  br(Assembler::greater, true, Assembler::pn, Ldone); // don't check xcc
  delayed()->mov(G0, result); // 0 - failed
  deccc(cnt);
  stb(tmp, dst, 0);
  inc(dst);
  // annul LDUH if branch is not taken to prevent access past end of string
  br(Assembler::notZero, true, Assembler::pt, Lloop);
  delayed()->lduh(src, 0, tmp); // hoisted
}

// Inflate byte[] to char[] by inflating 16 bytes at once.
void MacroAssembler::string_inflate_16(Register src, Register dst, Register cnt, Register tmp,
                                       FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, FloatRegister ftmp4, Label& Ldone) {
  Label Lloop, Lslow;
  assert(UseVIS >= 3, "VIS3 is required");
  assert_different_registers(src, dst, cnt, tmp);
  assert_different_registers(ftmp1, ftmp2, ftmp3, ftmp4);

  // Check if cnt >= 8 (= 16 bytes)
  cmp(cnt, 8);
  br(Assembler::less, false, Assembler::pn, Lslow);
  delayed()->nop();

  // Check for 8-byte alignment of src and dst
  or3(src, dst, tmp);
  andcc(tmp, 7, G0);
  br(Assembler::notZero, false, Assembler::pn, Lslow);
  // Initialize float register to zero
  FloatRegister zerof = ftmp4;
  delayed()->fzero(FloatRegisterImpl::D, zerof);

  // Load first 8 bytes
  ldf(FloatRegisterImpl::D, src, 0, ftmp1);

  bind(Lloop);
  inc(src, 8);
  dec(cnt, 8);

  // Inflate the string by interleaving each byte from the source array
  // with a zero byte and storing the result in the destination array.
  fpmerge(zerof, ftmp1->successor(), ftmp2);
  stf(FloatRegisterImpl::D, ftmp2, dst, 8);
  fpmerge(zerof, ftmp1, ftmp3);
  stf(FloatRegisterImpl::D, ftmp3, dst, 0);

  inc(dst, 16);

  cmp(cnt, 8);
  // annul LDF if branch is not taken to prevent access past end of string
  br(Assembler::greaterEqual, true, Assembler::pt, Lloop);
  delayed()->ldf(FloatRegisterImpl::D, src, 0, ftmp1);

  // Fallback to slow version
  bind(Lslow);
}

// Inflate byte[] to char[].
void MacroAssembler::string_inflate(Register src, Register dst, Register cnt, Register tmp, Label& Ldone) {
  Label Loop;
  assert_different_registers(src, dst, cnt, tmp);

  ldub(src, 0, tmp);
  bind(Loop);
  inc(src);
  deccc(cnt);
  sth(tmp, dst, 0);
  inc(dst, sizeof(jchar));
  // annul LDUB if branch is not taken to prevent access past end of string
  br(Assembler::notZero, true, Assembler::pt, Loop);
  delayed()->ldub(src, 0, tmp); // hoisted
}

void MacroAssembler::string_compare(Register str1, Register str2,
                                    Register cnt1, Register cnt2,
                                    Register tmp1, Register tmp2,
                                    Register result, int ae) {
  Label Ldone, Lloop;
  assert_different_registers(str1, str2, cnt1, cnt2, tmp1, result);
  int stride1, stride2;

  // Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a),
  // we interchange str1 and str2 in the UL case and negate the result.
  // Like this, str1 is always latin1 encoded, except for the UU case.

  if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
    srl(cnt2, 1, cnt2);
  }

  // See if the lengths are different, and calculate min in cnt1.
  // Save diff in case we need it for a tie-breaker.
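  // After the annulled branch below, cnt1 = min(cnt1, cnt2) and diff keeps
  // the signed length difference.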
  Label Lskip;
  Register diff = tmp1;
  subcc(cnt1, cnt2, diff);
  br(Assembler::greater, true, Assembler::pt, Lskip);
  // cnt2 is shorter, so use its count:
  delayed()->mov(cnt2, cnt1);
  bind(Lskip);

  // Rename registers
  Register limit1 = cnt1;
  Register limit2 = limit1;
  Register chr1   = result;
  Register chr2   = cnt2;
  if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
    // We need an additional register to keep track of two limits
    assert_different_registers(str1, str2, cnt1, cnt2, tmp1, tmp2, result);
    limit2 = tmp2;
  }

  // Is the minimum length zero?
  cmp(limit1, (int)0); // use cast to resolve overloading ambiguity
  br(Assembler::equal, true, Assembler::pn, Ldone);
  // result is difference in lengths
  if (ae == StrIntrinsicNode::UU) {
    delayed()->sra(diff, 1, result);  // Divide by 2 to get number of chars
  } else {
    delayed()->mov(diff, result);
  }

  // Load first characters
  if (ae == StrIntrinsicNode::LL) {
    stride1 = stride2 = sizeof(jbyte);
    ldub(str1, 0, chr1);
    ldub(str2, 0, chr2);
  } else if (ae == StrIntrinsicNode::UU) {
    stride1 = stride2 = sizeof(jchar);
    lduh(str1, 0, chr1);
    lduh(str2, 0, chr2);
  } else {
    stride1 = sizeof(jbyte);
    stride2 = sizeof(jchar);
    ldub(str1, 0, chr1);
    lduh(str2, 0, chr2);
  }

  // Compare first characters
  subcc(chr1, chr2, chr1);
  br(Assembler::notZero, false, Assembler::pt, Ldone);
  assert(chr1 == result, "result must be pre-placed");
  delayed()->nop();

  // Check if the strings start at the same location
  cmp(str1, str2);
  brx(Assembler::equal, true, Assembler::pn, Ldone);
  delayed()->mov(G0, result);  // result is zero

  // We have no guarantee that on 64 bit the higher half of limit is 0
  signx(limit1);

  // Get limit
  if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
    sll(limit1, 1, limit2);
    subcc(limit2, stride2, chr2);
  }
  subcc(limit1, stride1, chr1);
  br(Assembler::zero, true, Assembler::pn, Ldone);
  // result is difference in lengths
  if (ae == StrIntrinsicNode::UU) {
    delayed()->sra(diff, 1, result);  // Divide by 2 to get number of chars
  } else {
    delayed()->mov(diff, result);
  }

  // Shift str1 and str2 to the end of the arrays, negate limit
  add(str1, limit1, str1);
  add(str2, limit2, str2);
  neg(chr1, limit1);  // limit1 = -(limit1-stride1)
  if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
    neg(chr2, limit2);  // limit2 = -(limit2-stride2)
  }

  // Compare the rest of the characters
  load_sized_value(Address(str1, limit1), chr1, (ae == StrIntrinsicNode::UU) ? 2 : 1, false);

  bind(Lloop);
  load_sized_value(Address(str2, limit2), chr2, (ae == StrIntrinsicNode::LL) ? 1 : 2, false);

  subcc(chr1, chr2, chr1);
  br(Assembler::notZero, false, Assembler::pt, Ldone);
  assert(chr1 == result, "result must be pre-placed");
  delayed()->inccc(limit1, stride1);
  if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
    inccc(limit2, stride2);
  }

  // annul load if branch is not taken to prevent access past end of string
  br(Assembler::notZero, true, Assembler::pt, Lloop);
  delayed()->load_sized_value(Address(str1, limit1), chr1, (ae == StrIntrinsicNode::UU) ? 2 : 1, false);

  // If strings are equal up to min length, return the length difference.
  if (ae == StrIntrinsicNode::UU) {
    // Divide by 2 to get number of chars
    sra(diff, 1, result);
  } else {
    mov(diff, result);
  }

  // Otherwise, return the difference between the first mismatched chars.
  bind(Ldone);
  if (ae == StrIntrinsicNode::UL) {
    // Negate result (see note above)
    neg(result);
  }
}

void MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2,
                                  Register limit, Register tmp, Register result, bool is_byte) {
  Label Ldone, Lloop, Lremaining;
  assert_different_registers(ary1, ary2, limit, tmp, result);

  int length_offset = arrayOopDesc::length_offset_in_bytes();
  int base_offset   = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR);
  assert(base_offset % 8 == 0, "Base offset must be 8-byte aligned");

  if (is_array_equ) {
    // return true if the same array
    cmp(ary1, ary2);
    brx(Assembler::equal, true, Assembler::pn, Ldone);
    delayed()->mov(1, result);  // equal

    br_null(ary1, true, Assembler::pn, Ldone);
    delayed()->clr(result);     // not equal

    br_null(ary2, true, Assembler::pn, Ldone);
    delayed()->clr(result);     // not equal

    // load the lengths of arrays
    ld(Address(ary1, length_offset), limit);
    ld(Address(ary2, length_offset), tmp);

    // return false if the two arrays are not equal length
    cmp(limit, tmp);
    br(Assembler::notEqual, true, Assembler::pn, Ldone);
    delayed()->clr(result);     // not equal
  }

  cmp_zero_and_br(Assembler::zero, limit, Ldone, true, Assembler::pn);
  delayed()->mov(1, result); // zero-length arrays are equal

  if (is_array_equ) {
    // load array addresses
    add(ary1, base_offset, ary1);
    add(ary2, base_offset, ary2);
    // set byte count
    if (!is_byte) {
      sll(limit, exact_log2(sizeof(jchar)), limit);
    }
  } else {
    // We have no guarantee that on 64 bit the higher half of limit is 0
    signx(limit);
  }

#ifdef ASSERT
  // Sanity check for doubleword (8-byte) alignment of ary1 and ary2.
  // Guaranteed on 64-bit systems (see arrayOopDesc::header_size_in_bytes()).
  Label Laligned;
  or3(ary1, ary2, tmp);
  andcc(tmp, 7, tmp);
  br_null_short(tmp, Assembler::pn, Laligned);
  STOP("First array element is not 8-byte aligned.");
  should_not_reach_here();
  bind(Laligned);
#endif

  // Shift ary1 and ary2 to the end of the arrays, negate limit
  add(ary1, limit, ary1);
  add(ary2, limit, ary2);
  neg(limit, limit);

  // MAIN LOOP
  // Load and compare array elements of size 'byte_width' until the elements are not
  // equal or we reach the end of the arrays. If the size of the arrays is not a
  // multiple of 'byte_width', we simply read over the end of the array, bail out and
  // compare the remaining bytes below by skipping the garbage bytes.
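  // 'limit' is negative here; indexing [aryN + limit] starts at the first
  // element and walks forward as limit is incremented towards zero.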
  ldx(ary1, limit, result);
  bind(Lloop);
  ldx(ary2, limit, tmp);
  inccc(limit, 8);
  // Bail out if we reached the end (but still do the comparison)
  br(Assembler::positive, false, Assembler::pn, Lremaining);
  delayed()->cmp(result, tmp);
  // Check equality of elements
  brx(Assembler::equal, false, Assembler::pt, target(Lloop));
  delayed()->ldx(ary1, limit, result);

  ba(Ldone);
  delayed()->clr(result); // not equal

  // TAIL COMPARISON
  // We got here because we reached the end of the arrays. 'limit' is the number of
  // garbage bytes we may have compared by reading over the end of the arrays. Shift
  // out the garbage and compare the remaining elements.
  bind(Lremaining);
  // Optimistic shortcut: elements potentially including garbage are equal
  brx(Assembler::equal, true, Assembler::pt, target(Ldone));
  delayed()->mov(1, result); // equal
  // Shift 'limit' bytes to the right and compare
  sll(limit, 3, limit); // bytes to bits
  srlx(result, limit, result);
  srlx(tmp, limit, tmp);
  cmp(result, tmp);
  clr(result);
  movcc(Assembler::equal, false, xcc, 1, result);

  bind(Ldone);
}

void MacroAssembler::has_negatives(Register inp, Register size, Register result, Register t2, Register t3, Register t4, Register t5) {

  // Test for negative bytes in an input string of a given size.
  // result is 1 if found, 0 otherwise.

  Label Lcore, Ltail, Lreturn, Lcore_rpt;

  assert_different_registers(inp, size, t2, t3, t4, t5, result);

  Register i     = result;  // result used as integer index i until very end
  Register lmask = t2;      // t2 is aliased to lmask

  // INITIALIZATION
  // ===========================================================
  // initialize highbits mask -> lmask = 0x8080808080808080  (8B/64b)
  // compute unaligned offset -> i
  // compute core end index   -> t5
  Assembler::sethi(0x80808000, t2);   //! sethi macro fails to emit optimal
  add(t2, 0x80, t2);
  sllx(t2, 32, t3);
  or3(t3, t2, lmask);                 // 0x8080808080808080 -> lmask
  sra(size, 0, size);
  andcc(inp, 0x7, i);                 // unaligned offset -> i
  br(Assembler::zero, true, Assembler::pn, Lcore); // starts 8B aligned?
  delayed()->add(size, -8, t5);       // (annulled) core end index -> t5

  // ===========================================================

  // UNALIGNED HEAD
  // ===========================================================
  // * unaligned head handling: grab aligned 8B containing unaligned inp(ut)
  // * obliterate (ignore) bytes outside string by shifting off reg ends
  // * compare with bitmask, short circuit return true if one or more high
  //   bits set.
  cmp(size, 0);
  br(Assembler::zero, true, Assembler::pn, Lreturn); // short-circuit?
  delayed()->mov(0, result);          // annulled so i not clobbered for following
  neg(i, t4);
  add(i, size, t5);
  ldx(inp, t4, t3);                   // raw aligned 8B containing unaligned head -> t3
  mov(8, t4);
  sub(t4, t5, t4);
  sra(t4, 31, t5);
  andn(t4, t5, t5);
  add(i, t5, t4);
  sll(t5, 3, t5);
  sll(t4, 3, t4);                     // # bits to shift right, left -> t5,t4
  srlx(t3, t5, t3);
  sllx(t3, t4, t3);                   // bytes outside string in 8B header obliterated -> t3
  andcc(lmask, t3, G0);
  brx(Assembler::notZero, true, Assembler::pn, Lreturn); // short circuit?
  delayed()->mov(1, result);          // annulled so i not clobbered for following
  add(size, -8, t5);                  // core end index -> t5
  mov(8, t4);
  sub(t4, i, i);                      // # bytes examined in unaligned head (<8) -> i
  // ===========================================================

  // ALIGNED CORE
  // ===========================================================
  // * iterate index i over aligned 8B sections of core, comparing with
  //   bitmask, short circuit return true if one or more high bits set
  // t5 contains core end index/loop limit which is the index
  //   of the MSB of last (unaligned) 8B fully contained in the string.
  // inp   contains address of first byte in string/array
  // lmask contains 8B high bit mask for comparison
  // i     contains next index to be processed (adr. inp+i is on 8B boundary)
  bind(Lcore);
  cmp_and_br_short(i, t5, Assembler::greater, Assembler::pn, Ltail);
  bind(Lcore_rpt);
  ldx(inp, i, t3);
  andcc(t3, lmask, G0);
  brx(Assembler::notZero, true, Assembler::pn, Lreturn);
  delayed()->mov(1, result);          // annulled so i not clobbered for following
  add(i, 8, i);
  cmp_and_br_short(i, t5, Assembler::lessEqual, Assembler::pn, Lcore_rpt);
  // ===========================================================

  // ALIGNED TAIL (<8B)
  // ===========================================================
  // handle aligned tail of 7B or less as complete 8B, obliterating end of
  // string bytes by shifting them off end, compare what's left with bitmask
  // inp   contains address of first byte in string/array
  // lmask contains 8B high bit mask for comparison
  // i     contains next index to be processed (adr. inp+i is on 8B boundary)
  bind(Ltail);
  subcc(size, i, t4);                 // # of remaining bytes in string -> t4
  // return 0 if no more remaining bytes
  br(Assembler::lessEqual, true, Assembler::pn, Lreturn);
  delayed()->mov(0, result);          // annulled so i not clobbered for following
  ldx(inp, i, t3);                    // load final 8B (aligned) containing tail -> t3
  mov(8, t5);
  sub(t5, t4, t4);
  mov(0, result);                     // ** i clobbered at this point
  sll(t4, 3, t4);                     // bits beyond end of string -> t4
  srlx(t3, t4, t3);                   // bytes beyond end now obliterated -> t3
  andcc(lmask, t3, G0);
  movcc(Assembler::notZero, false, xcc, 1, result);
  bind(Lreturn);
}

#endif


// Use BIS for zeroing (count is in bytes).
void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) {
  assert(UseBlockZeroing && VM_Version::has_block_zeroing(), "only works with BIS zeroing");
  Register end = count;
  int cache_line_size = VM_Version::prefetch_data_size();
  assert(cache_line_size > 0, "cache line size should be known for this code");
  // Minimum count when BIS zeroing can be used since
  // it needs membar which is expensive.
  int block_zero_size = MAX2(cache_line_size*3, (int)BlockZeroingLowLimit);

  Label small_loop;
  // Check if count is negative (dead code) or zero.
  // Note, count uses 64bit in 64 bit VM.
  cmp_and_brx_short(count, 0, Assembler::lessEqual, Assembler::pn, Ldone);

  // Use BIS zeroing only for big arrays since it requires membar.
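  // Overall shape of the BIS path below (sketch):
  //   1. plain 8-byte stores up to the next cache line boundary,
  //   2. BIS stores (stxa with ASI_ST_BLKINIT_PRIMARY) for whole cache lines,
  //      stopping two lines early so that the area which may hold a following
  //      object's header is never written with BIS,
  //   3. membar(StoreLoad), then plain stores for the tail.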
4682 if (Assembler::is_simm13(block_zero_size)) { // < 4096 4683 cmp(count, block_zero_size); 4684 } else { 4685 set(block_zero_size, temp); 4686 cmp(count, temp); 4687 } 4688 br(Assembler::lessUnsigned, false, Assembler::pt, small_loop); 4689 delayed()->add(to, count, end); 4690 4691 // Note: size is >= three (32 bytes) cache lines. 4692 4693 // Clean the beginning of space up to next cache line. 4694 for (int offs = 0; offs < cache_line_size; offs += 8) { 4695 stx(G0, to, offs); 4696 } 4697 4698 // align to next cache line 4699 add(to, cache_line_size, to); 4700 and3(to, -cache_line_size, to); 4701 4702 // Note: size left >= two (32 bytes) cache lines. 4703 4704 // BIS should not be used to zero tail (64 bytes) 4705 // to avoid zeroing a header of the following object. 4706 sub(end, (cache_line_size*2)-8, end); 4707 4708 Label bis_loop; 4709 bind(bis_loop); 4710 stxa(G0, to, G0, Assembler::ASI_ST_BLKINIT_PRIMARY); 4711 add(to, cache_line_size, to); 4712 cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, bis_loop); 4713 4714 // BIS needs membar. 4715 membar(Assembler::StoreLoad); 4716 4717 add(end, (cache_line_size*2)-8, end); // restore end 4718 cmp_and_brx_short(to, end, Assembler::greaterEqualUnsigned, Assembler::pn, Ldone); 4719 4720 // Clean the tail. 4721 bind(small_loop); 4722 stx(G0, to, 0); 4723 add(to, 8, to); 4724 cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, small_loop); 4725 nop(); // Separate short branches 4726 } 4727 4728 /** 4729 * Update CRC-32[C] with a byte value according to constants in table 4730 * 4731 * @param [in,out]crc Register containing the crc. 4732 * @param [in]val Register containing the byte to fold into the CRC. 4733 * @param [in]table Register containing the table of crc constants. 
4734 * 4735 * uint32_t crc; 4736 * val = crc_table[(val ^ crc) & 0xFF]; 4737 * crc = val ^ (crc >> 8); 4738 */ 4739 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 4740 xor3(val, crc, val); 4741 and3(val, 0xFF, val); 4742 sllx(val, 2, val); 4743 lduw(table, val, val); 4744 srlx(crc, 8, crc); 4745 xor3(val, crc, crc); 4746 } 4747 4748 // Reverse byte order of lower 32 bits, assuming upper 32 bits all zeros 4749 void MacroAssembler::reverse_bytes_32(Register src, Register dst, Register tmp) { 4750 srlx(src, 24, dst); 4751 4752 sllx(src, 32+8, tmp); 4753 srlx(tmp, 32+24, tmp); 4754 sllx(tmp, 8, tmp); 4755 or3(dst, tmp, dst); 4756 4757 sllx(src, 32+16, tmp); 4758 srlx(tmp, 32+24, tmp); 4759 sllx(tmp, 16, tmp); 4760 or3(dst, tmp, dst); 4761 4762 sllx(src, 32+24, tmp); 4763 srlx(tmp, 32, tmp); 4764 or3(dst, tmp, dst); 4765 } 4766 4767 void MacroAssembler::movitof_revbytes(Register src, FloatRegister dst, Register tmp1, Register tmp2) { 4768 reverse_bytes_32(src, tmp1, tmp2); 4769 movxtod(tmp1, dst); 4770 } 4771 4772 void MacroAssembler::movftoi_revbytes(FloatRegister src, Register dst, Register tmp1, Register tmp2) { 4773 movdtox(src, tmp1); 4774 reverse_bytes_32(tmp1, dst, tmp2); 4775 } 4776 4777 void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register buf, int offset) { 4778 xmulx(xcrc_hi, xK_hi, xtmp_lo); 4779 xmulxhi(xcrc_hi, xK_hi, xtmp_hi); 4780 xmulxhi(xcrc_lo, xK_lo, xcrc_hi); 4781 xmulx(xcrc_lo, xK_lo, xcrc_lo); 4782 xor3(xcrc_lo, xtmp_lo, xcrc_lo); 4783 xor3(xcrc_hi, xtmp_hi, xcrc_hi); 4784 ldxl(buf, G0, xtmp_lo); 4785 inc(buf, 8); 4786 ldxl(buf, G0, xtmp_hi); 4787 inc(buf, 8); 4788 xor3(xcrc_lo, xtmp_lo, xcrc_lo); 4789 xor3(xcrc_hi, xtmp_hi, xcrc_hi); 4790 } 4791 4792 void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register xbuf_hi, Register xbuf_lo) { 4793 mov(xcrc_lo, xtmp_lo); 4794 mov(xcrc_hi, xtmp_hi); 4795 xmulx(xtmp_hi, xK_hi, xtmp_lo); 4796 xmulxhi(xtmp_hi, xK_hi, xtmp_hi); 4797 xmulxhi(xcrc_lo, xK_lo, xcrc_hi); 4798 xmulx(xcrc_lo, xK_lo, xcrc_lo); 4799 xor3(xcrc_lo, xbuf_lo, xcrc_lo); 4800 xor3(xcrc_hi, xbuf_hi, xcrc_hi); 4801 xor3(xcrc_lo, xtmp_lo, xcrc_lo); 4802 xor3(xcrc_hi, xtmp_hi, xcrc_hi); 4803 } 4804 4805 void MacroAssembler::fold_8bit_crc32(Register xcrc, Register table, Register xtmp, Register tmp) { 4806 and3(xcrc, 0xFF, tmp); 4807 sllx(tmp, 2, tmp); 4808 lduw(table, tmp, xtmp); 4809 srlx(xcrc, 8, xcrc); 4810 xor3(xtmp, xcrc, xcrc); 4811 } 4812 4813 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 4814 and3(crc, 0xFF, tmp); 4815 srlx(crc, 8, crc); 4816 sllx(tmp, 2, tmp); 4817 lduw(table, tmp, tmp); 4818 xor3(tmp, crc, crc); 4819 } 4820 4821 #define CRC32_TMP_REG_NUM 18 4822 4823 #define CRC32_CONST_64 0x163cd6124 4824 #define CRC32_CONST_96 0x0ccaa009e 4825 #define CRC32_CONST_160 0x1751997d0 4826 #define CRC32_CONST_480 0x1c6e41596 4827 #define CRC32_CONST_544 0x154442bd4 4828 4829 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table) { 4830 4831 Label L_cleanup_loop, L_cleanup_check, L_align_loop, L_align_check; 4832 Label L_main_loop_prologue; 4833 Label L_fold_512b, L_fold_512b_loop, L_fold_128b; 4834 Label L_fold_tail, L_fold_tail_loop; 4835 Label L_8byte_fold_loop, L_8byte_fold_check; 4836 4837 const Register tmp[CRC32_TMP_REG_NUM] = {L0, L1, L2, L3, L4, L5, L6, G1, 

void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register buf, int offset) {
  xmulx(xcrc_hi, xK_hi, xtmp_lo);
  xmulxhi(xcrc_hi, xK_hi, xtmp_hi);
  xmulxhi(xcrc_lo, xK_lo, xcrc_hi);
  xmulx(xcrc_lo, xK_lo, xcrc_lo);
  xor3(xcrc_lo, xtmp_lo, xcrc_lo);
  xor3(xcrc_hi, xtmp_hi, xcrc_hi);
  ldxl(buf, G0, xtmp_lo);
  inc(buf, 8);
  ldxl(buf, G0, xtmp_hi);
  inc(buf, 8);
  xor3(xcrc_lo, xtmp_lo, xcrc_lo);
  xor3(xcrc_hi, xtmp_hi, xcrc_hi);
}

void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register xbuf_hi, Register xbuf_lo) {
  mov(xcrc_lo, xtmp_lo);
  mov(xcrc_hi, xtmp_hi);
  xmulx(xtmp_hi, xK_hi, xtmp_lo);
  xmulxhi(xtmp_hi, xK_hi, xtmp_hi);
  xmulxhi(xcrc_lo, xK_lo, xcrc_hi);
  xmulx(xcrc_lo, xK_lo, xcrc_lo);
  xor3(xcrc_lo, xbuf_lo, xcrc_lo);
  xor3(xcrc_hi, xbuf_hi, xcrc_hi);
  xor3(xcrc_lo, xtmp_lo, xcrc_lo);
  xor3(xcrc_hi, xtmp_hi, xcrc_hi);
}

void MacroAssembler::fold_8bit_crc32(Register xcrc, Register table, Register xtmp, Register tmp) {
  and3(xcrc, 0xFF, tmp);
  sllx(tmp, 2, tmp);
  lduw(table, tmp, xtmp);
  srlx(xcrc, 8, xcrc);
  xor3(xtmp, xcrc, xcrc);
}

void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
  and3(crc, 0xFF, tmp);
  srlx(crc, 8, crc);
  sllx(tmp, 2, tmp);
  lduw(table, tmp, tmp);
  xor3(tmp, crc, crc);
}

#define CRC32_TMP_REG_NUM 18

#define CRC32_CONST_64  0x163cd6124
#define CRC32_CONST_96  0x0ccaa009e
#define CRC32_CONST_160 0x1751997d0
#define CRC32_CONST_480 0x1c6e41596
#define CRC32_CONST_544 0x154442bd4

void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table) {

  Label L_cleanup_loop, L_cleanup_check, L_align_loop, L_align_check;
  Label L_main_loop_prologue;
  Label L_fold_512b, L_fold_512b_loop, L_fold_128b;
  Label L_fold_tail, L_fold_tail_loop;
  Label L_8byte_fold_loop, L_8byte_fold_check;

  const Register tmp[CRC32_TMP_REG_NUM] = {L0, L1, L2, L3, L4, L5, L6, G1,
                                           I0, I1, I2, I3, I4, I5, I7, O4, O5, G3};

  Register const_64  = tmp[CRC32_TMP_REG_NUM-1];
  Register const_96  = tmp[CRC32_TMP_REG_NUM-1];
  Register const_160 = tmp[CRC32_TMP_REG_NUM-2];
  Register const_480 = tmp[CRC32_TMP_REG_NUM-1];
  Register const_544 = tmp[CRC32_TMP_REG_NUM-2];

  set(ExternalAddress(StubRoutines::crc_table_addr()), table);

  not1(crc); // ~c
  clruwu(crc); // clear upper 32 bits of crc

  // Check if below cutoff; if so, proceed directly to the cleanup code
  mov(31, G4);
  cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_check);

  // Align buffer to an 8-byte boundary
  mov(8, O5);
  and3(buf, 0x7, O4);
  sub(O5, O4, O5);
  and3(O5, 0x7, O5);
  sub(len, O5, len);
  ba(L_align_check);
  delayed()->nop();

  // Alignment loop, table lookup method for up to 7 bytes
  bind(L_align_loop);
  ldub(buf, 0, O4);
  inc(buf);
  dec(O5);
  xor3(O4, crc, O4);
  and3(O4, 0xFF, O4);
  sllx(O4, 2, O4);
  lduw(table, O4, O4);
  srlx(crc, 8, crc);
  xor3(O4, crc, crc);
  bind(L_align_check);
  nop();
  cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_align_loop);

  // Aligned on a 64-bit (8-byte) boundary at this point
  // Check if still above the cutoff (31 bytes)
  mov(31, G4);
  cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_check);
  // At least 32 bytes left to process

  // Free up registers by storing them to FP registers
  for (int i = 0; i < CRC32_TMP_REG_NUM; i++) {
    movxtod(tmp[i], as_FloatRegister(2*i));
  }

  // Determine which loop to enter
  // Shared prologue
  ldxl(buf, G0, tmp[0]);
  inc(buf, 8);
  ldxl(buf, G0, tmp[1]);
  inc(buf, 8);
  xor3(tmp[0], crc, tmp[0]); // Fold CRC into first few bytes
  and3(crc, 0, crc); // Clear out the crc register
  // Main loop needs at least 128 bytes
  mov(128, G4);
  mov(64, tmp[2]);
  cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_main_loop_prologue);
  // Less than 64 bytes
  nop();
  cmp_and_br_short(len, tmp[2], Assembler::lessUnsigned, Assembler::pt, L_fold_tail);
  // Between 64 and 127 bytes
  set64(CRC32_CONST_96, const_96, tmp[8]);
  set64(CRC32_CONST_160, const_160, tmp[9]);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[2], tmp[3], buf, 0);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[4], tmp[5], buf, 16);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[6], tmp[7], buf, 32);
  dec(len, 48);
  ba(L_fold_tail);
  delayed()->nop();

  bind(L_main_loop_prologue);
  for (int i = 2; i < 8; i++) {
    ldxl(buf, G0, tmp[i]);
    inc(buf, 8);
  }

  // Fold total 512 bits of polynomial on each iteration,
  // 128 bits per each of 4 parallel streams
  set64(CRC32_CONST_480, const_480, tmp[8]);
  set64(CRC32_CONST_544, const_544, tmp[9]);

  mov(128, G4);
  bind(L_fold_512b_loop);
  fold_128bit_crc32(tmp[1], tmp[0], const_480, const_544, tmp[9],  tmp[8],  buf,  0);
  fold_128bit_crc32(tmp[3], tmp[2], const_480, const_544, tmp[11], tmp[10], buf, 16);
  fold_128bit_crc32(tmp[5], tmp[4], const_480, const_544, tmp[13], tmp[12], buf, 32);
  fold_128bit_crc32(tmp[7], tmp[6], const_480, const_544, tmp[15], tmp[14], buf, 64);
  dec(len, 64);
  cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_fold_512b_loop);

  // Fold 512 bits to 128 bits
  bind(L_fold_512b);
  set64(CRC32_CONST_96, const_96, tmp[8]);
  set64(CRC32_CONST_160, const_160, tmp[9]);

  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[3], tmp[2]);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[5], tmp[4]);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[7], tmp[6]);
  dec(len, 48);

  // Fold the rest of the data in 128-bit chunks
  bind(L_fold_tail);
  mov(32, G4);
  cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_fold_128b);

  set64(CRC32_CONST_96, const_96, tmp[8]);
  set64(CRC32_CONST_160, const_160, tmp[9]);

  bind(L_fold_tail_loop);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[2], tmp[3], buf, 0);
  sub(len, 16, len);
  cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_fold_tail_loop);
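
  // A sketch of the reduction performed next (illustrative math, not extra
  // generated code): with x = hi:lo the remaining 128-bit state and
  // K64 = CRC32_CONST_64,
  //
  //   t   = clmul(K64, lo)                      // up to a 97-bit product
  //   hi' = hi ^ clmul(K64, low32(t)) ^ (t >> 32)
  //
  // which folds the low half away and leaves 64 bits of state in tmp[1];
  // the 8-bit table folds that follow shrink it to the final 32-bit CRC.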

  // Fold the 128 bits in tmps 0 - 1 into tmp 1
  bind(L_fold_128b);

  set64(CRC32_CONST_64, const_64, tmp[4]);

  xmulx(const_64, tmp[0], tmp[2]);
  xmulxhi(const_64, tmp[0], tmp[3]);

  srl(tmp[2], G0, tmp[4]);
  xmulx(const_64, tmp[4], tmp[4]);

  srlx(tmp[2], 32, tmp[2]);
  sllx(tmp[3], 32, tmp[3]);
  or3(tmp[2], tmp[3], tmp[2]);

  xor3(tmp[4], tmp[1], tmp[4]);
  xor3(tmp[4], tmp[2], tmp[1]);
  dec(len, 8);

  // Use table lookup for the 8 bytes left in tmp[1]
  dec(len, 8);

  // 8 8-bit folds to compute 32-bit CRC.
  for (int j = 0; j < 4; j++) {
    fold_8bit_crc32(tmp[1], table, tmp[2], tmp[3]);
  }
  srl(tmp[1], G0, crc); // move 32 bits to general register
  for (int j = 0; j < 4; j++) {
    fold_8bit_crc32(crc, table, tmp[3]);
  }

  bind(L_8byte_fold_check);

  // Restore int registers saved in FP registers
  for (int i = 0; i < CRC32_TMP_REG_NUM; i++) {
    movdtox(as_FloatRegister(2*i), tmp[i]);
  }

  ba(L_cleanup_check);
  delayed()->nop();

  // Table lookup method for the remaining few bytes
  bind(L_cleanup_loop);
  ldub(buf, 0, O4);
  inc(buf);
  dec(len);
  xor3(O4, crc, O4);
  and3(O4, 0xFF, O4);
  sllx(O4, 2, O4);
  lduw(table, O4, O4);
  srlx(crc, 8, crc);
  xor3(O4, crc, crc);
  bind(L_cleanup_check);
  nop();
  cmp_and_br_short(len, 0, Assembler::greaterUnsigned, Assembler::pt, L_cleanup_loop);

  not1(crc);
}

#define CHUNK_LEN   128          /* 128 x 8B = 1KB */
#define CHUNK_K1    0x1307a0206  /* reverseBits(pow(x, CHUNK_LEN*8*8*3 - 32) mod P(x)) << 1 */
#define CHUNK_K2    0x1a0f717c4  /* reverseBits(pow(x, CHUNK_LEN*8*8*2 - 32) mod P(x)) << 1 */
#define CHUNK_K3    0x0170076fa  /* reverseBits(pow(x, CHUNK_LEN*8*8*1 - 32) mod P(x)) << 1 */
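
// How the four parallel streams below are recombined (a sketch of the math,
// not additional code): with crc1..crc3 the CRC32C states of chunks 1-3 and
// crc4 the state of chunk 4 up to its last 8 bytes,
//
//   crc = crc32c(crc4, clmul(crc1, CHUNK_K1) ^ clmul(crc2, CHUNK_K2)
//                      ^ clmul(crc3, CHUNK_K3) ^ last 8 bytes of chunk 4)
//
// where clmul is the 64-bit carry-less multiply (xmulx) and each CHUNK_Kn
// shifts a stream's CRC over all the bytes that follow its chunk.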

void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, Register table) {

  Label L_crc32c_head, L_crc32c_aligned;
  Label L_crc32c_parallel, L_crc32c_parallel_loop;
  Label L_crc32c_serial, L_crc32c_x32_loop, L_crc32c_x8, L_crc32c_x8_loop;
  Label L_crc32c_done, L_crc32c_tail, L_crc32c_return;

  set(ExternalAddress(StubRoutines::crc32c_table_addr()), table);

  cmp_and_br_short(len, 0, Assembler::lessEqual, Assembler::pn, L_crc32c_return);

  // clear upper 32 bits of crc
  clruwu(crc);

  and3(buf, 7, G4);
  cmp_and_brx_short(G4, 0, Assembler::equal, Assembler::pt, L_crc32c_aligned);

  mov(8, G1);
  sub(G1, G4, G4);

  // ------ process the misaligned head (7 bytes or less) ------
  bind(L_crc32c_head);

  // crc = (crc >>> 8) ^ byteTable[(crc ^ b) & 0xFF];
  ldub(buf, 0, G1);
  update_byte_crc32(crc, G1, table);

  inc(buf);
  dec(len);
  cmp_and_br_short(len, 0, Assembler::equal, Assembler::pn, L_crc32c_return);
  dec(G4);
  cmp_and_br_short(G4, 0, Assembler::greater, Assembler::pt, L_crc32c_head);

  // ------ process the 8-byte-aligned body ------
  bind(L_crc32c_aligned);
  nop();
  cmp_and_br_short(len, 8, Assembler::less, Assembler::pn, L_crc32c_tail);

  // reverse the byte order of lower 32 bits to big endian, and move to FP side
  movitof_revbytes(crc, F0, G1, G3);

  set(CHUNK_LEN*8*4, G4);
  cmp_and_br_short(len, G4, Assembler::less, Assembler::pt, L_crc32c_serial);

  // ------ process four 1KB chunks in parallel ------
  bind(L_crc32c_parallel);

  fzero(FloatRegisterImpl::D, F2);
  fzero(FloatRegisterImpl::D, F4);
  fzero(FloatRegisterImpl::D, F6);

  mov(CHUNK_LEN - 1, G4);
  bind(L_crc32c_parallel_loop);
  // schedule ldf's ahead of crc32c's to hide the load-use latency
  ldf(FloatRegisterImpl::D, buf, 0,            F8);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*8,  F10);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*16, F12);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*24, F14);
  crc32c(F0, F8,  F0);
  crc32c(F2, F10, F2);
  crc32c(F4, F12, F4);
  crc32c(F6, F14, F6);
  inc(buf, 8);
  dec(G4);
  cmp_and_br_short(G4, 0, Assembler::greater, Assembler::pt, L_crc32c_parallel_loop);

  ldf(FloatRegisterImpl::D, buf, 0,            F8);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*8,  F10);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*16, F12);
  crc32c(F0, F8,  F0);
  crc32c(F2, F10, F2);
  crc32c(F4, F12, F4);

  inc(buf, CHUNK_LEN*24);
  ldfl(FloatRegisterImpl::D, buf, G0, F14); // load in little endian
  inc(buf, 8);

  prefetch(buf, 0,            Assembler::severalReads);
  prefetch(buf, CHUNK_LEN*8,  Assembler::severalReads);
  prefetch(buf, CHUNK_LEN*16, Assembler::severalReads);
  prefetch(buf, CHUNK_LEN*24, Assembler::severalReads);

  // move to INT side, and reverse the byte order of lower 32 bits to little endian
  movftoi_revbytes(F0, O4, G1, G4);
  movftoi_revbytes(F2, O5, G1, G4);
  movftoi_revbytes(F4, G5, G1, G4);

  // combine the results of 4 chunks
  set64(CHUNK_K1, G3, G1);
  xmulx(O4, G3, O4);
  set64(CHUNK_K2, G3, G1);
  xmulx(O5, G3, O5);
  set64(CHUNK_K3, G3, G1);
  xmulx(G5, G3, G5);

  movdtox(F14, G4);
  xor3(O4, O5, O5);
  xor3(G5, O5, O5);
  xor3(G4, O5, O5);

  // reverse the byte order to big endian, via stack, and move to FP side
  // TODO: use new revb instruction
  add(SP, -8, G1);
  srlx(G1, 3, G1);
  sllx(G1, 3, G1);
  stx(O5, G1, G0);
  ldfl(FloatRegisterImpl::D, G1, G0, F2); // load in little endian

  crc32c(F6, F2, F0);

  set(CHUNK_LEN*8*4, G4);
  sub(len, G4, len);
  cmp_and_br_short(len, G4, Assembler::greaterEqual, Assembler::pt, L_crc32c_parallel);
  nop();
  cmp_and_br_short(len, 0, Assembler::equal, Assembler::pt, L_crc32c_done);

  bind(L_crc32c_serial);

  mov(32, G4);
  cmp_and_br_short(len, G4, Assembler::less, Assembler::pn, L_crc32c_x8);
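
  // The serial tail is a step-down ladder; in outline (illustrative
  // pseudocode, not extra generated code):
  //
  //   while (len >= 32) { crc32c over four 8-byte words; }   // unrolled x4
  //   while (len >= 8)  { crc32c over one 8-byte word;   }
  //   // the byte-wise table loop then finishes the last <8 bytes
  //
  // The 32-byte unroll mainly amortizes loop overhead; each crc32c still
  // depends on the previous one through F0.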

  // ------ process 32B chunks ------
  bind(L_crc32c_x32_loop);
  ldf(FloatRegisterImpl::D, buf, 0, F2);
  crc32c(F0, F2, F0);
  ldf(FloatRegisterImpl::D, buf, 8, F2);
  crc32c(F0, F2, F0);
  ldf(FloatRegisterImpl::D, buf, 16, F2);
  crc32c(F0, F2, F0);
  ldf(FloatRegisterImpl::D, buf, 24, F2);
  inc(buf, 32);
  crc32c(F0, F2, F0);
  dec(len, 32);
  cmp_and_br_short(len, G4, Assembler::greaterEqual, Assembler::pt, L_crc32c_x32_loop);

  bind(L_crc32c_x8);
  nop();
  cmp_and_br_short(len, 8, Assembler::less, Assembler::pt, L_crc32c_done);

  // ------ process 8B chunks ------
  bind(L_crc32c_x8_loop);
  ldf(FloatRegisterImpl::D, buf, 0, F2);
  inc(buf, 8);
  crc32c(F0, F2, F0);
  dec(len, 8);
  cmp_and_br_short(len, 8, Assembler::greaterEqual, Assembler::pt, L_crc32c_x8_loop);

  bind(L_crc32c_done);

  // move to INT side, and reverse the byte order of lower 32 bits to little endian
  movftoi_revbytes(F0, crc, G1, G3);

  cmp_and_br_short(len, 0, Assembler::equal, Assembler::pt, L_crc32c_return);

  // ------ process the misaligned tail (7 bytes or less) ------
  bind(L_crc32c_tail);

  // crc = (crc >>> 8) ^ byteTable[(crc ^ b) & 0xFF];
  ldub(buf, 0, G1);
  update_byte_crc32(crc, G1, table);

  inc(buf);
  dec(len);
  cmp_and_br_short(len, 0, Assembler::greater, Assembler::pt, L_crc32c_tail);

  bind(L_crc32c_return);
  nop();
}
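
// Reference model for both kernels (illustrative only): each computes the
// same result as the byte-wise loop
//
//   uint32_t crc_ref(uint32_t crc, const uint8_t* buf, size_t len,
//                    const uint32_t* table) {
//     while (len--) {
//       crc = table[(crc ^ *buf++) & 0xFF] ^ (crc >> 8);
//     }
//     return crc;
//   }
//
// over the appropriate table, with kernel_crc32 applying the ~crc pre/post
// conditioning itself (the not1 calls above), while kernel_crc32c expects
// its crc argument to arrive already conditioned by the caller.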