1 /* 2 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "jvm.h" 27 #include "asm/macroAssembler.inline.hpp" 28 #include "compiler/disassembler.hpp" 29 #include "gc/shared/collectedHeap.inline.hpp" 30 #include "gc/shared/barrierSet.hpp" 31 #include "gc/shared/barrierSetAssembler.hpp" 32 #include "interpreter/interpreter.hpp" 33 #include "memory/resourceArea.hpp" 34 #include "memory/universe.hpp" 35 #include "oops/accessDecorators.hpp" 36 #include "oops/compressedOops.hpp" 37 #include "oops/klass.inline.hpp" 38 #include "prims/methodHandles.hpp" 39 #include "runtime/biasedLocking.hpp" 40 #include "runtime/flags/flagSetting.hpp" 41 #include "runtime/interfaceSupport.inline.hpp" 42 #include "runtime/jniHandles.inline.hpp" 43 #include "runtime/objectMonitor.hpp" 44 #include "runtime/os.inline.hpp" 45 #include "runtime/safepoint.hpp" 46 #include "runtime/safepointMechanism.hpp" 47 #include "runtime/sharedRuntime.hpp" 48 #include "runtime/stubRoutines.hpp" 49 #include "utilities/align.hpp" 50 #include "utilities/macros.hpp" 51 #include "utilities/powerOfTwo.hpp" 52 53 #ifdef PRODUCT 54 #define BLOCK_COMMENT(str) /* nothing */ 55 #define STOP(error) stop(error) 56 #else 57 #define BLOCK_COMMENT(str) block_comment(str) 58 #define STOP(error) block_comment(error); stop(error) 59 #endif 60 61 // Convert the raw encoding form into the form expected by the 62 // constructor for Address. 63 Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) { 64 assert(scale == 0, "not supported"); 65 RelocationHolder rspec; 66 if (disp_reloc != relocInfo::none) { 67 rspec = Relocation::spec_simple(disp_reloc); 68 } 69 70 Register rindex = as_Register(index); 71 if (rindex != G0) { 72 Address madr(as_Register(base), rindex); 73 madr._rspec = rspec; 74 return madr; 75 } else { 76 Address madr(as_Register(base), disp); 77 madr._rspec = rspec; 78 return madr; 79 } 80 } 81 82 Address Argument::address_in_frame() const { 83 // Warning: In LP64 mode disp will occupy more than 10 bits, but 84 // op codes such as ld or ldx, only access disp() to get 85 // their simm13 argument. 86 int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS; 87 if (is_in()) 88 return Address(FP, disp); // In argument. 89 else 90 return Address(SP, disp); // Out argument. 91 } 92 93 static const char* argumentNames[][2] = { 94 {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"}, 95 {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"}, 96 {"A(n>9)","P(n>9)"} 97 }; 98 99 const char* Argument::name() const { 100 int nofArgs = sizeof argumentNames / sizeof argumentNames[0]; 101 int num = number(); 102 if (num >= nofArgs) num = nofArgs - 1; 103 return argumentNames[num][is_in() ? 1 : 0]; 104 } 105 106 #ifdef ASSERT 107 // On RISC, there's no benefit to verifying instruction boundaries. 108 bool AbstractAssembler::pd_check_instruction_mark() { return false; } 109 #endif 110 111 // Patch instruction inst at offset inst_pos to refer to dest_pos 112 // and return the resulting instruction. 113 // We should have pcs, not offsets, but since all is relative, it will work out 114 // OK. 115 int MacroAssembler::patched_branch(int dest_pos, int inst, int inst_pos) { 116 int m; // mask for displacement field 117 int v; // new value for displacement field 118 const int word_aligned_ones = -4; 119 switch (inv_op(inst)) { 120 default: ShouldNotReachHere(); 121 case call_op: m = wdisp(word_aligned_ones, 0, 30); v = wdisp(dest_pos, inst_pos, 30); break; 122 case branch_op: 123 switch (inv_op2(inst)) { 124 case fbp_op2: m = wdisp( word_aligned_ones, 0, 19); v = wdisp( dest_pos, inst_pos, 19); break; 125 case bp_op2: m = wdisp( word_aligned_ones, 0, 19); v = wdisp( dest_pos, inst_pos, 19); break; 126 case fb_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break; 127 case br_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break; 128 case bpr_op2: { 129 if (is_cbcond(inst)) { 130 m = wdisp10(word_aligned_ones, 0); 131 v = wdisp10(dest_pos, inst_pos); 132 } else { 133 m = wdisp16(word_aligned_ones, 0); 134 v = wdisp16(dest_pos, inst_pos); 135 } 136 break; 137 } 138 default: ShouldNotReachHere(); 139 } 140 } 141 return inst & ~m | v; 142 } 143 144 // Return the offset of the branch destionation of instruction inst 145 // at offset pos. 146 // Should have pcs, but since all is relative, it works out. 147 int MacroAssembler::branch_destination(int inst, int pos) { 148 int r; 149 switch (inv_op(inst)) { 150 default: ShouldNotReachHere(); 151 case call_op: r = inv_wdisp(inst, pos, 30); break; 152 case branch_op: 153 switch (inv_op2(inst)) { 154 case fbp_op2: r = inv_wdisp( inst, pos, 19); break; 155 case bp_op2: r = inv_wdisp( inst, pos, 19); break; 156 case fb_op2: r = inv_wdisp( inst, pos, 22); break; 157 case br_op2: r = inv_wdisp( inst, pos, 22); break; 158 case bpr_op2: { 159 if (is_cbcond(inst)) { 160 r = inv_wdisp10(inst, pos); 161 } else { 162 r = inv_wdisp16(inst, pos); 163 } 164 break; 165 } 166 default: ShouldNotReachHere(); 167 } 168 } 169 return r; 170 } 171 172 void MacroAssembler::resolve_jobject(Register value, Register tmp) { 173 Label done, not_weak; 174 br_null(value, false, Assembler::pn, done); // Use NULL as-is. 175 delayed()->andcc(value, JNIHandles::weak_tag_mask, G0); // Test for jweak 176 brx(Assembler::zero, true, Assembler::pt, not_weak); 177 delayed()->nop(); 178 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 179 Address(value, -JNIHandles::weak_tag_value), value, tmp); 180 verify_oop(value); 181 br (Assembler::always, true, Assembler::pt, done); 182 delayed()->nop(); 183 bind(not_weak); 184 access_load_at(T_OBJECT, IN_NATIVE, Address(value, 0), value, tmp); 185 verify_oop(value); 186 bind(done); 187 } 188 189 void MacroAssembler::null_check(Register reg, int offset) { 190 if (needs_explicit_null_check((intptr_t)offset)) { 191 // provoke OS NULL exception if reg = NULL by 192 // accessing M[reg] w/o changing any registers 193 ld_ptr(reg, 0, G0); 194 } 195 else { 196 // nothing to do, (later) access of M[reg + offset] 197 // will provoke OS NULL exception if reg = NULL 198 } 199 } 200 201 // Ring buffer jumps 202 203 204 void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line ) { 205 assert_not_delayed(); 206 jmpl(r1, r2, G0); 207 } 208 void MacroAssembler::jmp(Register r1, int offset, const char* file, int line ) { 209 assert_not_delayed(); 210 jmp(r1, offset); 211 } 212 213 // This code sequence is relocatable to any address, even on LP64. 214 void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) { 215 assert_not_delayed(); 216 // Force fixed length sethi because NativeJump and NativeFarCall don't handle 217 // variable length instruction streams. 218 patchable_sethi(addrlit, temp); 219 Address a(temp, addrlit.low10() + offset); // Add the offset to the displacement. 220 jmpl(a.base(), a.disp(), d); 221 } 222 223 void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) { 224 jumpl(addrlit, temp, G0, offset, file, line); 225 } 226 227 228 // Conditional breakpoint (for assertion checks in assembly code) 229 void MacroAssembler::breakpoint_trap(Condition c, CC cc) { 230 trap(c, cc, G0, ST_RESERVED_FOR_USER_0); 231 } 232 233 // We want to use ST_BREAKPOINT here, but the debugger is confused by it. 234 void MacroAssembler::breakpoint_trap() { 235 trap(ST_RESERVED_FOR_USER_0); 236 } 237 238 void MacroAssembler::safepoint_poll(Label& slow_path, bool a, Register thread_reg, Register temp_reg) { 239 ldx(Address(thread_reg, Thread::polling_page_offset()), temp_reg, 0); 240 // Armed page has poll bit set. 241 and3(temp_reg, SafepointMechanism::poll_bit(), temp_reg); 242 br_notnull(temp_reg, a, Assembler::pn, slow_path); 243 } 244 245 void MacroAssembler::enter() { 246 Unimplemented(); 247 } 248 249 void MacroAssembler::leave() { 250 Unimplemented(); 251 } 252 253 // Calls to C land 254 255 #ifdef ASSERT 256 // a hook for debugging 257 static Thread* reinitialize_thread() { 258 return Thread::current(); 259 } 260 #else 261 #define reinitialize_thread Thread::current 262 #endif 263 264 #ifdef ASSERT 265 address last_get_thread = NULL; 266 #endif 267 268 // call this when G2_thread is not known to be valid 269 void MacroAssembler::get_thread() { 270 save_frame(0); // to avoid clobbering O0 271 mov(G1, L0); // avoid clobbering G1 272 mov(G5_method, L1); // avoid clobbering G5 273 mov(G3, L2); // avoid clobbering G3 also 274 mov(G4, L5); // avoid clobbering G4 275 #ifdef ASSERT 276 AddressLiteral last_get_thread_addrlit(&last_get_thread); 277 set(last_get_thread_addrlit, L3); 278 rdpc(L4); 279 inc(L4, 3 * BytesPerInstWord); // skip rdpc + inc + st_ptr to point L4 at call st_ptr(L4, L3, 0); 280 #endif 281 call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type); 282 delayed()->nop(); 283 mov(L0, G1); 284 mov(L1, G5_method); 285 mov(L2, G3); 286 mov(L5, G4); 287 restore(O0, 0, G2_thread); 288 } 289 290 static Thread* verify_thread_subroutine(Thread* gthread_value) { 291 Thread* correct_value = Thread::current(); 292 guarantee(gthread_value == correct_value, "G2_thread value must be the thread"); 293 return correct_value; 294 } 295 296 void MacroAssembler::verify_thread() { 297 if (VerifyThread) { 298 // NOTE: this chops off the heads of the 64-bit O registers. 299 // make sure G2_thread contains the right value 300 save_frame_and_mov(0, Lmethod, Lmethod); // to avoid clobbering O0 (and propagate Lmethod) 301 mov(G1, L1); // avoid clobbering G1 302 // G2 saved below 303 mov(G3, L3); // avoid clobbering G3 304 mov(G4, L4); // avoid clobbering G4 305 mov(G5_method, L5); // avoid clobbering G5_method 306 call(CAST_FROM_FN_PTR(address,verify_thread_subroutine), relocInfo::runtime_call_type); 307 delayed()->mov(G2_thread, O0); 308 309 mov(L1, G1); // Restore G1 310 // G2 restored below 311 mov(L3, G3); // restore G3 312 mov(L4, G4); // restore G4 313 mov(L5, G5_method); // restore G5_method 314 restore(O0, 0, G2_thread); 315 } 316 } 317 318 319 void MacroAssembler::save_thread(const Register thread_cache) { 320 verify_thread(); 321 if (thread_cache->is_valid()) { 322 assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile"); 323 mov(G2_thread, thread_cache); 324 } 325 if (VerifyThread) { 326 // smash G2_thread, as if the VM were about to anyway 327 set(0x67676767, G2_thread); 328 } 329 } 330 331 332 void MacroAssembler::restore_thread(const Register thread_cache) { 333 if (thread_cache->is_valid()) { 334 assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile"); 335 mov(thread_cache, G2_thread); 336 verify_thread(); 337 } else { 338 // do it the slow way 339 get_thread(); 340 } 341 } 342 343 344 // %%% maybe get rid of [re]set_last_Java_frame 345 void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) { 346 assert_not_delayed(); 347 Address flags(G2_thread, JavaThread::frame_anchor_offset() + 348 JavaFrameAnchor::flags_offset()); 349 Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset()); 350 351 // Always set last_Java_pc and flags first because once last_Java_sp is visible 352 // has_last_Java_frame is true and users will look at the rest of the fields. 353 // (Note: flags should always be zero before we get here so doesn't need to be set.) 354 355 #ifdef ASSERT 356 // Verify that flags was zeroed on return to Java 357 Label PcOk; 358 save_frame(0); // to avoid clobbering O0 359 ld_ptr(pc_addr, L0); 360 br_null_short(L0, Assembler::pt, PcOk); 361 STOP("last_Java_pc not zeroed before leaving Java"); 362 bind(PcOk); 363 364 // Verify that flags was zeroed on return to Java 365 Label FlagsOk; 366 ld(flags, L0); 367 tst(L0); 368 br(Assembler::zero, false, Assembler::pt, FlagsOk); 369 delayed() -> restore(); 370 STOP("flags not zeroed before leaving Java"); 371 bind(FlagsOk); 372 #endif /* ASSERT */ 373 // 374 // When returning from calling out from Java mode the frame anchor's last_Java_pc 375 // will always be set to NULL. It is set here so that if we are doing a call to 376 // native (not VM) that we capture the known pc and don't have to rely on the 377 // native call having a standard frame linkage where we can find the pc. 378 379 if (last_Java_pc->is_valid()) { 380 st_ptr(last_Java_pc, pc_addr); 381 } 382 383 #ifdef ASSERT 384 // Make sure that we have an odd stack 385 Label StackOk; 386 andcc(last_java_sp, 0x01, G0); 387 br(Assembler::notZero, false, Assembler::pt, StackOk); 388 delayed()->nop(); 389 STOP("Stack Not Biased in set_last_Java_frame"); 390 bind(StackOk); 391 #endif // ASSERT 392 assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame"); 393 add( last_java_sp, STACK_BIAS, G4_scratch ); 394 st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset()); 395 } 396 397 void MacroAssembler::reset_last_Java_frame(void) { 398 assert_not_delayed(); 399 400 Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset()); 401 Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()); 402 Address flags (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset()); 403 404 #ifdef ASSERT 405 // check that it WAS previously set 406 save_frame_and_mov(0, Lmethod, Lmethod); // Propagate Lmethod to helper frame 407 ld_ptr(sp_addr, L0); 408 tst(L0); 409 breakpoint_trap(Assembler::zero, Assembler::ptr_cc); 410 restore(); 411 #endif // ASSERT 412 413 st_ptr(G0, sp_addr); 414 // Always return last_Java_pc to zero 415 st_ptr(G0, pc_addr); 416 // Always null flags after return to Java 417 st(G0, flags); 418 } 419 420 421 void MacroAssembler::call_VM_base( 422 Register oop_result, 423 Register thread_cache, 424 Register last_java_sp, 425 address entry_point, 426 int number_of_arguments, 427 bool check_exceptions) 428 { 429 assert_not_delayed(); 430 431 // determine last_java_sp register 432 if (!last_java_sp->is_valid()) { 433 last_java_sp = SP; 434 } 435 // debugging support 436 assert(number_of_arguments >= 0 , "cannot have negative number of arguments"); 437 438 // 64-bit last_java_sp is biased! 439 set_last_Java_frame(last_java_sp, noreg); 440 if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early 441 save_thread(thread_cache); 442 // do the call 443 call(entry_point, relocInfo::runtime_call_type); 444 if (!VerifyThread) 445 delayed()->mov(G2_thread, O0); // pass thread as first argument 446 else 447 delayed()->nop(); // (thread already passed) 448 restore_thread(thread_cache); 449 reset_last_Java_frame(); 450 451 // check for pending exceptions. use Gtemp as scratch register. 452 if (check_exceptions) { 453 check_and_forward_exception(Gtemp); 454 } 455 456 #ifdef ASSERT 457 set(badHeapWordVal, G3); 458 set(badHeapWordVal, G4); 459 set(badHeapWordVal, G5); 460 #endif 461 462 // get oop result if there is one and reset the value in the thread 463 if (oop_result->is_valid()) { 464 get_vm_result(oop_result); 465 } 466 } 467 468 void MacroAssembler::check_and_forward_exception(Register scratch_reg) 469 { 470 Label L; 471 472 check_and_handle_popframe(scratch_reg); 473 check_and_handle_earlyret(scratch_reg); 474 475 Address exception_addr(G2_thread, Thread::pending_exception_offset()); 476 ld_ptr(exception_addr, scratch_reg); 477 br_null_short(scratch_reg, pt, L); 478 // we use O7 linkage so that forward_exception_entry has the issuing PC 479 call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); 480 delayed()->nop(); 481 bind(L); 482 } 483 484 485 void MacroAssembler::check_and_handle_popframe(Register scratch_reg) { 486 } 487 488 489 void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) { 490 } 491 492 493 void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) { 494 call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions); 495 } 496 497 498 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) { 499 // O0 is reserved for the thread 500 mov(arg_1, O1); 501 call_VM(oop_result, entry_point, 1, check_exceptions); 502 } 503 504 505 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) { 506 // O0 is reserved for the thread 507 mov(arg_1, O1); 508 mov(arg_2, O2); assert(arg_2 != O1, "smashed argument"); 509 call_VM(oop_result, entry_point, 2, check_exceptions); 510 } 511 512 513 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) { 514 // O0 is reserved for the thread 515 mov(arg_1, O1); 516 mov(arg_2, O2); assert(arg_2 != O1, "smashed argument"); 517 mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument"); 518 call_VM(oop_result, entry_point, 3, check_exceptions); 519 } 520 521 522 523 // Note: The following call_VM overloadings are useful when a "save" 524 // has already been performed by a stub, and the last Java frame is 525 // the previous one. In that case, last_java_sp must be passed as FP 526 // instead of SP. 527 528 529 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) { 530 call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions); 531 } 532 533 534 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) { 535 // O0 is reserved for the thread 536 mov(arg_1, O1); 537 call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions); 538 } 539 540 541 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) { 542 // O0 is reserved for the thread 543 mov(arg_1, O1); 544 mov(arg_2, O2); assert(arg_2 != O1, "smashed argument"); 545 call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions); 546 } 547 548 549 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) { 550 // O0 is reserved for the thread 551 mov(arg_1, O1); 552 mov(arg_2, O2); assert(arg_2 != O1, "smashed argument"); 553 mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument"); 554 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions); 555 } 556 557 558 559 void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) { 560 assert_not_delayed(); 561 save_thread(thread_cache); 562 // do the call 563 call(entry_point, relocInfo::runtime_call_type); 564 delayed()->nop(); 565 restore_thread(thread_cache); 566 #ifdef ASSERT 567 set(badHeapWordVal, G3); 568 set(badHeapWordVal, G4); 569 set(badHeapWordVal, G5); 570 #endif 571 } 572 573 574 void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) { 575 call_VM_leaf_base(thread_cache, entry_point, number_of_arguments); 576 } 577 578 579 void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) { 580 mov(arg_1, O0); 581 call_VM_leaf(thread_cache, entry_point, 1); 582 } 583 584 585 void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) { 586 mov(arg_1, O0); 587 mov(arg_2, O1); assert(arg_2 != O0, "smashed argument"); 588 call_VM_leaf(thread_cache, entry_point, 2); 589 } 590 591 592 void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) { 593 mov(arg_1, O0); 594 mov(arg_2, O1); assert(arg_2 != O0, "smashed argument"); 595 mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument"); 596 call_VM_leaf(thread_cache, entry_point, 3); 597 } 598 599 600 void MacroAssembler::get_vm_result(Register oop_result) { 601 verify_thread(); 602 Address vm_result_addr(G2_thread, JavaThread::vm_result_offset()); 603 ld_ptr( vm_result_addr, oop_result); 604 st_ptr(G0, vm_result_addr); 605 verify_oop(oop_result); 606 } 607 608 609 void MacroAssembler::get_vm_result_2(Register metadata_result) { 610 verify_thread(); 611 Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset()); 612 ld_ptr(vm_result_addr_2, metadata_result); 613 st_ptr(G0, vm_result_addr_2); 614 } 615 616 617 // We require that C code which does not return a value in vm_result will 618 // leave it undisturbed. 619 void MacroAssembler::set_vm_result(Register oop_result) { 620 verify_thread(); 621 Address vm_result_addr(G2_thread, JavaThread::vm_result_offset()); 622 verify_oop(oop_result); 623 624 # ifdef ASSERT 625 // Check that we are not overwriting any other oop. 626 save_frame_and_mov(0, Lmethod, Lmethod); // Propagate Lmethod 627 ld_ptr(vm_result_addr, L0); 628 tst(L0); 629 restore(); 630 breakpoint_trap(notZero, Assembler::ptr_cc); 631 // } 632 # endif 633 634 st_ptr(oop_result, vm_result_addr); 635 } 636 637 638 void MacroAssembler::ic_call(address entry, bool emit_delay, jint method_index) { 639 RelocationHolder rspec = virtual_call_Relocation::spec(pc(), method_index); 640 patchable_set((intptr_t)Universe::non_oop_word(), G5_inline_cache_reg); 641 relocate(rspec); 642 call(entry, relocInfo::none); 643 if (emit_delay) { 644 delayed()->nop(); 645 } 646 } 647 648 649 void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) { 650 address save_pc; 651 int shiftcnt; 652 #ifdef VALIDATE_PIPELINE 653 assert_no_delay("Cannot put two instructions in delay-slot."); 654 #endif 655 v9_dep(); 656 save_pc = pc(); 657 658 int msb32 = (int) (addrlit.value() >> 32); 659 int lsb32 = (int) (addrlit.value()); 660 661 if (msb32 == 0 && lsb32 >= 0) { 662 Assembler::sethi(lsb32, d, addrlit.rspec()); 663 } 664 else if (msb32 == -1) { 665 Assembler::sethi(~lsb32, d, addrlit.rspec()); 666 xor3(d, ~low10(~0), d); 667 } 668 else { 669 Assembler::sethi(msb32, d, addrlit.rspec()); // msb 22-bits 670 if (msb32 & 0x3ff) // Any bits? 671 or3(d, msb32 & 0x3ff, d); // msb 32-bits are now in lsb 32 672 if (lsb32 & 0xFFFFFC00) { // done? 673 if ((lsb32 >> 20) & 0xfff) { // Any bits set? 674 sllx(d, 12, d); // Make room for next 12 bits 675 or3(d, (lsb32 >> 20) & 0xfff, d); // Or in next 12 676 shiftcnt = 0; // We already shifted 677 } 678 else 679 shiftcnt = 12; 680 if ((lsb32 >> 10) & 0x3ff) { 681 sllx(d, shiftcnt + 10, d); // Make room for last 10 bits 682 or3(d, (lsb32 >> 10) & 0x3ff, d); // Or in next 10 683 shiftcnt = 0; 684 } 685 else 686 shiftcnt = 10; 687 sllx(d, shiftcnt + 10, d); // Shift leaving disp field 0'd 688 } 689 else 690 sllx(d, 32, d); 691 } 692 // Pad out the instruction sequence so it can be patched later. 693 if (ForceRelocatable || (addrlit.rtype() != relocInfo::none && 694 addrlit.rtype() != relocInfo::runtime_call_type)) { 695 while (pc() < (save_pc + (7 * BytesPerInstWord))) 696 nop(); 697 } 698 } 699 700 701 void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) { 702 internal_sethi(addrlit, d, false); 703 } 704 705 706 void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) { 707 internal_sethi(addrlit, d, true); 708 } 709 710 711 int MacroAssembler::insts_for_sethi(address a, bool worst_case) { 712 if (worst_case) return 7; 713 intptr_t iaddr = (intptr_t) a; 714 int msb32 = (int) (iaddr >> 32); 715 int lsb32 = (int) (iaddr); 716 int count; 717 if (msb32 == 0 && lsb32 >= 0) 718 count = 1; 719 else if (msb32 == -1) 720 count = 2; 721 else { 722 count = 2; 723 if (msb32 & 0x3ff) 724 count++; 725 if (lsb32 & 0xFFFFFC00 ) { 726 if ((lsb32 >> 20) & 0xfff) count += 2; 727 if ((lsb32 >> 10) & 0x3ff) count += 2; 728 } 729 } 730 return count; 731 } 732 733 int MacroAssembler::worst_case_insts_for_set() { 734 return insts_for_sethi(NULL, true) + 1; 735 } 736 737 738 // Keep in sync with MacroAssembler::insts_for_internal_set 739 void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) { 740 intptr_t value = addrlit.value(); 741 742 if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) { 743 // can optimize 744 if (-4096 <= value && value <= 4095) { 745 or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended) 746 return; 747 } 748 if (inv_hi22(hi22(value)) == value) { 749 sethi(addrlit, d); 750 return; 751 } 752 } 753 assert_no_delay("Cannot put two instructions in delay-slot."); 754 internal_sethi(addrlit, d, ForceRelocatable); 755 if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) { 756 add(d, addrlit.low10(), d, addrlit.rspec()); 757 } 758 } 759 760 // Keep in sync with MacroAssembler::internal_set 761 int MacroAssembler::insts_for_internal_set(intptr_t value) { 762 // can optimize 763 if (-4096 <= value && value <= 4095) { 764 return 1; 765 } 766 if (inv_hi22(hi22(value)) == value) { 767 return insts_for_sethi((address) value); 768 } 769 int count = insts_for_sethi((address) value); 770 AddressLiteral al(value); 771 if (al.low10() != 0) { 772 count++; 773 } 774 return count; 775 } 776 777 void MacroAssembler::set(const AddressLiteral& al, Register d) { 778 internal_set(al, d, false); 779 } 780 781 void MacroAssembler::set(intptr_t value, Register d) { 782 AddressLiteral al(value); 783 internal_set(al, d, false); 784 } 785 786 void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) { 787 AddressLiteral al(addr, rspec); 788 internal_set(al, d, false); 789 } 790 791 void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) { 792 internal_set(al, d, true); 793 } 794 795 void MacroAssembler::patchable_set(intptr_t value, Register d) { 796 AddressLiteral al(value); 797 internal_set(al, d, true); 798 } 799 800 801 void MacroAssembler::set64(jlong value, Register d, Register tmp) { 802 assert_not_delayed(); 803 v9_dep(); 804 805 int hi = (int)(value >> 32); 806 int lo = (int)(value & ~0); 807 int bits_33to2 = (int)((value >> 2) & ~0); 808 // (Matcher::isSimpleConstant64 knows about the following optimizations.) 809 if (Assembler::is_simm13(lo) && value == lo) { 810 or3(G0, lo, d); 811 } else if (hi == 0) { 812 Assembler::sethi(lo, d); // hardware version zero-extends to upper 32 813 if (low10(lo) != 0) 814 or3(d, low10(lo), d); 815 } 816 else if ((hi >> 2) == 0) { 817 Assembler::sethi(bits_33to2, d); // hardware version zero-extends to upper 32 818 sllx(d, 2, d); 819 if (low12(lo) != 0) 820 or3(d, low12(lo), d); 821 } 822 else if (hi == -1) { 823 Assembler::sethi(~lo, d); // hardware version zero-extends to upper 32 824 xor3(d, low10(lo) ^ ~low10(~0), d); 825 } 826 else if (lo == 0) { 827 if (Assembler::is_simm13(hi)) { 828 or3(G0, hi, d); 829 } else { 830 Assembler::sethi(hi, d); // hardware version zero-extends to upper 32 831 if (low10(hi) != 0) 832 or3(d, low10(hi), d); 833 } 834 sllx(d, 32, d); 835 } 836 else { 837 Assembler::sethi(hi, tmp); 838 Assembler::sethi(lo, d); // macro assembler version sign-extends 839 if (low10(hi) != 0) 840 or3 (tmp, low10(hi), tmp); 841 if (low10(lo) != 0) 842 or3 ( d, low10(lo), d); 843 sllx(tmp, 32, tmp); 844 or3 (d, tmp, d); 845 } 846 } 847 848 int MacroAssembler::insts_for_set64(jlong value) { 849 v9_dep(); 850 851 int hi = (int) (value >> 32); 852 int lo = (int) (value & ~0); 853 int count = 0; 854 855 // (Matcher::isSimpleConstant64 knows about the following optimizations.) 856 if (Assembler::is_simm13(lo) && value == lo) { 857 count++; 858 } else if (hi == 0) { 859 count++; 860 if (low10(lo) != 0) 861 count++; 862 } 863 else if (hi == -1) { 864 count += 2; 865 } 866 else if (lo == 0) { 867 if (Assembler::is_simm13(hi)) { 868 count++; 869 } else { 870 count++; 871 if (low10(hi) != 0) 872 count++; 873 } 874 count++; 875 } 876 else { 877 count += 2; 878 if (low10(hi) != 0) 879 count++; 880 if (low10(lo) != 0) 881 count++; 882 count += 2; 883 } 884 return count; 885 } 886 887 // compute size in bytes of sparc frame, given 888 // number of extraWords 889 int MacroAssembler::total_frame_size_in_bytes(int extraWords) { 890 891 int nWords = frame::memory_parameter_word_sp_offset; 892 893 nWords += extraWords; 894 895 if (nWords & 1) ++nWords; // round up to double-word 896 897 return nWords * BytesPerWord; 898 } 899 900 901 // save_frame: given number of "extra" words in frame, 902 // issue approp. save instruction (p 200, v8 manual) 903 904 void MacroAssembler::save_frame(int extraWords) { 905 int delta = -total_frame_size_in_bytes(extraWords); 906 if (is_simm13(delta)) { 907 save(SP, delta, SP); 908 } else { 909 set(delta, G3_scratch); 910 save(SP, G3_scratch, SP); 911 } 912 } 913 914 915 void MacroAssembler::save_frame_c1(int size_in_bytes) { 916 if (is_simm13(-size_in_bytes)) { 917 save(SP, -size_in_bytes, SP); 918 } else { 919 set(-size_in_bytes, G3_scratch); 920 save(SP, G3_scratch, SP); 921 } 922 } 923 924 925 void MacroAssembler::save_frame_and_mov(int extraWords, 926 Register s1, Register d1, 927 Register s2, Register d2) { 928 assert_not_delayed(); 929 930 // The trick here is to use precisely the same memory word 931 // that trap handlers also use to save the register. 932 // This word cannot be used for any other purpose, but 933 // it works fine to save the register's value, whether or not 934 // an interrupt flushes register windows at any given moment! 935 Address s1_addr; 936 if (s1->is_valid() && (s1->is_in() || s1->is_local())) { 937 s1_addr = s1->address_in_saved_window(); 938 st_ptr(s1, s1_addr); 939 } 940 941 Address s2_addr; 942 if (s2->is_valid() && (s2->is_in() || s2->is_local())) { 943 s2_addr = s2->address_in_saved_window(); 944 st_ptr(s2, s2_addr); 945 } 946 947 save_frame(extraWords); 948 949 if (s1_addr.base() == SP) { 950 ld_ptr(s1_addr.after_save(), d1); 951 } else if (s1->is_valid()) { 952 mov(s1->after_save(), d1); 953 } 954 955 if (s2_addr.base() == SP) { 956 ld_ptr(s2_addr.after_save(), d2); 957 } else if (s2->is_valid()) { 958 mov(s2->after_save(), d2); 959 } 960 } 961 962 963 AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) { 964 assert(oop_recorder() != NULL, "this assembler needs a Recorder"); 965 int index = oop_recorder()->allocate_metadata_index(obj); 966 RelocationHolder rspec = metadata_Relocation::spec(index); 967 return AddressLiteral((address)obj, rspec); 968 } 969 970 AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) { 971 assert(oop_recorder() != NULL, "this assembler needs a Recorder"); 972 int index = oop_recorder()->find_index(obj); 973 RelocationHolder rspec = metadata_Relocation::spec(index); 974 return AddressLiteral((address)obj, rspec); 975 } 976 977 978 AddressLiteral MacroAssembler::constant_oop_address(jobject obj) { 979 #ifdef ASSERT 980 { 981 ThreadInVMfromUnknown tiv; 982 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); 983 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop"); 984 } 985 #endif 986 int oop_index = oop_recorder()->find_index(obj); 987 return AddressLiteral(obj, oop_Relocation::spec(oop_index)); 988 } 989 990 void MacroAssembler::set_narrow_oop(jobject obj, Register d) { 991 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); 992 int oop_index = oop_recorder()->find_index(obj); 993 RelocationHolder rspec = oop_Relocation::spec(oop_index); 994 995 assert_not_delayed(); 996 // Relocation with special format (see relocInfo_sparc.hpp). 997 relocate(rspec, 1); 998 // Assembler::sethi(0x3fffff, d); 999 emit_int32( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff) ); 1000 // Don't add relocation for 'add'. Do patching during 'sethi' processing. 1001 add(d, 0x3ff, d); 1002 1003 } 1004 1005 void MacroAssembler::set_narrow_klass(Klass* k, Register d) { 1006 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); 1007 int klass_index = oop_recorder()->find_index(k); 1008 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 1009 narrowOop encoded_k = CompressedKlassPointers::encode(k); 1010 1011 assert_not_delayed(); 1012 // Relocation with special format (see relocInfo_sparc.hpp). 1013 relocate(rspec, 1); 1014 // Assembler::sethi(encoded_k, d); 1015 emit_int32( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(encoded_k) ); 1016 // Don't add relocation for 'add'. Do patching during 'sethi' processing. 1017 add(d, low10(encoded_k), d); 1018 1019 } 1020 1021 void MacroAssembler::align(int modulus) { 1022 while (offset() % modulus != 0) nop(); 1023 } 1024 1025 void RegistersForDebugging::print(outputStream* s) { 1026 FlagSetting fs(Debugging, true); 1027 int j; 1028 for (j = 0; j < 8; ++j) { 1029 if (j != 6) { s->print("i%d = ", j); os::print_location(s, i[j]); } 1030 else { s->print( "fp = " ); os::print_location(s, i[j]); } 1031 } 1032 s->cr(); 1033 1034 for (j = 0; j < 8; ++j) { 1035 s->print("l%d = ", j); os::print_location(s, l[j]); 1036 } 1037 s->cr(); 1038 1039 for (j = 0; j < 8; ++j) { 1040 if (j != 6) { s->print("o%d = ", j); os::print_location(s, o[j]); } 1041 else { s->print( "sp = " ); os::print_location(s, o[j]); } 1042 } 1043 s->cr(); 1044 1045 for (j = 0; j < 8; ++j) { 1046 s->print("g%d = ", j); os::print_location(s, g[j]); 1047 } 1048 s->cr(); 1049 1050 // print out floats with compression 1051 for (j = 0; j < 32; ) { 1052 jfloat val = f[j]; 1053 int last = j; 1054 for ( ; last+1 < 32; ++last ) { 1055 char b1[1024], b2[1024]; 1056 sprintf(b1, "%f", val); 1057 sprintf(b2, "%f", f[last+1]); 1058 if (strcmp(b1, b2)) 1059 break; 1060 } 1061 s->print("f%d", j); 1062 if ( j != last ) s->print(" - f%d", last); 1063 s->print(" = %f", val); 1064 s->fill_to(25); 1065 s->print_cr(" (0x%x)", *(int*)&val); 1066 j = last + 1; 1067 } 1068 s->cr(); 1069 1070 // and doubles (evens only) 1071 for (j = 0; j < 32; ) { 1072 jdouble val = d[j]; 1073 int last = j; 1074 for ( ; last+1 < 32; ++last ) { 1075 char b1[1024], b2[1024]; 1076 sprintf(b1, "%f", val); 1077 sprintf(b2, "%f", d[last+1]); 1078 if (strcmp(b1, b2)) 1079 break; 1080 } 1081 s->print("d%d", 2 * j); 1082 if ( j != last ) s->print(" - d%d", last); 1083 s->print(" = %f", val); 1084 s->fill_to(30); 1085 s->print("(0x%x)", *(int*)&val); 1086 s->fill_to(42); 1087 s->print_cr("(0x%x)", *(1 + (int*)&val)); 1088 j = last + 1; 1089 } 1090 s->cr(); 1091 } 1092 1093 void RegistersForDebugging::save_registers(MacroAssembler* a) { 1094 a->sub(FP, align_up(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0); 1095 a->flushw(); 1096 int i; 1097 for (i = 0; i < 8; ++i) { 1098 a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1); a->st_ptr( L1, O0, i_offset(i)); 1099 a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1); a->st_ptr( L1, O0, l_offset(i)); 1100 a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i)); 1101 a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i)); 1102 } 1103 for (i = 0; i < 32; ++i) { 1104 a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i)); 1105 } 1106 for (i = 0; i < 64; i += 2) { 1107 a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i)); 1108 } 1109 } 1110 1111 void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) { 1112 for (int i = 1; i < 8; ++i) { 1113 a->ld_ptr(r, g_offset(i), as_gRegister(i)); 1114 } 1115 for (int j = 0; j < 32; ++j) { 1116 a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j)); 1117 } 1118 for (int k = 0; k < 64; k += 2) { 1119 a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k)); 1120 } 1121 } 1122 1123 void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * file, int line) { 1124 // plausibility check for oops 1125 if (!VerifyOops) return; 1126 1127 if (reg == G0) return; // always NULL, which is always an oop 1128 1129 BLOCK_COMMENT("verify_oop {"); 1130 char buffer[64]; 1131 #ifdef COMPILER1 1132 if (CommentedAssembly) { 1133 snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset()); 1134 block_comment(buffer); 1135 } 1136 #endif 1137 1138 const char* real_msg = NULL; 1139 { 1140 ResourceMark rm; 1141 stringStream ss; 1142 ss.print("%s at offset %d (%s:%d)", msg, offset(), file, line); 1143 real_msg = code_string(ss.as_string()); 1144 } 1145 1146 // Call indirectly to solve generation ordering problem 1147 AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address()); 1148 1149 // Make some space on stack above the current register window. 1150 // Enough to hold 8 64-bit registers. 1151 add(SP,-8*8,SP); 1152 1153 // Save some 64-bit registers; a normal 'save' chops the heads off 1154 // of 64-bit longs in the 32-bit build. 1155 stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8); 1156 stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8); 1157 mov(reg,O0); // Move arg into O0; arg might be in O7 which is about to be crushed 1158 stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8); 1159 1160 // Size of set() should stay the same 1161 patchable_set((intptr_t)real_msg, O1); 1162 // Load address to call to into O7 1163 load_ptr_contents(a, O7); 1164 // Register call to verify_oop_subroutine 1165 callr(O7, G0); 1166 delayed()->nop(); 1167 // recover frame size 1168 add(SP, 8*8,SP); 1169 BLOCK_COMMENT("} verify_oop"); 1170 } 1171 1172 void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char * file, int line) { 1173 // plausibility check for oops 1174 if (!VerifyOops) return; 1175 1176 const char* real_msg = NULL; 1177 { 1178 ResourceMark rm; 1179 stringStream ss; 1180 ss.print("%s at SP+%d (%s:%d)", msg, addr.disp(), file, line); 1181 real_msg = code_string(ss.as_string()); 1182 } 1183 1184 // Call indirectly to solve generation ordering problem 1185 AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address()); 1186 1187 // Make some space on stack above the current register window. 1188 // Enough to hold 8 64-bit registers. 1189 add(SP,-8*8,SP); 1190 1191 // Save some 64-bit registers; a normal 'save' chops the heads off 1192 // of 64-bit longs in the 32-bit build. 1193 stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8); 1194 stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8); 1195 ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed 1196 stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8); 1197 1198 // Size of set() should stay the same 1199 patchable_set((intptr_t)real_msg, O1); 1200 // Load address to call to into O7 1201 load_ptr_contents(a, O7); 1202 // Register call to verify_oop_subroutine 1203 callr(O7, G0); 1204 delayed()->nop(); 1205 // recover frame size 1206 add(SP, 8*8,SP); 1207 } 1208 1209 // side-door communication with signalHandler in os_solaris.cpp 1210 address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL }; 1211 1212 // This macro is expanded just once; it creates shared code. Contract: 1213 // receives an oop in O0. Must restore O0 & O7 from TLS. Must not smash ANY 1214 // registers, including flags. May not use a register 'save', as this blows 1215 // the high bits of the O-regs if they contain Long values. Acts as a 'leaf' 1216 // call. 1217 void MacroAssembler::verify_oop_subroutine() { 1218 // Leaf call; no frame. 1219 Label succeed, fail, null_or_fail; 1220 1221 // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home). 1222 // O0 is now the oop to be checked. O7 is the return address. 1223 Register O0_obj = O0; 1224 1225 // Save some more registers for temps. 1226 stx(O2,SP,frame::register_save_words*wordSize+STACK_BIAS+2*8); 1227 stx(O3,SP,frame::register_save_words*wordSize+STACK_BIAS+3*8); 1228 stx(O4,SP,frame::register_save_words*wordSize+STACK_BIAS+4*8); 1229 stx(O5,SP,frame::register_save_words*wordSize+STACK_BIAS+5*8); 1230 1231 // Save flags 1232 Register O5_save_flags = O5; 1233 rdccr( O5_save_flags ); 1234 1235 { // count number of verifies 1236 Register O2_adr = O2; 1237 Register O3_accum = O3; 1238 inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum); 1239 } 1240 1241 Register O2_mask = O2; 1242 Register O3_bits = O3; 1243 Register O4_temp = O4; 1244 1245 // mark lower end of faulting range 1246 assert(_verify_oop_implicit_branch[0] == NULL, "set once"); 1247 _verify_oop_implicit_branch[0] = pc(); 1248 1249 // We can't check the mark oop because it could be in the process of 1250 // locking or unlocking while this is running. 1251 set(Universe::verify_oop_mask (), O2_mask); 1252 set(Universe::verify_oop_bits (), O3_bits); 1253 1254 // assert((obj & oop_mask) == oop_bits); 1255 and3(O0_obj, O2_mask, O4_temp); 1256 cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, null_or_fail); 1257 1258 if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) { 1259 // the null_or_fail case is useless; must test for null separately 1260 br_null_short(O0_obj, pn, succeed); 1261 } 1262 1263 // Check the Klass* of this object for being in the right area of memory. 1264 // Cannot do the load in the delay above slot in case O0 is null 1265 load_klass(O0_obj, O0_obj); 1266 // assert((klass != NULL) 1267 br_null_short(O0_obj, pn, fail); 1268 1269 wrccr( O5_save_flags ); // Restore CCR's 1270 1271 // mark upper end of faulting range 1272 _verify_oop_implicit_branch[1] = pc(); 1273 1274 //----------------------- 1275 // all tests pass 1276 bind(succeed); 1277 1278 // Restore prior 64-bit registers 1279 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+0*8,O0); 1280 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+1*8,O1); 1281 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+2*8,O2); 1282 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+3*8,O3); 1283 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+4*8,O4); 1284 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+5*8,O5); 1285 1286 retl(); // Leaf return; restore prior O7 in delay slot 1287 delayed()->ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+7*8,O7); 1288 1289 //----------------------- 1290 bind(null_or_fail); // nulls are less common but OK 1291 br_null(O0_obj, false, pt, succeed); 1292 delayed()->wrccr( O5_save_flags ); // Restore CCR's 1293 1294 //----------------------- 1295 // report failure: 1296 bind(fail); 1297 _verify_oop_implicit_branch[2] = pc(); 1298 1299 wrccr( O5_save_flags ); // Restore CCR's 1300 1301 save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2)); 1302 1303 // stop_subroutine expects message pointer in I1. 1304 mov(I1, O1); 1305 1306 // Restore prior 64-bit registers 1307 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+0*8,I0); 1308 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+1*8,I1); 1309 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+2*8,I2); 1310 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+3*8,I3); 1311 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+4*8,I4); 1312 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+5*8,I5); 1313 1314 // factor long stop-sequence into subroutine to save space 1315 assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet"); 1316 1317 // call indirectly to solve generation ordering problem 1318 AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address()); 1319 load_ptr_contents(al, O5); 1320 jmpl(O5, 0, O7); 1321 delayed()->nop(); 1322 } 1323 1324 1325 void MacroAssembler::stop(const char* msg) { 1326 // save frame first to get O7 for return address 1327 // add one word to size in case struct is odd number of words long 1328 // It must be doubleword-aligned for storing doubles into it. 1329 1330 save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2)); 1331 1332 // stop_subroutine expects message pointer in I1. 1333 // Size of set() should stay the same 1334 patchable_set((intptr_t)msg, O1); 1335 1336 // factor long stop-sequence into subroutine to save space 1337 assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet"); 1338 1339 // call indirectly to solve generation ordering problem 1340 AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address()); 1341 load_ptr_contents(a, O5); 1342 jmpl(O5, 0, O7); 1343 delayed()->nop(); 1344 1345 breakpoint_trap(); // make stop actually stop rather than writing 1346 // unnoticeable results in the output files. 1347 1348 // restore(); done in callee to save space! 1349 } 1350 1351 1352 void MacroAssembler::warn(const char* msg) { 1353 save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2)); 1354 RegistersForDebugging::save_registers(this); 1355 mov(O0, L0); 1356 // Size of set() should stay the same 1357 patchable_set((intptr_t)msg, O0); 1358 call( CAST_FROM_FN_PTR(address, warning) ); 1359 delayed()->nop(); 1360 // ret(); 1361 // delayed()->restore(); 1362 RegistersForDebugging::restore_registers(this, L0); 1363 restore(); 1364 } 1365 1366 1367 void MacroAssembler::untested(const char* what) { 1368 // We must be able to turn interactive prompting off 1369 // in order to run automated test scripts on the VM 1370 // Use the flag ShowMessageBoxOnError 1371 1372 const char* b = NULL; 1373 { 1374 ResourceMark rm; 1375 stringStream ss; 1376 ss.print("untested: %s", what); 1377 b = code_string(ss.as_string()); 1378 } 1379 if (ShowMessageBoxOnError) { STOP(b); } 1380 else { warn(b); } 1381 } 1382 1383 1384 void MacroAssembler::unimplemented(const char* what) { 1385 const char* buf = NULL; 1386 { 1387 ResourceMark rm; 1388 stringStream ss; 1389 ss.print("unimplemented: %s", what); 1390 buf = code_string(ss.as_string()); 1391 } 1392 stop(buf); 1393 } 1394 1395 1396 void MacroAssembler::stop_subroutine() { 1397 RegistersForDebugging::save_registers(this); 1398 1399 // for the sake of the debugger, stick a PC on the current frame 1400 // (this assumes that the caller has performed an extra "save") 1401 mov(I7, L7); 1402 add(O7, -7 * BytesPerInt, I7); 1403 1404 save_frame(); // one more save to free up another O7 register 1405 mov(I0, O1); // addr of reg save area 1406 1407 // We expect pointer to message in I1. Caller must set it up in O1 1408 mov(I1, O0); // get msg 1409 call (CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type); 1410 delayed()->nop(); 1411 1412 restore(); 1413 1414 RegistersForDebugging::restore_registers(this, O0); 1415 1416 save_frame(0); 1417 call(CAST_FROM_FN_PTR(address,breakpoint)); 1418 delayed()->nop(); 1419 restore(); 1420 1421 mov(L7, I7); 1422 retl(); 1423 delayed()->restore(); // see stop above 1424 } 1425 1426 1427 void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) { 1428 if ( ShowMessageBoxOnError ) { 1429 JavaThread* thread = JavaThread::current(); 1430 JavaThreadState saved_state = thread->thread_state(); 1431 thread->set_thread_state(_thread_in_vm); 1432 { 1433 // In order to get locks work, we need to fake a in_VM state 1434 ttyLocker ttyl; 1435 ::tty->print_cr("EXECUTION STOPPED: %s\n", msg); 1436 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) { 1437 BytecodeCounter::print(); 1438 } 1439 if (os::message_box(msg, "Execution stopped, print registers?")) 1440 regs->print(::tty); 1441 } 1442 BREAKPOINT; 1443 ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state); 1444 } 1445 else { 1446 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg); 1447 } 1448 assert(false, "DEBUG MESSAGE: %s", msg); 1449 } 1450 1451 1452 void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) { 1453 subcc( Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words? 1454 Label no_extras; 1455 br( negative, true, pt, no_extras ); // if neg, clear reg 1456 delayed()->set(0, Rresult); // annuled, so only if taken 1457 bind( no_extras ); 1458 } 1459 1460 1461 void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) { 1462 add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult); 1463 bclr(1, Rresult); 1464 sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes 1465 } 1466 1467 1468 void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) { 1469 calc_frame_size(Rextra_words, Rresult); 1470 neg(Rresult); 1471 save(SP, Rresult, SP); 1472 } 1473 1474 1475 // --------------------------------------------------------- 1476 Assembler::RCondition cond2rcond(Assembler::Condition c) { 1477 switch (c) { 1478 /*case zero: */ 1479 case Assembler::equal: return Assembler::rc_z; 1480 case Assembler::lessEqual: return Assembler::rc_lez; 1481 case Assembler::less: return Assembler::rc_lz; 1482 /*case notZero:*/ 1483 case Assembler::notEqual: return Assembler::rc_nz; 1484 case Assembler::greater: return Assembler::rc_gz; 1485 case Assembler::greaterEqual: return Assembler::rc_gez; 1486 } 1487 ShouldNotReachHere(); 1488 return Assembler::rc_z; 1489 } 1490 1491 // compares (32 bit) register with zero and branches. NOT FOR USE WITH 64-bit POINTERS 1492 void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a, Predict p) { 1493 tst(s1); 1494 br (c, a, p, L); 1495 } 1496 1497 // Compares a pointer register with zero and branches on null. 1498 // Does a test & branch on 32-bit systems and a register-branch on 64-bit. 1499 void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L ) { 1500 assert_not_delayed(); 1501 bpr( rc_z, a, p, s1, L ); 1502 } 1503 1504 void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L ) { 1505 assert_not_delayed(); 1506 bpr( rc_nz, a, p, s1, L ); 1507 } 1508 1509 // Compare registers and branch with nop in delay slot or cbcond without delay slot. 1510 1511 // Compare integer (32 bit) values (icc only). 1512 void MacroAssembler::cmp_and_br_short(Register s1, Register s2, Condition c, 1513 Predict p, Label& L) { 1514 assert_not_delayed(); 1515 if (use_cbcond(L)) { 1516 Assembler::cbcond(c, icc, s1, s2, L); 1517 } else { 1518 cmp(s1, s2); 1519 br(c, false, p, L); 1520 delayed()->nop(); 1521 } 1522 } 1523 1524 // Compare integer (32 bit) values (icc only). 1525 void MacroAssembler::cmp_and_br_short(Register s1, int simm13a, Condition c, 1526 Predict p, Label& L) { 1527 assert_not_delayed(); 1528 if (is_simm(simm13a,5) && use_cbcond(L)) { 1529 Assembler::cbcond(c, icc, s1, simm13a, L); 1530 } else { 1531 cmp(s1, simm13a); 1532 br(c, false, p, L); 1533 delayed()->nop(); 1534 } 1535 } 1536 1537 // Branch that tests xcc in LP64 and icc in !LP64 1538 void MacroAssembler::cmp_and_brx_short(Register s1, Register s2, Condition c, 1539 Predict p, Label& L) { 1540 assert_not_delayed(); 1541 if (use_cbcond(L)) { 1542 Assembler::cbcond(c, ptr_cc, s1, s2, L); 1543 } else { 1544 cmp(s1, s2); 1545 brx(c, false, p, L); 1546 delayed()->nop(); 1547 } 1548 } 1549 1550 // Branch that tests xcc in LP64 and icc in !LP64 1551 void MacroAssembler::cmp_and_brx_short(Register s1, int simm13a, Condition c, 1552 Predict p, Label& L) { 1553 assert_not_delayed(); 1554 if (is_simm(simm13a,5) && use_cbcond(L)) { 1555 Assembler::cbcond(c, ptr_cc, s1, simm13a, L); 1556 } else { 1557 cmp(s1, simm13a); 1558 brx(c, false, p, L); 1559 delayed()->nop(); 1560 } 1561 } 1562 1563 // Short branch version for compares a pointer with zero. 1564 1565 void MacroAssembler::br_null_short(Register s1, Predict p, Label& L) { 1566 assert_not_delayed(); 1567 if (use_cbcond(L)) { 1568 Assembler::cbcond(zero, ptr_cc, s1, 0, L); 1569 } else { 1570 br_null(s1, false, p, L); 1571 delayed()->nop(); 1572 } 1573 } 1574 1575 void MacroAssembler::br_notnull_short(Register s1, Predict p, Label& L) { 1576 assert_not_delayed(); 1577 if (use_cbcond(L)) { 1578 Assembler::cbcond(notZero, ptr_cc, s1, 0, L); 1579 } else { 1580 br_notnull(s1, false, p, L); 1581 delayed()->nop(); 1582 } 1583 } 1584 1585 // Unconditional short branch 1586 void MacroAssembler::ba_short(Label& L) { 1587 assert_not_delayed(); 1588 if (use_cbcond(L)) { 1589 Assembler::cbcond(equal, icc, G0, G0, L); 1590 } else { 1591 br(always, false, pt, L); 1592 delayed()->nop(); 1593 } 1594 } 1595 1596 // Branch if 'icc' says zero or not (i.e. icc.z == 1|0). 1597 1598 void MacroAssembler::br_icc_zero(bool iszero, Predict p, Label &L) { 1599 assert_not_delayed(); 1600 Condition cf = (iszero ? Assembler::zero : Assembler::notZero); 1601 br(cf, false, p, L); 1602 delayed()->nop(); 1603 } 1604 1605 // instruction sequences factored across compiler & interpreter 1606 1607 1608 void MacroAssembler::lcmp( Register Ra_hi, Register Ra_low, 1609 Register Rb_hi, Register Rb_low, 1610 Register Rresult) { 1611 1612 Label check_low_parts, done; 1613 1614 cmp(Ra_hi, Rb_hi ); // compare hi parts 1615 br(equal, true, pt, check_low_parts); 1616 delayed()->cmp(Ra_low, Rb_low); // test low parts 1617 1618 // And, with an unsigned comparison, it does not matter if the numbers 1619 // are negative or not. 1620 // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff. 1621 // The second one is bigger (unsignedly). 1622 1623 // Other notes: The first move in each triplet can be unconditional 1624 // (and therefore probably prefetchable). 1625 // And the equals case for the high part does not need testing, 1626 // since that triplet is reached only after finding the high halves differ. 1627 1628 mov(-1, Rresult); 1629 ba(done); 1630 delayed()->movcc(greater, false, icc, 1, Rresult); 1631 1632 bind(check_low_parts); 1633 1634 mov( -1, Rresult); 1635 movcc(equal, false, icc, 0, Rresult); 1636 movcc(greaterUnsigned, false, icc, 1, Rresult); 1637 1638 bind(done); 1639 } 1640 1641 void MacroAssembler::lneg( Register Rhi, Register Rlow ) { 1642 subcc( G0, Rlow, Rlow ); 1643 subc( G0, Rhi, Rhi ); 1644 } 1645 1646 void MacroAssembler::lshl( Register Rin_high, Register Rin_low, 1647 Register Rcount, 1648 Register Rout_high, Register Rout_low, 1649 Register Rtemp ) { 1650 1651 1652 Register Ralt_count = Rtemp; 1653 Register Rxfer_bits = Rtemp; 1654 1655 assert( Ralt_count != Rin_high 1656 && Ralt_count != Rin_low 1657 && Ralt_count != Rcount 1658 && Rxfer_bits != Rin_low 1659 && Rxfer_bits != Rin_high 1660 && Rxfer_bits != Rcount 1661 && Rxfer_bits != Rout_low 1662 && Rout_low != Rin_high, 1663 "register alias checks"); 1664 1665 Label big_shift, done; 1666 1667 // This code can be optimized to use the 64 bit shifts in V9. 1668 // Here we use the 32 bit shifts. 1669 1670 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits 1671 subcc(Rcount, 31, Ralt_count); 1672 br(greater, true, pn, big_shift); 1673 delayed()->dec(Ralt_count); 1674 1675 // shift < 32 bits, Ralt_count = Rcount-31 1676 1677 // We get the transfer bits by shifting right by 32-count the low 1678 // register. This is done by shifting right by 31-count and then by one 1679 // more to take care of the special (rare) case where count is zero 1680 // (shifting by 32 would not work). 1681 1682 neg(Ralt_count); 1683 1684 // The order of the next two instructions is critical in the case where 1685 // Rin and Rout are the same and should not be reversed. 1686 1687 srl(Rin_low, Ralt_count, Rxfer_bits); // shift right by 31-count 1688 if (Rcount != Rout_low) { 1689 sll(Rin_low, Rcount, Rout_low); // low half 1690 } 1691 sll(Rin_high, Rcount, Rout_high); 1692 if (Rcount == Rout_low) { 1693 sll(Rin_low, Rcount, Rout_low); // low half 1694 } 1695 srl(Rxfer_bits, 1, Rxfer_bits ); // shift right by one more 1696 ba(done); 1697 delayed()->or3(Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low 1698 1699 // shift >= 32 bits, Ralt_count = Rcount-32 1700 bind(big_shift); 1701 sll(Rin_low, Ralt_count, Rout_high ); 1702 clr(Rout_low); 1703 1704 bind(done); 1705 } 1706 1707 1708 void MacroAssembler::lshr( Register Rin_high, Register Rin_low, 1709 Register Rcount, 1710 Register Rout_high, Register Rout_low, 1711 Register Rtemp ) { 1712 1713 Register Ralt_count = Rtemp; 1714 Register Rxfer_bits = Rtemp; 1715 1716 assert( Ralt_count != Rin_high 1717 && Ralt_count != Rin_low 1718 && Ralt_count != Rcount 1719 && Rxfer_bits != Rin_low 1720 && Rxfer_bits != Rin_high 1721 && Rxfer_bits != Rcount 1722 && Rxfer_bits != Rout_high 1723 && Rout_high != Rin_low, 1724 "register alias checks"); 1725 1726 Label big_shift, done; 1727 1728 // This code can be optimized to use the 64 bit shifts in V9. 1729 // Here we use the 32 bit shifts. 1730 1731 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits 1732 subcc(Rcount, 31, Ralt_count); 1733 br(greater, true, pn, big_shift); 1734 delayed()->dec(Ralt_count); 1735 1736 // shift < 32 bits, Ralt_count = Rcount-31 1737 1738 // We get the transfer bits by shifting left by 32-count the high 1739 // register. This is done by shifting left by 31-count and then by one 1740 // more to take care of the special (rare) case where count is zero 1741 // (shifting by 32 would not work). 1742 1743 neg(Ralt_count); 1744 if (Rcount != Rout_low) { 1745 srl(Rin_low, Rcount, Rout_low); 1746 } 1747 1748 // The order of the next two instructions is critical in the case where 1749 // Rin and Rout are the same and should not be reversed. 1750 1751 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count 1752 sra(Rin_high, Rcount, Rout_high ); // high half 1753 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more 1754 if (Rcount == Rout_low) { 1755 srl(Rin_low, Rcount, Rout_low); 1756 } 1757 ba(done); 1758 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high 1759 1760 // shift >= 32 bits, Ralt_count = Rcount-32 1761 bind(big_shift); 1762 1763 sra(Rin_high, Ralt_count, Rout_low); 1764 sra(Rin_high, 31, Rout_high); // sign into hi 1765 1766 bind( done ); 1767 } 1768 1769 1770 1771 void MacroAssembler::lushr( Register Rin_high, Register Rin_low, 1772 Register Rcount, 1773 Register Rout_high, Register Rout_low, 1774 Register Rtemp ) { 1775 1776 Register Ralt_count = Rtemp; 1777 Register Rxfer_bits = Rtemp; 1778 1779 assert( Ralt_count != Rin_high 1780 && Ralt_count != Rin_low 1781 && Ralt_count != Rcount 1782 && Rxfer_bits != Rin_low 1783 && Rxfer_bits != Rin_high 1784 && Rxfer_bits != Rcount 1785 && Rxfer_bits != Rout_high 1786 && Rout_high != Rin_low, 1787 "register alias checks"); 1788 1789 Label big_shift, done; 1790 1791 // This code can be optimized to use the 64 bit shifts in V9. 1792 // Here we use the 32 bit shifts. 1793 1794 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits 1795 subcc(Rcount, 31, Ralt_count); 1796 br(greater, true, pn, big_shift); 1797 delayed()->dec(Ralt_count); 1798 1799 // shift < 32 bits, Ralt_count = Rcount-31 1800 1801 // We get the transfer bits by shifting left by 32-count the high 1802 // register. This is done by shifting left by 31-count and then by one 1803 // more to take care of the special (rare) case where count is zero 1804 // (shifting by 32 would not work). 1805 1806 neg(Ralt_count); 1807 if (Rcount != Rout_low) { 1808 srl(Rin_low, Rcount, Rout_low); 1809 } 1810 1811 // The order of the next two instructions is critical in the case where 1812 // Rin and Rout are the same and should not be reversed. 1813 1814 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count 1815 srl(Rin_high, Rcount, Rout_high ); // high half 1816 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more 1817 if (Rcount == Rout_low) { 1818 srl(Rin_low, Rcount, Rout_low); 1819 } 1820 ba(done); 1821 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high 1822 1823 // shift >= 32 bits, Ralt_count = Rcount-32 1824 bind(big_shift); 1825 1826 srl(Rin_high, Ralt_count, Rout_low); 1827 clr(Rout_high); 1828 1829 bind( done ); 1830 } 1831 1832 void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) { 1833 cmp(Ra, Rb); 1834 mov(-1, Rresult); 1835 movcc(equal, false, xcc, 0, Rresult); 1836 movcc(greater, false, xcc, 1, Rresult); 1837 } 1838 1839 1840 void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) { 1841 switch (size_in_bytes) { 1842 case 8: ld_long(src, dst); break; 1843 case 4: ld( src, dst); break; 1844 case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break; 1845 case 1: is_signed ? ldsb(src, dst) : ldub(src, dst); break; 1846 default: ShouldNotReachHere(); 1847 } 1848 } 1849 1850 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) { 1851 switch (size_in_bytes) { 1852 case 8: st_long(src, dst); break; 1853 case 4: st( src, dst); break; 1854 case 2: sth( src, dst); break; 1855 case 1: stb( src, dst); break; 1856 default: ShouldNotReachHere(); 1857 } 1858 } 1859 1860 1861 void MacroAssembler::float_cmp( bool is_float, int unordered_result, 1862 FloatRegister Fa, FloatRegister Fb, 1863 Register Rresult) { 1864 if (is_float) { 1865 fcmp(FloatRegisterImpl::S, fcc0, Fa, Fb); 1866 } else { 1867 fcmp(FloatRegisterImpl::D, fcc0, Fa, Fb); 1868 } 1869 1870 if (unordered_result == 1) { 1871 mov( -1, Rresult); 1872 movcc(f_equal, true, fcc0, 0, Rresult); 1873 movcc(f_unorderedOrGreater, true, fcc0, 1, Rresult); 1874 } else { 1875 mov( -1, Rresult); 1876 movcc(f_equal, true, fcc0, 0, Rresult); 1877 movcc(f_greater, true, fcc0, 1, Rresult); 1878 } 1879 } 1880 1881 1882 void MacroAssembler::save_all_globals_into_locals() { 1883 mov(G1,L1); 1884 mov(G2,L2); 1885 mov(G3,L3); 1886 mov(G4,L4); 1887 mov(G5,L5); 1888 mov(G6,L6); 1889 mov(G7,L7); 1890 } 1891 1892 void MacroAssembler::restore_globals_from_locals() { 1893 mov(L1,G1); 1894 mov(L2,G2); 1895 mov(L3,G3); 1896 mov(L4,G4); 1897 mov(L5,G5); 1898 mov(L6,G6); 1899 mov(L7,G7); 1900 } 1901 1902 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, 1903 Register tmp, 1904 int offset) { 1905 intptr_t value = *delayed_value_addr; 1906 if (value != 0) 1907 return RegisterOrConstant(value + offset); 1908 1909 // load indirectly to solve generation ordering problem 1910 AddressLiteral a(delayed_value_addr); 1911 load_ptr_contents(a, tmp); 1912 1913 #ifdef ASSERT 1914 tst(tmp); 1915 breakpoint_trap(zero, xcc); 1916 #endif 1917 1918 if (offset != 0) 1919 add(tmp, offset, tmp); 1920 1921 return RegisterOrConstant(tmp); 1922 } 1923 1924 1925 RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 1926 assert(d.register_or_noreg() != G0, "lost side effect"); 1927 if ((s2.is_constant() && s2.as_constant() == 0) || 1928 (s2.is_register() && s2.as_register() == G0)) { 1929 // Do nothing, just move value. 1930 if (s1.is_register()) { 1931 if (d.is_constant()) d = temp; 1932 mov(s1.as_register(), d.as_register()); 1933 return d; 1934 } else { 1935 return s1; 1936 } 1937 } 1938 1939 if (s1.is_register()) { 1940 assert_different_registers(s1.as_register(), temp); 1941 if (d.is_constant()) d = temp; 1942 andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 1943 return d; 1944 } else { 1945 if (s2.is_register()) { 1946 assert_different_registers(s2.as_register(), temp); 1947 if (d.is_constant()) d = temp; 1948 set(s1.as_constant(), temp); 1949 andn(temp, s2.as_register(), d.as_register()); 1950 return d; 1951 } else { 1952 intptr_t res = s1.as_constant() & ~s2.as_constant(); 1953 return res; 1954 } 1955 } 1956 } 1957 1958 RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 1959 assert(d.register_or_noreg() != G0, "lost side effect"); 1960 if ((s2.is_constant() && s2.as_constant() == 0) || 1961 (s2.is_register() && s2.as_register() == G0)) { 1962 // Do nothing, just move value. 1963 if (s1.is_register()) { 1964 if (d.is_constant()) d = temp; 1965 mov(s1.as_register(), d.as_register()); 1966 return d; 1967 } else { 1968 return s1; 1969 } 1970 } 1971 1972 if (s1.is_register()) { 1973 assert_different_registers(s1.as_register(), temp); 1974 if (d.is_constant()) d = temp; 1975 add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 1976 return d; 1977 } else { 1978 if (s2.is_register()) { 1979 assert_different_registers(s2.as_register(), temp); 1980 if (d.is_constant()) d = temp; 1981 add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register()); 1982 return d; 1983 } else { 1984 intptr_t res = s1.as_constant() + s2.as_constant(); 1985 return res; 1986 } 1987 } 1988 } 1989 1990 RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 1991 assert(d.register_or_noreg() != G0, "lost side effect"); 1992 if (!is_simm13(s2.constant_or_zero())) 1993 s2 = (s2.as_constant() & 0xFF); 1994 if ((s2.is_constant() && s2.as_constant() == 0) || 1995 (s2.is_register() && s2.as_register() == G0)) { 1996 // Do nothing, just move value. 1997 if (s1.is_register()) { 1998 if (d.is_constant()) d = temp; 1999 mov(s1.as_register(), d.as_register()); 2000 return d; 2001 } else { 2002 return s1; 2003 } 2004 } 2005 2006 if (s1.is_register()) { 2007 assert_different_registers(s1.as_register(), temp); 2008 if (d.is_constant()) d = temp; 2009 sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2010 return d; 2011 } else { 2012 if (s2.is_register()) { 2013 assert_different_registers(s2.as_register(), temp); 2014 if (d.is_constant()) d = temp; 2015 set(s1.as_constant(), temp); 2016 sll_ptr(temp, s2.as_register(), d.as_register()); 2017 return d; 2018 } else { 2019 intptr_t res = s1.as_constant() << s2.as_constant(); 2020 return res; 2021 } 2022 } 2023 } 2024 2025 2026 // Look up the method for a megamorphic invokeinterface call. 2027 // The target method is determined by <intf_klass, itable_index>. 2028 // The receiver klass is in recv_klass. 2029 // On success, the result will be in method_result, and execution falls through. 2030 // On failure, execution transfers to the given label. 2031 void MacroAssembler::lookup_interface_method(Register recv_klass, 2032 Register intf_klass, 2033 RegisterOrConstant itable_index, 2034 Register method_result, 2035 Register scan_temp, 2036 Register sethi_temp, 2037 Label& L_no_such_interface, 2038 bool return_method) { 2039 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp); 2040 assert(!return_method || itable_index.is_constant() || itable_index.as_register() == method_result, 2041 "caller must use same register for non-constant itable index as for method"); 2042 2043 Label L_no_such_interface_restore; 2044 bool did_save = false; 2045 if (scan_temp == noreg || sethi_temp == noreg) { 2046 Register recv_2 = recv_klass->is_global() ? recv_klass : L0; 2047 Register intf_2 = intf_klass->is_global() ? intf_klass : L1; 2048 assert(method_result->is_global(), "must be able to return value"); 2049 scan_temp = L2; 2050 sethi_temp = L3; 2051 save_frame_and_mov(0, recv_klass, recv_2, intf_klass, intf_2); 2052 recv_klass = recv_2; 2053 intf_klass = intf_2; 2054 did_save = true; 2055 } 2056 2057 // Compute start of first itableOffsetEntry (which is at the end of the vtable) 2058 int vtable_base = in_bytes(Klass::vtable_start_offset()); 2059 int scan_step = itableOffsetEntry::size() * wordSize; 2060 int vte_size = vtableEntry::size_in_bytes(); 2061 2062 lduw(recv_klass, in_bytes(Klass::vtable_length_offset()), scan_temp); 2063 // %%% We should store the aligned, prescaled offset in the klassoop. 2064 // Then the next several instructions would fold away. 2065 2066 int itb_offset = vtable_base; 2067 int itb_scale = exact_log2(vtableEntry::size_in_bytes()); 2068 sll(scan_temp, itb_scale, scan_temp); 2069 add(scan_temp, itb_offset, scan_temp); 2070 add(recv_klass, scan_temp, scan_temp); 2071 2072 if (return_method) { 2073 // Adjust recv_klass by scaled itable_index, so we can free itable_index. 2074 RegisterOrConstant itable_offset = itable_index; 2075 itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset); 2076 itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset); 2077 add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass); 2078 } 2079 2080 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) { 2081 // if (scan->interface() == intf) { 2082 // result = (klass + scan->offset() + itable_index); 2083 // } 2084 // } 2085 Label L_search, L_found_method; 2086 2087 for (int peel = 1; peel >= 0; peel--) { 2088 // %%%% Could load both offset and interface in one ldx, if they were 2089 // in the opposite order. This would save a load. 2090 ld_ptr(scan_temp, itableOffsetEntry::interface_offset_in_bytes(), method_result); 2091 2092 // Check that this entry is non-null. A null entry means that 2093 // the receiver class doesn't implement the interface, and wasn't the 2094 // same as when the caller was compiled. 2095 bpr(Assembler::rc_z, false, Assembler::pn, method_result, did_save ? L_no_such_interface_restore : L_no_such_interface); 2096 delayed()->cmp(method_result, intf_klass); 2097 2098 if (peel) { 2099 brx(Assembler::equal, false, Assembler::pt, L_found_method); 2100 } else { 2101 brx(Assembler::notEqual, false, Assembler::pn, L_search); 2102 // (invert the test to fall through to found_method...) 2103 } 2104 delayed()->add(scan_temp, scan_step, scan_temp); 2105 2106 if (!peel) break; 2107 2108 bind(L_search); 2109 } 2110 2111 bind(L_found_method); 2112 2113 if (return_method) { 2114 // Got a hit. 2115 int ito_offset = itableOffsetEntry::offset_offset_in_bytes(); 2116 // scan_temp[-scan_step] points to the vtable offset we need 2117 ito_offset -= scan_step; 2118 lduw(scan_temp, ito_offset, scan_temp); 2119 ld_ptr(recv_klass, scan_temp, method_result); 2120 } 2121 2122 if (did_save) { 2123 Label L_done; 2124 ba(L_done); 2125 delayed()->restore(); 2126 2127 bind(L_no_such_interface_restore); 2128 ba(L_no_such_interface); 2129 delayed()->restore(); 2130 2131 bind(L_done); 2132 } 2133 } 2134 2135 2136 // virtual method calling 2137 void MacroAssembler::lookup_virtual_method(Register recv_klass, 2138 RegisterOrConstant vtable_index, 2139 Register method_result) { 2140 assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg()); 2141 Register sethi_temp = method_result; 2142 const int base = in_bytes(Klass::vtable_start_offset()) + 2143 // method pointer offset within the vtable entry: 2144 vtableEntry::method_offset_in_bytes(); 2145 RegisterOrConstant vtable_offset = vtable_index; 2146 // Each of the following three lines potentially generates an instruction. 2147 // But the total number of address formation instructions will always be 2148 // at most two, and will often be zero. In any case, it will be optimal. 2149 // If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x). 2150 // If vtable_index is a constant, we will have at most (set B+X<<N,t; ld_ptr k,t). 2151 vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size_in_bytes()), vtable_offset); 2152 vtable_offset = regcon_inc_ptr(vtable_offset, base, vtable_offset, sethi_temp); 2153 Address vtable_entry_addr(recv_klass, ensure_simm13_or_reg(vtable_offset, sethi_temp)); 2154 ld_ptr(vtable_entry_addr, method_result); 2155 } 2156 2157 2158 void MacroAssembler::check_klass_subtype(Register sub_klass, 2159 Register super_klass, 2160 Register temp_reg, 2161 Register temp2_reg, 2162 Label& L_success) { 2163 Register sub_2 = sub_klass; 2164 Register sup_2 = super_klass; 2165 if (!sub_2->is_global()) sub_2 = L0; 2166 if (!sup_2->is_global()) sup_2 = L1; 2167 bool did_save = false; 2168 if (temp_reg == noreg || temp2_reg == noreg) { 2169 temp_reg = L2; 2170 temp2_reg = L3; 2171 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 2172 sub_klass = sub_2; 2173 super_klass = sup_2; 2174 did_save = true; 2175 } 2176 Label L_failure, L_pop_to_failure, L_pop_to_success; 2177 check_klass_subtype_fast_path(sub_klass, super_klass, 2178 temp_reg, temp2_reg, 2179 (did_save ? &L_pop_to_success : &L_success), 2180 (did_save ? &L_pop_to_failure : &L_failure), NULL); 2181 2182 if (!did_save) 2183 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 2184 check_klass_subtype_slow_path(sub_2, sup_2, 2185 L2, L3, L4, L5, 2186 NULL, &L_pop_to_failure); 2187 2188 // on success: 2189 bind(L_pop_to_success); 2190 restore(); 2191 ba_short(L_success); 2192 2193 // on failure: 2194 bind(L_pop_to_failure); 2195 restore(); 2196 bind(L_failure); 2197 } 2198 2199 2200 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 2201 Register super_klass, 2202 Register temp_reg, 2203 Register temp2_reg, 2204 Label* L_success, 2205 Label* L_failure, 2206 Label* L_slow_path, 2207 RegisterOrConstant super_check_offset) { 2208 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 2209 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2210 2211 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 2212 bool need_slow_path = (must_load_sco || 2213 super_check_offset.constant_or_zero() == sco_offset); 2214 2215 assert_different_registers(sub_klass, super_klass, temp_reg); 2216 if (super_check_offset.is_register()) { 2217 assert_different_registers(sub_klass, super_klass, temp_reg, 2218 super_check_offset.as_register()); 2219 } else if (must_load_sco) { 2220 assert(temp2_reg != noreg, "supply either a temp or a register offset"); 2221 } 2222 2223 Label L_fallthrough; 2224 int label_nulls = 0; 2225 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 2226 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 2227 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } 2228 assert(label_nulls <= 1 || 2229 (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path), 2230 "at most one NULL in the batch, usually"); 2231 2232 // If the pointers are equal, we are done (e.g., String[] elements). 2233 // This self-check enables sharing of secondary supertype arrays among 2234 // non-primary types such as array-of-interface. Otherwise, each such 2235 // type would need its own customized SSA. 2236 // We move this check to the front of the fast path because many 2237 // type checks are in fact trivially successful in this manner, 2238 // so we get a nicely predicted branch right at the start of the check. 2239 cmp(super_klass, sub_klass); 2240 brx(Assembler::equal, false, Assembler::pn, *L_success); 2241 delayed()->nop(); 2242 2243 // Check the supertype display: 2244 if (must_load_sco) { 2245 // The super check offset is always positive... 2246 lduw(super_klass, sco_offset, temp2_reg); 2247 super_check_offset = RegisterOrConstant(temp2_reg); 2248 // super_check_offset is register. 2249 assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register()); 2250 } 2251 ld_ptr(sub_klass, super_check_offset, temp_reg); 2252 cmp(super_klass, temp_reg); 2253 2254 // This check has worked decisively for primary supers. 2255 // Secondary supers are sought in the super_cache ('super_cache_addr'). 2256 // (Secondary supers are interfaces and very deeply nested subtypes.) 2257 // This works in the same check above because of a tricky aliasing 2258 // between the super_cache and the primary super display elements. 2259 // (The 'super_check_addr' can address either, as the case requires.) 2260 // Note that the cache is updated below if it does not help us find 2261 // what we need immediately. 2262 // So if it was a primary super, we can just fail immediately. 2263 // Otherwise, it's the slow path for us (no success at this point). 2264 2265 // Hacked ba(), which may only be used just before L_fallthrough. 2266 #define FINAL_JUMP(label) \ 2267 if (&(label) != &L_fallthrough) { \ 2268 ba(label); delayed()->nop(); \ 2269 } 2270 2271 if (super_check_offset.is_register()) { 2272 brx(Assembler::equal, false, Assembler::pn, *L_success); 2273 delayed()->cmp(super_check_offset.as_register(), sc_offset); 2274 2275 if (L_failure == &L_fallthrough) { 2276 brx(Assembler::equal, false, Assembler::pt, *L_slow_path); 2277 delayed()->nop(); 2278 } else { 2279 brx(Assembler::notEqual, false, Assembler::pn, *L_failure); 2280 delayed()->nop(); 2281 FINAL_JUMP(*L_slow_path); 2282 } 2283 } else if (super_check_offset.as_constant() == sc_offset) { 2284 // Need a slow path; fast failure is impossible. 2285 if (L_slow_path == &L_fallthrough) { 2286 brx(Assembler::equal, false, Assembler::pt, *L_success); 2287 delayed()->nop(); 2288 } else { 2289 brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path); 2290 delayed()->nop(); 2291 FINAL_JUMP(*L_success); 2292 } 2293 } else { 2294 // No slow path; it's a fast decision. 2295 if (L_failure == &L_fallthrough) { 2296 brx(Assembler::equal, false, Assembler::pt, *L_success); 2297 delayed()->nop(); 2298 } else { 2299 brx(Assembler::notEqual, false, Assembler::pn, *L_failure); 2300 delayed()->nop(); 2301 FINAL_JUMP(*L_success); 2302 } 2303 } 2304 2305 bind(L_fallthrough); 2306 2307 #undef FINAL_JUMP 2308 } 2309 2310 2311 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 2312 Register super_klass, 2313 Register count_temp, 2314 Register scan_temp, 2315 Register scratch_reg, 2316 Register coop_reg, 2317 Label* L_success, 2318 Label* L_failure) { 2319 assert_different_registers(sub_klass, super_klass, 2320 count_temp, scan_temp, scratch_reg, coop_reg); 2321 2322 Label L_fallthrough, L_loop; 2323 int label_nulls = 0; 2324 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 2325 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 2326 assert(label_nulls <= 1, "at most one NULL in the batch"); 2327 2328 // a couple of useful fields in sub_klass: 2329 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 2330 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 2331 2332 // Do a linear scan of the secondary super-klass chain. 2333 // This code is rarely used, so simplicity is a virtue here. 2334 2335 #ifndef PRODUCT 2336 int* pst_counter = &SharedRuntime::_partial_subtype_ctr; 2337 inc_counter((address) pst_counter, count_temp, scan_temp); 2338 #endif 2339 2340 // We will consult the secondary-super array. 2341 ld_ptr(sub_klass, ss_offset, scan_temp); 2342 2343 Register search_key = super_klass; 2344 2345 // Load the array length. (Positive movl does right thing on LP64.) 2346 lduw(scan_temp, Array<Klass*>::length_offset_in_bytes(), count_temp); 2347 2348 // Check for empty secondary super list 2349 tst(count_temp); 2350 2351 // In the array of super classes elements are pointer sized. 2352 int element_size = wordSize; 2353 2354 // Top of search loop 2355 bind(L_loop); 2356 br(Assembler::equal, false, Assembler::pn, *L_failure); 2357 delayed()->add(scan_temp, element_size, scan_temp); 2358 2359 // Skip the array header in all array accesses. 2360 int elem_offset = Array<Klass*>::base_offset_in_bytes(); 2361 elem_offset -= element_size; // the scan pointer was pre-incremented also 2362 2363 // Load next super to check 2364 ld_ptr( scan_temp, elem_offset, scratch_reg ); 2365 2366 // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list 2367 cmp(scratch_reg, search_key); 2368 2369 // A miss means we are NOT a subtype and need to keep looping 2370 brx(Assembler::notEqual, false, Assembler::pn, L_loop); 2371 delayed()->deccc(count_temp); // decrement trip counter in delay slot 2372 2373 // Success. Cache the super we found and proceed in triumph. 2374 st_ptr(super_klass, sub_klass, sc_offset); 2375 2376 if (L_success != &L_fallthrough) { 2377 ba(*L_success); 2378 delayed()->nop(); 2379 } 2380 2381 bind(L_fallthrough); 2382 } 2383 2384 2385 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot, 2386 Register temp_reg, 2387 int extra_slot_offset) { 2388 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 2389 int stackElementSize = Interpreter::stackElementSize; 2390 int offset = extra_slot_offset * stackElementSize; 2391 if (arg_slot.is_constant()) { 2392 offset += arg_slot.as_constant() * stackElementSize; 2393 return offset; 2394 } else { 2395 assert(temp_reg != noreg, "must specify"); 2396 sll_ptr(arg_slot.as_register(), exact_log2(stackElementSize), temp_reg); 2397 if (offset != 0) 2398 add(temp_reg, offset, temp_reg); 2399 return temp_reg; 2400 } 2401 } 2402 2403 2404 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 2405 Register temp_reg, 2406 int extra_slot_offset) { 2407 return Address(Gargs, argument_offset(arg_slot, temp_reg, extra_slot_offset)); 2408 } 2409 2410 2411 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, 2412 Register temp_reg, 2413 Label& done, Label* slow_case, 2414 BiasedLockingCounters* counters) { 2415 assert(UseBiasedLocking, "why call this otherwise?"); 2416 2417 if (PrintBiasedLockingStatistics) { 2418 assert_different_registers(obj_reg, mark_reg, temp_reg, O7); 2419 if (counters == NULL) 2420 counters = BiasedLocking::counters(); 2421 } 2422 2423 Label cas_label; 2424 2425 // Biased locking 2426 // See whether the lock is currently biased toward our thread and 2427 // whether the epoch is still valid 2428 // Note that the runtime guarantees sufficient alignment of JavaThread 2429 // pointers to allow age to be placed into low bits 2430 assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits, "biased locking makes assumptions about bit layout"); 2431 and3(mark_reg, markWord::biased_lock_mask_in_place, temp_reg); 2432 cmp_and_brx_short(temp_reg, markWord::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label); 2433 2434 load_klass(obj_reg, temp_reg); 2435 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2436 or3(G2_thread, temp_reg, temp_reg); 2437 xor3(mark_reg, temp_reg, temp_reg); 2438 andcc(temp_reg, ~((int) markWord::age_mask_in_place), temp_reg); 2439 if (counters != NULL) { 2440 cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg); 2441 // Reload mark_reg as we may need it later 2442 ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg); 2443 } 2444 brx(Assembler::equal, true, Assembler::pt, done); 2445 delayed()->nop(); 2446 2447 Label try_revoke_bias; 2448 Label try_rebias; 2449 Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes()); 2450 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2451 2452 // At this point we know that the header has the bias pattern and 2453 // that we are not the bias owner in the current epoch. We need to 2454 // figure out more details about the state of the header in order to 2455 // know what operations can be legally performed on the object's 2456 // header. 2457 2458 // If the low three bits in the xor result aren't clear, that means 2459 // the prototype header is no longer biased and we have to revoke 2460 // the bias on this object. 2461 btst(markWord::biased_lock_mask_in_place, temp_reg); 2462 brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias); 2463 2464 // Biasing is still enabled for this data type. See whether the 2465 // epoch of the current bias is still valid, meaning that the epoch 2466 // bits of the mark word are equal to the epoch bits of the 2467 // prototype header. (Note that the prototype header's epoch bits 2468 // only change at a safepoint.) If not, attempt to rebias the object 2469 // toward the current thread. Note that we must be absolutely sure 2470 // that the current epoch is invalid in order to do this because 2471 // otherwise the manipulations it performs on the mark word are 2472 // illegal. 2473 delayed()->btst(markWord::epoch_mask_in_place, temp_reg); 2474 brx(Assembler::notZero, false, Assembler::pn, try_rebias); 2475 2476 // The epoch of the current bias is still valid but we know nothing 2477 // about the owner; it might be set or it might be clear. Try to 2478 // acquire the bias of the object using an atomic operation. If this 2479 // fails we will go in to the runtime to revoke the object's bias. 2480 // Note that we first construct the presumed unbiased header so we 2481 // don't accidentally blow away another thread's valid bias. 2482 delayed()->and3(mark_reg, 2483 markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place, 2484 mark_reg); 2485 or3(G2_thread, mark_reg, temp_reg); 2486 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2487 // If the biasing toward our thread failed, this means that 2488 // another thread succeeded in biasing it toward itself and we 2489 // need to revoke that bias. The revocation will occur in the 2490 // interpreter runtime in the slow case. 2491 cmp(mark_reg, temp_reg); 2492 if (counters != NULL) { 2493 cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg); 2494 } 2495 if (slow_case != NULL) { 2496 brx(Assembler::notEqual, true, Assembler::pn, *slow_case); 2497 delayed()->nop(); 2498 } 2499 ba_short(done); 2500 2501 bind(try_rebias); 2502 // At this point we know the epoch has expired, meaning that the 2503 // current "bias owner", if any, is actually invalid. Under these 2504 // circumstances _only_, we are allowed to use the current header's 2505 // value as the comparison value when doing the cas to acquire the 2506 // bias in the current epoch. In other words, we allow transfer of 2507 // the bias from one thread to another directly in this situation. 2508 // 2509 // FIXME: due to a lack of registers we currently blow away the age 2510 // bits in this situation. Should attempt to preserve them. 2511 load_klass(obj_reg, temp_reg); 2512 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2513 or3(G2_thread, temp_reg, temp_reg); 2514 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2515 // If the biasing toward our thread failed, this means that 2516 // another thread succeeded in biasing it toward itself and we 2517 // need to revoke that bias. The revocation will occur in the 2518 // interpreter runtime in the slow case. 2519 cmp(mark_reg, temp_reg); 2520 if (counters != NULL) { 2521 cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg); 2522 } 2523 if (slow_case != NULL) { 2524 brx(Assembler::notEqual, true, Assembler::pn, *slow_case); 2525 delayed()->nop(); 2526 } 2527 ba_short(done); 2528 2529 bind(try_revoke_bias); 2530 // The prototype mark in the klass doesn't have the bias bit set any 2531 // more, indicating that objects of this data type are not supposed 2532 // to be biased any more. We are going to try to reset the mark of 2533 // this object to the prototype value and fall through to the 2534 // CAS-based locking scheme. Note that if our CAS fails, it means 2535 // that another thread raced us for the privilege of revoking the 2536 // bias of this particular object, so it's okay to continue in the 2537 // normal locking code. 2538 // 2539 // FIXME: due to a lack of registers we currently blow away the age 2540 // bits in this situation. Should attempt to preserve them. 2541 load_klass(obj_reg, temp_reg); 2542 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2543 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2544 // Fall through to the normal CAS-based lock, because no matter what 2545 // the result of the above CAS, some thread must have succeeded in 2546 // removing the bias bit from the object's header. 2547 if (counters != NULL) { 2548 cmp(mark_reg, temp_reg); 2549 cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg); 2550 } 2551 2552 bind(cas_label); 2553 } 2554 2555 void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done, 2556 bool allow_delay_slot_filling) { 2557 // Check for biased locking unlock case, which is a no-op 2558 // Note: we do not have to check the thread ID for two reasons. 2559 // First, the interpreter checks for IllegalMonitorStateException at 2560 // a higher level. Second, if the bias was revoked while we held the 2561 // lock, the object could not be rebiased toward another thread, so 2562 // the bias bit would be clear. 2563 ld_ptr(mark_addr, temp_reg); 2564 and3(temp_reg, markWord::biased_lock_mask_in_place, temp_reg); 2565 cmp(temp_reg, markWord::biased_lock_pattern); 2566 brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done); 2567 delayed(); 2568 if (!allow_delay_slot_filling) { 2569 nop(); 2570 } 2571 } 2572 2573 2574 // compiler_lock_object() and compiler_unlock_object() are direct transliterations 2575 // of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments. 2576 // The code could be tightened up considerably. 2577 // 2578 // box->dhw disposition - post-conditions at DONE_LABEL. 2579 // - Successful inflated lock: box->dhw != 0. 2580 // Any non-zero value suffices. 2581 // Consider G2_thread, rsp, boxReg, or markWord::unused_mark() 2582 // - Successful Stack-lock: box->dhw == mark. 2583 // box->dhw must contain the displaced mark word value 2584 // - Failure -- icc.ZFlag == 0 and box->dhw is undefined. 2585 // The slow-path enter() is responsible for setting 2586 // box->dhw = NonZero (typically markWord::unused_mark()). 2587 // - Biased: box->dhw is undefined 2588 // 2589 // SPARC refworkload performance - specifically jetstream and scimark - are 2590 // extremely sensitive to the size of the code emitted by compiler_lock_object 2591 // and compiler_unlock_object. Critically, the key factor is code size, not path 2592 // length. (Simply experiments to pad CLO with unexecuted NOPs demonstrte the 2593 // effect). 2594 2595 2596 void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark, 2597 Register Rbox, Register Rscratch, 2598 BiasedLockingCounters* counters, 2599 bool try_bias) { 2600 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes()); 2601 2602 verify_oop(Roop); 2603 Label done ; 2604 2605 if (counters != NULL) { 2606 inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch); 2607 } 2608 2609 // Aggressively avoid the Store-before-CAS penalty 2610 // Defer the store into box->dhw until after the CAS 2611 Label IsInflated, Recursive ; 2612 2613 // Anticipate CAS -- Avoid RTS->RTO upgrade 2614 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); 2615 2616 ld_ptr(mark_addr, Rmark); // fetch obj->mark 2617 // Triage: biased, stack-locked, neutral, inflated 2618 2619 if (try_bias) { 2620 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters); 2621 // Invariant: if control reaches this point in the emitted stream 2622 // then Rmark has not been modified. 2623 } 2624 andcc(Rmark, 2, G0); 2625 brx(Assembler::notZero, false, Assembler::pn, IsInflated); 2626 delayed()-> // Beware - dangling delay-slot 2627 2628 // Try stack-lock acquisition. 2629 // Transiently install BUSY (0) encoding in the mark word. 2630 // if the CAS of 0 into the mark was successful then we execute: 2631 // ST box->dhw = mark -- save fetched mark in on-stack basiclock box 2632 // ST obj->mark = box -- overwrite transient 0 value 2633 // This presumes TSO, of course. 2634 2635 mov(0, Rscratch); 2636 or3(Rmark, markWord::unlocked_value, Rmark); 2637 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2638 cas_ptr(mark_addr.base(), Rmark, Rscratch); 2639 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); 2640 cmp(Rscratch, Rmark); 2641 brx(Assembler::notZero, false, Assembler::pn, Recursive); 2642 delayed()->st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2643 if (counters != NULL) { 2644 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch); 2645 } 2646 ba(done); 2647 delayed()->st_ptr(Rbox, mark_addr); 2648 2649 bind(Recursive); 2650 // Stack-lock attempt failed - check for recursive stack-lock. 2651 // Tests show that we can remove the recursive case with no impact 2652 // on refworkload 0.83. If we need to reduce the size of the code 2653 // emitted by compiler_lock_object() the recursive case is perfect 2654 // candidate. 2655 // 2656 // A more extreme idea is to always inflate on stack-lock recursion. 2657 // This lets us eliminate the recursive checks in compiler_lock_object 2658 // and compiler_unlock_object and the (box->dhw == 0) encoding. 2659 // A brief experiment - requiring changes to synchronizer.cpp, interpreter, 2660 // and showed a performance *increase*. In the same experiment I eliminated 2661 // the fast-path stack-lock code from the interpreter and always passed 2662 // control to the "slow" operators in synchronizer.cpp. 2663 2664 // RScratch contains the fetched obj->mark value from the failed CAS. 2665 sub(Rscratch, STACK_BIAS, Rscratch); 2666 sub(Rscratch, SP, Rscratch); 2667 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); 2668 andcc(Rscratch, 0xfffff003, Rscratch); 2669 if (counters != NULL) { 2670 // Accounting needs the Rscratch register 2671 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2672 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch); 2673 ba_short(done); 2674 } else { 2675 ba(done); 2676 delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2677 } 2678 2679 bind (IsInflated); 2680 2681 // Try to CAS m->owner from null to Self 2682 // Invariant: if we acquire the lock then _recursions should be 0. 2683 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark); 2684 mov(G2_thread, Rscratch); 2685 cas_ptr(Rmark, G0, Rscratch); 2686 andcc(Rscratch, Rscratch, G0); // set ICCs for done: icc.zf iff success 2687 // set icc.zf : 1=success 0=failure 2688 // ST box->displaced_header = NonZero. 2689 // Any non-zero value suffices: 2690 // markWord::unused_mark(), G2_thread, RBox, RScratch, rsp, etc. 2691 st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2692 // Intentional fall-through into done 2693 2694 bind (done); 2695 } 2696 2697 void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark, 2698 Register Rbox, Register Rscratch, 2699 bool try_bias) { 2700 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes()); 2701 2702 Label done ; 2703 2704 // Beware ... If the aggregate size of the code emitted by CLO and CUO is 2705 // is too large performance rolls abruptly off a cliff. 2706 // This could be related to inlining policies, code cache management, or 2707 // I$ effects. 2708 Label LStacked ; 2709 2710 if (try_bias) { 2711 // TODO: eliminate redundant LDs of obj->mark 2712 biased_locking_exit(mark_addr, Rscratch, done); 2713 } 2714 2715 ld_ptr(Roop, oopDesc::mark_offset_in_bytes(), Rmark); 2716 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch); 2717 andcc(Rscratch, Rscratch, G0); 2718 brx(Assembler::zero, false, Assembler::pn, done); 2719 delayed()->nop(); // consider: relocate fetch of mark, above, into this DS 2720 andcc(Rmark, 2, G0); 2721 brx(Assembler::zero, false, Assembler::pt, LStacked); 2722 delayed()->nop(); 2723 2724 // It's inflated 2725 // Conceptually we need a #loadstore|#storestore "release" MEMBAR before 2726 // the ST of 0 into _owner which releases the lock. This prevents loads 2727 // and stores within the critical section from reordering (floating) 2728 // past the store that releases the lock. But TSO is a strong memory model 2729 // and that particular flavor of barrier is a noop, so we can safely elide it. 2730 // Note that we use 1-0 locking by default for the inflated case. We 2731 // close the resultant (and rare) race by having contended threads in 2732 // monitorenter periodically poll _owner. 2733 2734 // 1-0 form : avoids CAS and MEMBAR in the common case 2735 // Do not bother to ratify that m->Owner == Self. 2736 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), Rbox); 2737 orcc(Rbox, G0, G0); 2738 brx(Assembler::notZero, false, Assembler::pn, done); 2739 delayed()-> 2740 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)), Rscratch); 2741 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)), Rbox); 2742 orcc(Rbox, Rscratch, G0); 2743 brx(Assembler::zero, false, Assembler::pt, done); 2744 delayed()-> 2745 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 2746 2747 membar(StoreLoad); 2748 // Check that _succ is (or remains) non-zero 2749 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch); 2750 andcc(Rscratch, Rscratch, G0); 2751 brx(Assembler::notZero, false, Assembler::pt, done); 2752 delayed()->andcc(G0, G0, G0); 2753 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark); 2754 mov(G2_thread, Rscratch); 2755 cas_ptr(Rmark, G0, Rscratch); 2756 cmp(Rscratch, G0); 2757 // invert icc.zf and goto done 2758 // A slightly better v8+/v9 idiom would be the following: 2759 // movrnz Rscratch,1,Rscratch 2760 // ba done 2761 // xorcc Rscratch,1,G0 2762 // In v8+ mode the idiom would be valid IFF Rscratch was a G or O register 2763 brx(Assembler::notZero, false, Assembler::pt, done); 2764 delayed()->cmp(G0, G0); 2765 br(Assembler::always, false, Assembler::pt, done); 2766 delayed()->cmp(G0, 1); 2767 2768 bind (LStacked); 2769 // Consider: we could replace the expensive CAS in the exit 2770 // path with a simple ST of the displaced mark value fetched from 2771 // the on-stack basiclock box. That admits a race where a thread T2 2772 // in the slow lock path -- inflating with monitor M -- could race a 2773 // thread T1 in the fast unlock path, resulting in a missed wakeup for T2. 2774 // More precisely T1 in the stack-lock unlock path could "stomp" the 2775 // inflated mark value M installed by T2, resulting in an orphan 2776 // object monitor M and T2 becoming stranded. We can remedy that situation 2777 // by having T2 periodically poll the object's mark word using timed wait 2778 // operations. If T2 discovers that a stomp has occurred it vacates 2779 // the monitor M and wakes any other threads stranded on the now-orphan M. 2780 // In addition the monitor scavenger, which performs deflation, 2781 // would also need to check for orpan monitors and stranded threads. 2782 // 2783 // Finally, inflation is also used when T2 needs to assign a hashCode 2784 // to O and O is stack-locked by T1. The "stomp" race could cause 2785 // an assigned hashCode value to be lost. We can avoid that condition 2786 // and provide the necessary hashCode stability invariants by ensuring 2787 // that hashCode generation is idempotent between copying GCs. 2788 // For example we could compute the hashCode of an object O as 2789 // O's heap address XOR some high quality RNG value that is refreshed 2790 // at GC-time. The monitor scavenger would install the hashCode 2791 // found in any orphan monitors. Again, the mechanism admits a 2792 // lost-update "stomp" WAW race but detects and recovers as needed. 2793 // 2794 // A prototype implementation showed excellent results, although 2795 // the scavenger and timeout code was rather involved. 2796 2797 cas_ptr(mark_addr.base(), Rbox, Rscratch); 2798 cmp(Rbox, Rscratch); 2799 // Intentional fall through into done ... 2800 2801 bind(done); 2802 } 2803 2804 void MacroAssembler::verify_tlab() { 2805 #ifdef ASSERT 2806 if (UseTLAB && VerifyOops) { 2807 Label next, next2, ok; 2808 Register t1 = L0; 2809 Register t2 = L1; 2810 Register t3 = L2; 2811 2812 save_frame(0); 2813 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 2814 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2); 2815 or3(t1, t2, t3); 2816 cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next); 2817 STOP("assert(top >= start)"); 2818 should_not_reach_here(); 2819 2820 bind(next); 2821 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 2822 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2); 2823 or3(t3, t2, t3); 2824 cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2); 2825 STOP("assert(top <= end)"); 2826 should_not_reach_here(); 2827 2828 bind(next2); 2829 and3(t3, MinObjAlignmentInBytesMask, t3); 2830 cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok); 2831 STOP("assert(aligned)"); 2832 should_not_reach_here(); 2833 2834 bind(ok); 2835 restore(); 2836 } 2837 #endif 2838 } 2839 2840 2841 void MacroAssembler::eden_allocate( 2842 Register obj, // result: pointer to object after successful allocation 2843 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 2844 int con_size_in_bytes, // object size in bytes if known at compile time 2845 Register t1, // temp register 2846 Register t2, // temp register 2847 Label& slow_case // continuation point if fast allocation fails 2848 ){ 2849 // make sure arguments make sense 2850 assert_different_registers(obj, var_size_in_bytes, t1, t2); 2851 assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size"); 2852 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment"); 2853 2854 if (!Universe::heap()->supports_inline_contig_alloc()) { 2855 // No allocation in the shared eden. 2856 ba(slow_case); 2857 delayed()->nop(); 2858 } else { 2859 // get eden boundaries 2860 // note: we need both top & top_addr! 2861 const Register top_addr = t1; 2862 const Register end = t2; 2863 2864 CollectedHeap* ch = Universe::heap(); 2865 set((intx)ch->top_addr(), top_addr); 2866 intx delta = (intx)ch->end_addr() - (intx)ch->top_addr(); 2867 ld_ptr(top_addr, delta, end); 2868 ld_ptr(top_addr, 0, obj); 2869 2870 // try to allocate 2871 Label retry; 2872 bind(retry); 2873 #ifdef ASSERT 2874 // make sure eden top is properly aligned 2875 { 2876 Label L; 2877 btst(MinObjAlignmentInBytesMask, obj); 2878 br(Assembler::zero, false, Assembler::pt, L); 2879 delayed()->nop(); 2880 STOP("eden top is not properly aligned"); 2881 bind(L); 2882 } 2883 #endif // ASSERT 2884 const Register free = end; 2885 sub(end, obj, free); // compute amount of free space 2886 if (var_size_in_bytes->is_valid()) { 2887 // size is unknown at compile time 2888 cmp(free, var_size_in_bytes); 2889 brx(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case 2890 delayed()->add(obj, var_size_in_bytes, end); 2891 } else { 2892 // size is known at compile time 2893 cmp(free, con_size_in_bytes); 2894 brx(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case 2895 delayed()->add(obj, con_size_in_bytes, end); 2896 } 2897 // Compare obj with the value at top_addr; if still equal, swap the value of 2898 // end with the value at top_addr. If not equal, read the value at top_addr 2899 // into end. 2900 cas_ptr(top_addr, obj, end); 2901 // if someone beat us on the allocation, try again, otherwise continue 2902 cmp(obj, end); 2903 brx(Assembler::notEqual, false, Assembler::pn, retry); 2904 delayed()->mov(end, obj); // nop if successfull since obj == end 2905 2906 #ifdef ASSERT 2907 // make sure eden top is properly aligned 2908 { 2909 Label L; 2910 const Register top_addr = t1; 2911 2912 set((intx)ch->top_addr(), top_addr); 2913 ld_ptr(top_addr, 0, top_addr); 2914 btst(MinObjAlignmentInBytesMask, top_addr); 2915 br(Assembler::zero, false, Assembler::pt, L); 2916 delayed()->nop(); 2917 STOP("eden top is not properly aligned"); 2918 bind(L); 2919 } 2920 #endif // ASSERT 2921 } 2922 } 2923 2924 2925 void MacroAssembler::tlab_allocate( 2926 Register obj, // result: pointer to object after successful allocation 2927 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 2928 int con_size_in_bytes, // object size in bytes if known at compile time 2929 Register t1, // temp register 2930 Label& slow_case // continuation point if fast allocation fails 2931 ){ 2932 // make sure arguments make sense 2933 assert_different_registers(obj, var_size_in_bytes, t1); 2934 assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size"); 2935 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment"); 2936 2937 const Register free = t1; 2938 2939 verify_tlab(); 2940 2941 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj); 2942 2943 // calculate amount of free space 2944 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free); 2945 sub(free, obj, free); 2946 2947 Label done; 2948 if (var_size_in_bytes == noreg) { 2949 cmp(free, con_size_in_bytes); 2950 } else { 2951 cmp(free, var_size_in_bytes); 2952 } 2953 br(Assembler::less, false, Assembler::pn, slow_case); 2954 // calculate the new top pointer 2955 if (var_size_in_bytes == noreg) { 2956 delayed()->add(obj, con_size_in_bytes, free); 2957 } else { 2958 delayed()->add(obj, var_size_in_bytes, free); 2959 } 2960 2961 bind(done); 2962 2963 #ifdef ASSERT 2964 // make sure new free pointer is properly aligned 2965 { 2966 Label L; 2967 btst(MinObjAlignmentInBytesMask, free); 2968 br(Assembler::zero, false, Assembler::pt, L); 2969 delayed()->nop(); 2970 STOP("updated TLAB free is not properly aligned"); 2971 bind(L); 2972 } 2973 #endif // ASSERT 2974 2975 // update the tlab top pointer 2976 st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset())); 2977 verify_tlab(); 2978 } 2979 2980 void MacroAssembler::zero_memory(Register base, Register index) { 2981 assert_different_registers(base, index); 2982 Label loop; 2983 bind(loop); 2984 subcc(index, HeapWordSize, index); 2985 brx(Assembler::greaterEqual, true, Assembler::pt, loop); 2986 delayed()->st_ptr(G0, base, index); 2987 } 2988 2989 void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes, 2990 Register t1, Register t2) { 2991 // Bump total bytes allocated by this thread 2992 assert(t1->is_global(), "must be global reg"); // so all 64 bits are saved on a context switch 2993 assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2); 2994 // v8 support has gone the way of the dodo 2995 ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1); 2996 add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1); 2997 stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset())); 2998 } 2999 3000 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) { 3001 switch (cond) { 3002 // Note some conditions are synonyms for others 3003 case Assembler::never: return Assembler::always; 3004 case Assembler::zero: return Assembler::notZero; 3005 case Assembler::lessEqual: return Assembler::greater; 3006 case Assembler::less: return Assembler::greaterEqual; 3007 case Assembler::lessEqualUnsigned: return Assembler::greaterUnsigned; 3008 case Assembler::lessUnsigned: return Assembler::greaterEqualUnsigned; 3009 case Assembler::negative: return Assembler::positive; 3010 case Assembler::overflowSet: return Assembler::overflowClear; 3011 case Assembler::always: return Assembler::never; 3012 case Assembler::notZero: return Assembler::zero; 3013 case Assembler::greater: return Assembler::lessEqual; 3014 case Assembler::greaterEqual: return Assembler::less; 3015 case Assembler::greaterUnsigned: return Assembler::lessEqualUnsigned; 3016 case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned; 3017 case Assembler::positive: return Assembler::negative; 3018 case Assembler::overflowClear: return Assembler::overflowSet; 3019 } 3020 3021 ShouldNotReachHere(); return Assembler::overflowClear; 3022 } 3023 3024 void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr, 3025 Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) { 3026 Condition negated_cond = negate_condition(cond); 3027 Label L; 3028 brx(negated_cond, false, Assembler::pt, L); 3029 delayed()->nop(); 3030 inc_counter(counter_ptr, Rtmp1, Rtmp2); 3031 bind(L); 3032 } 3033 3034 void MacroAssembler::inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2) { 3035 AddressLiteral addrlit(counter_addr); 3036 sethi(addrlit, Rtmp1); // Move hi22 bits into temporary register. 3037 Address addr(Rtmp1, addrlit.low10()); // Build an address with low10 bits. 3038 ld(addr, Rtmp2); 3039 inc(Rtmp2); 3040 st(Rtmp2, addr); 3041 } 3042 3043 void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) { 3044 inc_counter((address) counter_addr, Rtmp1, Rtmp2); 3045 } 3046 3047 SkipIfEqual::SkipIfEqual( 3048 MacroAssembler* masm, Register temp, const bool* flag_addr, 3049 Assembler::Condition condition) { 3050 _masm = masm; 3051 AddressLiteral flag(flag_addr); 3052 _masm->sethi(flag, temp); 3053 _masm->ldub(temp, flag.low10(), temp); 3054 _masm->tst(temp); 3055 _masm->br(condition, false, Assembler::pt, _label); 3056 _masm->delayed()->nop(); 3057 } 3058 3059 SkipIfEqual::~SkipIfEqual() { 3060 _masm->bind(_label); 3061 } 3062 3063 void MacroAssembler::bang_stack_with_offset(int offset) { 3064 // stack grows down, caller passes positive offset 3065 assert(offset > 0, "must bang with negative offset"); 3066 set((-offset)+STACK_BIAS, G3_scratch); 3067 st(G0, SP, G3_scratch); 3068 } 3069 3070 // Writes to stack successive pages until offset reached to check for 3071 // stack overflow + shadow pages. This clobbers tsp and scratch. 3072 void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp, 3073 Register Rscratch) { 3074 // Use stack pointer in temp stack pointer 3075 mov(SP, Rtsp); 3076 3077 // Bang stack for total size given plus stack shadow page size. 3078 // Bang one page at a time because a large size can overflow yellow and 3079 // red zones (the bang will fail but stack overflow handling can't tell that 3080 // it was a stack overflow bang vs a regular segv). 3081 int offset = os::vm_page_size(); 3082 Register Roffset = Rscratch; 3083 3084 Label loop; 3085 bind(loop); 3086 set((-offset)+STACK_BIAS, Rscratch); 3087 st(G0, Rtsp, Rscratch); 3088 set(offset, Roffset); 3089 sub(Rsize, Roffset, Rsize); 3090 cmp(Rsize, G0); 3091 br(Assembler::greater, false, Assembler::pn, loop); 3092 delayed()->sub(Rtsp, Roffset, Rtsp); 3093 3094 // Bang down shadow pages too. 3095 // At this point, (tmp-0) is the last address touched, so don't 3096 // touch it again. (It was touched as (tmp-pagesize) but then tmp 3097 // was post-decremented.) Skip this address by starting at i=1, and 3098 // touch a few more pages below. N.B. It is important to touch all 3099 // the way down to and including i=StackShadowPages. 3100 for (int i = 1; i < JavaThread::stack_shadow_zone_size() / os::vm_page_size(); i++) { 3101 set((-i*offset)+STACK_BIAS, Rscratch); 3102 st(G0, Rtsp, Rscratch); 3103 } 3104 } 3105 3106 void MacroAssembler::reserved_stack_check() { 3107 // testing if reserved zone needs to be enabled 3108 Label no_reserved_zone_enabling; 3109 3110 ld_ptr(G2_thread, JavaThread::reserved_stack_activation_offset(), G4_scratch); 3111 cmp_and_brx_short(SP, G4_scratch, Assembler::lessUnsigned, Assembler::pt, no_reserved_zone_enabling); 3112 3113 call_VM_leaf(L0, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), G2_thread); 3114 3115 AddressLiteral stub(StubRoutines::throw_delayed_StackOverflowError_entry()); 3116 jump_to(stub, G4_scratch); 3117 delayed()->restore(); 3118 3119 should_not_reach_here(); 3120 3121 bind(no_reserved_zone_enabling); 3122 } 3123 // ((OopHandle)result).resolve(); 3124 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) { 3125 // OopHandle::resolve is an indirection. 3126 access_load_at(T_OBJECT, IN_NATIVE, Address(result, 0), result, tmp); 3127 } 3128 3129 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) { 3130 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 3131 ld_ptr(method, in_bytes(Method::const_offset()), mirror); 3132 ld_ptr(mirror, in_bytes(ConstMethod::constants_offset()), mirror); 3133 ld_ptr(mirror, ConstantPool::pool_holder_offset_in_bytes(), mirror); 3134 ld_ptr(mirror, mirror_offset, mirror); 3135 resolve_oop_handle(mirror, tmp); 3136 } 3137 3138 void MacroAssembler::load_klass(Register src_oop, Register klass) { 3139 // The number of bytes in this code is used by 3140 // MachCallDynamicJavaNode::ret_addr_offset() 3141 // if this changes, change that. 3142 if (UseCompressedClassPointers) { 3143 lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass); 3144 decode_klass_not_null(klass); 3145 } else { 3146 ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass); 3147 } 3148 } 3149 3150 void MacroAssembler::store_klass(Register klass, Register dst_oop) { 3151 if (UseCompressedClassPointers) { 3152 assert(dst_oop != klass, "not enough registers"); 3153 encode_klass_not_null(klass); 3154 st(klass, dst_oop, oopDesc::klass_offset_in_bytes()); 3155 } else { 3156 st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes()); 3157 } 3158 } 3159 3160 void MacroAssembler::store_klass_gap(Register s, Register d) { 3161 if (UseCompressedClassPointers) { 3162 assert(s != d, "not enough registers"); 3163 st(s, d, oopDesc::klass_gap_offset_in_bytes()); 3164 } 3165 } 3166 3167 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, 3168 Register src, Address dst, Register tmp) { 3169 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 3170 decorators = AccessInternal::decorator_fixup(decorators); 3171 bool as_raw = (decorators & AS_RAW) != 0; 3172 if (as_raw) { 3173 bs->BarrierSetAssembler::store_at(this, decorators, type, src, dst, tmp); 3174 } else { 3175 bs->store_at(this, decorators, type, src, dst, tmp); 3176 } 3177 } 3178 3179 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, 3180 Address src, Register dst, Register tmp) { 3181 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 3182 decorators = AccessInternal::decorator_fixup(decorators); 3183 bool as_raw = (decorators & AS_RAW) != 0; 3184 if (as_raw) { 3185 bs->BarrierSetAssembler::load_at(this, decorators, type, src, dst, tmp); 3186 } else { 3187 bs->load_at(this, decorators, type, src, dst, tmp); 3188 } 3189 } 3190 3191 void MacroAssembler::load_heap_oop(const Address& s, Register d, Register tmp, DecoratorSet decorators) { 3192 access_load_at(T_OBJECT, IN_HEAP | decorators, s, d, tmp); 3193 } 3194 3195 void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d, Register tmp, DecoratorSet decorators) { 3196 access_load_at(T_OBJECT, IN_HEAP | decorators, Address(s1, s2), d, tmp); 3197 } 3198 3199 void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d, Register tmp, DecoratorSet decorators) { 3200 access_load_at(T_OBJECT, IN_HEAP | decorators, Address(s1, simm13a), d, tmp); 3201 } 3202 3203 void MacroAssembler::load_heap_oop(Register s1, RegisterOrConstant s2, Register d, Register tmp, DecoratorSet decorators) { 3204 if (s2.is_constant()) { 3205 access_load_at(T_OBJECT, IN_HEAP | decorators, Address(s1, s2.as_constant()), d, tmp); 3206 } else { 3207 access_load_at(T_OBJECT, IN_HEAP | decorators, Address(s1, s2.as_register()), d, tmp); 3208 } 3209 } 3210 3211 void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2, Register tmp, DecoratorSet decorators) { 3212 access_store_at(T_OBJECT, IN_HEAP | decorators, d, Address(s1, s2), tmp); 3213 } 3214 3215 void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a, Register tmp, DecoratorSet decorators) { 3216 access_store_at(T_OBJECT, IN_HEAP | decorators, d, Address(s1, simm13a), tmp); 3217 } 3218 3219 void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset, Register tmp, DecoratorSet decorators) { 3220 if (a.has_index()) { 3221 assert(!a.has_disp(), "not supported yet"); 3222 assert(offset == 0, "not supported yet"); 3223 access_store_at(T_OBJECT, IN_HEAP | decorators, d, Address(a.base(), a.index()), tmp); 3224 } else { 3225 access_store_at(T_OBJECT, IN_HEAP | decorators, d, Address(a.base(), a.disp() + offset), tmp); 3226 } 3227 } 3228 3229 3230 void MacroAssembler::encode_heap_oop(Register src, Register dst) { 3231 assert (UseCompressedOops, "must be compressed"); 3232 assert (Universe::heap() != NULL, "java heap should be initialized"); 3233 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 3234 verify_oop(src); 3235 if (CompressedOops::base() == NULL) { 3236 srlx(src, LogMinObjAlignmentInBytes, dst); 3237 return; 3238 } 3239 Label done; 3240 if (src == dst) { 3241 // optimize for frequent case src == dst 3242 bpr(rc_nz, true, Assembler::pt, src, done); 3243 delayed() -> sub(src, G6_heapbase, dst); // annuled if not taken 3244 bind(done); 3245 srlx(src, LogMinObjAlignmentInBytes, dst); 3246 } else { 3247 bpr(rc_z, false, Assembler::pn, src, done); 3248 delayed() -> mov(G0, dst); 3249 // could be moved before branch, and annulate delay, 3250 // but may add some unneeded work decoding null 3251 sub(src, G6_heapbase, dst); 3252 srlx(dst, LogMinObjAlignmentInBytes, dst); 3253 bind(done); 3254 } 3255 } 3256 3257 3258 void MacroAssembler::encode_heap_oop_not_null(Register r) { 3259 assert (UseCompressedOops, "must be compressed"); 3260 assert (Universe::heap() != NULL, "java heap should be initialized"); 3261 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 3262 verify_oop(r); 3263 if (CompressedOops::base() != NULL) 3264 sub(r, G6_heapbase, r); 3265 srlx(r, LogMinObjAlignmentInBytes, r); 3266 } 3267 3268 void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) { 3269 assert (UseCompressedOops, "must be compressed"); 3270 assert (Universe::heap() != NULL, "java heap should be initialized"); 3271 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 3272 verify_oop(src); 3273 if (CompressedOops::base() == NULL) { 3274 srlx(src, LogMinObjAlignmentInBytes, dst); 3275 } else { 3276 sub(src, G6_heapbase, dst); 3277 srlx(dst, LogMinObjAlignmentInBytes, dst); 3278 } 3279 } 3280 3281 // Same algorithm as oops.inline.hpp decode_heap_oop. 3282 void MacroAssembler::decode_heap_oop(Register src, Register dst) { 3283 assert (UseCompressedOops, "must be compressed"); 3284 assert (Universe::heap() != NULL, "java heap should be initialized"); 3285 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 3286 sllx(src, LogMinObjAlignmentInBytes, dst); 3287 if (CompressedOops::base() != NULL) { 3288 Label done; 3289 bpr(rc_nz, true, Assembler::pt, dst, done); 3290 delayed() -> add(dst, G6_heapbase, dst); // annuled if not taken 3291 bind(done); 3292 } 3293 verify_oop(dst); 3294 } 3295 3296 void MacroAssembler::decode_heap_oop_not_null(Register r) { 3297 // Do not add assert code to this unless you change vtableStubs_sparc.cpp 3298 // pd_code_size_limit. 3299 // Also do not verify_oop as this is called by verify_oop. 3300 assert (UseCompressedOops, "must be compressed"); 3301 assert (Universe::heap() != NULL, "java heap should be initialized"); 3302 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 3303 sllx(r, LogMinObjAlignmentInBytes, r); 3304 if (CompressedOops::base() != NULL) 3305 add(r, G6_heapbase, r); 3306 } 3307 3308 void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) { 3309 // Do not add assert code to this unless you change vtableStubs_sparc.cpp 3310 // pd_code_size_limit. 3311 // Also do not verify_oop as this is called by verify_oop. 3312 assert (UseCompressedOops, "must be compressed"); 3313 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 3314 sllx(src, LogMinObjAlignmentInBytes, dst); 3315 if (CompressedOops::base() != NULL) 3316 add(dst, G6_heapbase, dst); 3317 } 3318 3319 void MacroAssembler::encode_klass_not_null(Register r) { 3320 assert (UseCompressedClassPointers, "must be compressed"); 3321 if (CompressedKlassPointers::base() != NULL) { 3322 assert(r != G6_heapbase, "bad register choice"); 3323 set((intptr_t)CompressedKlassPointers::base(), G6_heapbase); 3324 sub(r, G6_heapbase, r); 3325 if (CompressedKlassPointers::shift() != 0) { 3326 assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 3327 srlx(r, LogKlassAlignmentInBytes, r); 3328 } 3329 reinit_heapbase(); 3330 } else { 3331 assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift() || CompressedKlassPointers::shift() == 0, "decode alg wrong"); 3332 srlx(r, CompressedKlassPointers::shift(), r); 3333 } 3334 } 3335 3336 void MacroAssembler::encode_klass_not_null(Register src, Register dst) { 3337 if (src == dst) { 3338 encode_klass_not_null(src); 3339 } else { 3340 assert (UseCompressedClassPointers, "must be compressed"); 3341 if (CompressedKlassPointers::base() != NULL) { 3342 set((intptr_t)CompressedKlassPointers::base(), dst); 3343 sub(src, dst, dst); 3344 if (CompressedKlassPointers::shift() != 0) { 3345 srlx(dst, LogKlassAlignmentInBytes, dst); 3346 } 3347 } else { 3348 // shift src into dst 3349 assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift() || CompressedKlassPointers::shift() == 0, "decode alg wrong"); 3350 srlx(src, CompressedKlassPointers::shift(), dst); 3351 } 3352 } 3353 } 3354 3355 // Function instr_size_for_decode_klass_not_null() counts the instructions 3356 // generated by decode_klass_not_null() and reinit_heapbase(). Hence, if 3357 // the instructions they generate change, then this method needs to be updated. 3358 int MacroAssembler::instr_size_for_decode_klass_not_null() { 3359 assert (UseCompressedClassPointers, "only for compressed klass ptrs"); 3360 int num_instrs = 1; // shift src,dst or add 3361 if (CompressedKlassPointers::base() != NULL) { 3362 // set + add + set 3363 num_instrs += insts_for_internal_set((intptr_t)CompressedKlassPointers::base()) + 3364 insts_for_internal_set((intptr_t)CompressedOops::ptrs_base()); 3365 if (CompressedKlassPointers::shift() != 0) { 3366 num_instrs += 1; // sllx 3367 } 3368 } 3369 return num_instrs * BytesPerInstWord; 3370 } 3371 3372 // !!! If the instructions that get generated here change then function 3373 // instr_size_for_decode_klass_not_null() needs to get updated. 3374 void MacroAssembler::decode_klass_not_null(Register r) { 3375 // Do not add assert code to this unless you change vtableStubs_sparc.cpp 3376 // pd_code_size_limit. 3377 assert (UseCompressedClassPointers, "must be compressed"); 3378 if (CompressedKlassPointers::base() != NULL) { 3379 assert(r != G6_heapbase, "bad register choice"); 3380 set((intptr_t)CompressedKlassPointers::base(), G6_heapbase); 3381 if (CompressedKlassPointers::shift() != 0) 3382 sllx(r, LogKlassAlignmentInBytes, r); 3383 add(r, G6_heapbase, r); 3384 reinit_heapbase(); 3385 } else { 3386 assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift() || CompressedKlassPointers::shift() == 0, "decode alg wrong"); 3387 sllx(r, CompressedKlassPointers::shift(), r); 3388 } 3389 } 3390 3391 void MacroAssembler::decode_klass_not_null(Register src, Register dst) { 3392 if (src == dst) { 3393 decode_klass_not_null(src); 3394 } else { 3395 // Do not add assert code to this unless you change vtableStubs_sparc.cpp 3396 // pd_code_size_limit. 3397 assert (UseCompressedClassPointers, "must be compressed"); 3398 if (CompressedKlassPointers::base() != NULL) { 3399 if (CompressedKlassPointers::shift() != 0) { 3400 assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice"); 3401 set((intptr_t)CompressedKlassPointers::base(), G6_heapbase); 3402 sllx(src, LogKlassAlignmentInBytes, dst); 3403 add(dst, G6_heapbase, dst); 3404 reinit_heapbase(); 3405 } else { 3406 set((intptr_t)CompressedKlassPointers::base(), dst); 3407 add(src, dst, dst); 3408 } 3409 } else { 3410 // shift/mov src into dst. 3411 assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift() || CompressedKlassPointers::shift() == 0, "decode alg wrong"); 3412 sllx(src, CompressedKlassPointers::shift(), dst); 3413 } 3414 } 3415 } 3416 3417 void MacroAssembler::reinit_heapbase() { 3418 if (UseCompressedOops || UseCompressedClassPointers) { 3419 if (Universe::heap() != NULL) { 3420 set((intptr_t)CompressedOops::ptrs_base(), G6_heapbase); 3421 } else { 3422 AddressLiteral base(CompressedOops::ptrs_base_addr()); 3423 load_ptr_contents(base, G6_heapbase); 3424 } 3425 } 3426 } 3427 3428 // Use BIS for zeroing (count is in bytes). 3429 void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) { 3430 assert(UseBlockZeroing && VM_Version::has_blk_zeroing(), "only works with BIS zeroing"); 3431 Register end = count; 3432 int cache_line_size = VM_Version::prefetch_data_size(); 3433 assert(cache_line_size > 0, "cache line size should be known for this code"); 3434 // Minimum count when BIS zeroing can be used since 3435 // it needs membar which is expensive. 3436 int block_zero_size = MAX2(cache_line_size*3, (int)BlockZeroingLowLimit); 3437 3438 Label small_loop; 3439 // Check if count is negative (dead code) or zero. 3440 // Note, count uses 64bit in 64 bit VM. 3441 cmp_and_brx_short(count, 0, Assembler::lessEqual, Assembler::pn, Ldone); 3442 3443 // Use BIS zeroing only for big arrays since it requires membar. 3444 if (Assembler::is_simm13(block_zero_size)) { // < 4096 3445 cmp(count, block_zero_size); 3446 } else { 3447 set(block_zero_size, temp); 3448 cmp(count, temp); 3449 } 3450 br(Assembler::lessUnsigned, false, Assembler::pt, small_loop); 3451 delayed()->add(to, count, end); 3452 3453 // Note: size is >= three (32 bytes) cache lines. 3454 3455 // Clean the beginning of space up to next cache line. 3456 for (int offs = 0; offs < cache_line_size; offs += 8) { 3457 stx(G0, to, offs); 3458 } 3459 3460 // align to next cache line 3461 add(to, cache_line_size, to); 3462 and3(to, -cache_line_size, to); 3463 3464 // Note: size left >= two (32 bytes) cache lines. 3465 3466 // BIS should not be used to zero tail (64 bytes) 3467 // to avoid zeroing a header of the following object. 3468 sub(end, (cache_line_size*2)-8, end); 3469 3470 Label bis_loop; 3471 bind(bis_loop); 3472 stxa(G0, to, G0, Assembler::ASI_ST_BLKINIT_PRIMARY); 3473 add(to, cache_line_size, to); 3474 cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, bis_loop); 3475 3476 // BIS needs membar. 3477 membar(Assembler::StoreLoad); 3478 3479 add(end, (cache_line_size*2)-8, end); // restore end 3480 cmp_and_brx_short(to, end, Assembler::greaterEqualUnsigned, Assembler::pn, Ldone); 3481 3482 // Clean the tail. 3483 bind(small_loop); 3484 stx(G0, to, 0); 3485 add(to, 8, to); 3486 cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, small_loop); 3487 nop(); // Separate short branches 3488 } 3489 3490 /** 3491 * Update CRC-32[C] with a byte value according to constants in table 3492 * 3493 * @param [in,out]crc Register containing the crc. 3494 * @param [in]val Register containing the byte to fold into the CRC. 3495 * @param [in]table Register containing the table of crc constants. 3496 * 3497 * uint32_t crc; 3498 * val = crc_table[(val ^ crc) & 0xFF]; 3499 * crc = val ^ (crc >> 8); 3500 */ 3501 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 3502 xor3(val, crc, val); 3503 and3(val, 0xFF, val); 3504 sllx(val, 2, val); 3505 lduw(table, val, val); 3506 srlx(crc, 8, crc); 3507 xor3(val, crc, crc); 3508 } 3509 3510 // Reverse byte order of lower 32 bits, assuming upper 32 bits all zeros 3511 void MacroAssembler::reverse_bytes_32(Register src, Register dst, Register tmp) { 3512 srlx(src, 24, dst); 3513 3514 sllx(src, 32+8, tmp); 3515 srlx(tmp, 32+24, tmp); 3516 sllx(tmp, 8, tmp); 3517 or3(dst, tmp, dst); 3518 3519 sllx(src, 32+16, tmp); 3520 srlx(tmp, 32+24, tmp); 3521 sllx(tmp, 16, tmp); 3522 or3(dst, tmp, dst); 3523 3524 sllx(src, 32+24, tmp); 3525 srlx(tmp, 32, tmp); 3526 or3(dst, tmp, dst); 3527 } 3528 3529 void MacroAssembler::movitof_revbytes(Register src, FloatRegister dst, Register tmp1, Register tmp2) { 3530 reverse_bytes_32(src, tmp1, tmp2); 3531 movxtod(tmp1, dst); 3532 } 3533 3534 void MacroAssembler::movftoi_revbytes(FloatRegister src, Register dst, Register tmp1, Register tmp2) { 3535 movdtox(src, tmp1); 3536 reverse_bytes_32(tmp1, dst, tmp2); 3537 } 3538 3539 void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register buf, int offset) { 3540 xmulx(xcrc_hi, xK_hi, xtmp_lo); 3541 xmulxhi(xcrc_hi, xK_hi, xtmp_hi); 3542 xmulxhi(xcrc_lo, xK_lo, xcrc_hi); 3543 xmulx(xcrc_lo, xK_lo, xcrc_lo); 3544 xor3(xcrc_lo, xtmp_lo, xcrc_lo); 3545 xor3(xcrc_hi, xtmp_hi, xcrc_hi); 3546 ldxl(buf, G0, xtmp_lo); 3547 inc(buf, 8); 3548 ldxl(buf, G0, xtmp_hi); 3549 inc(buf, 8); 3550 xor3(xcrc_lo, xtmp_lo, xcrc_lo); 3551 xor3(xcrc_hi, xtmp_hi, xcrc_hi); 3552 } 3553 3554 void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register xbuf_hi, Register xbuf_lo) { 3555 mov(xcrc_lo, xtmp_lo); 3556 mov(xcrc_hi, xtmp_hi); 3557 xmulx(xtmp_hi, xK_hi, xtmp_lo); 3558 xmulxhi(xtmp_hi, xK_hi, xtmp_hi); 3559 xmulxhi(xcrc_lo, xK_lo, xcrc_hi); 3560 xmulx(xcrc_lo, xK_lo, xcrc_lo); 3561 xor3(xcrc_lo, xbuf_lo, xcrc_lo); 3562 xor3(xcrc_hi, xbuf_hi, xcrc_hi); 3563 xor3(xcrc_lo, xtmp_lo, xcrc_lo); 3564 xor3(xcrc_hi, xtmp_hi, xcrc_hi); 3565 } 3566 3567 void MacroAssembler::fold_8bit_crc32(Register xcrc, Register table, Register xtmp, Register tmp) { 3568 and3(xcrc, 0xFF, tmp); 3569 sllx(tmp, 2, tmp); 3570 lduw(table, tmp, xtmp); 3571 srlx(xcrc, 8, xcrc); 3572 xor3(xtmp, xcrc, xcrc); 3573 } 3574 3575 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 3576 and3(crc, 0xFF, tmp); 3577 srlx(crc, 8, crc); 3578 sllx(tmp, 2, tmp); 3579 lduw(table, tmp, tmp); 3580 xor3(tmp, crc, crc); 3581 } 3582 3583 #define CRC32_TMP_REG_NUM 18 3584 3585 #define CRC32_CONST_64 0x163cd6124 3586 #define CRC32_CONST_96 0x0ccaa009e 3587 #define CRC32_CONST_160 0x1751997d0 3588 #define CRC32_CONST_480 0x1c6e41596 3589 #define CRC32_CONST_544 0x154442bd4 3590 3591 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table) { 3592 3593 Label L_cleanup_loop, L_cleanup_check, L_align_loop, L_align_check; 3594 Label L_main_loop_prologue; 3595 Label L_fold_512b, L_fold_512b_loop, L_fold_128b; 3596 Label L_fold_tail, L_fold_tail_loop; 3597 Label L_8byte_fold_check; 3598 3599 const Register tmp[CRC32_TMP_REG_NUM] = {L0, L1, L2, L3, L4, L5, L6, G1, I0, I1, I2, I3, I4, I5, I7, O4, O5, G3}; 3600 3601 Register const_64 = tmp[CRC32_TMP_REG_NUM-1]; 3602 Register const_96 = tmp[CRC32_TMP_REG_NUM-1]; 3603 Register const_160 = tmp[CRC32_TMP_REG_NUM-2]; 3604 Register const_480 = tmp[CRC32_TMP_REG_NUM-1]; 3605 Register const_544 = tmp[CRC32_TMP_REG_NUM-2]; 3606 3607 set(ExternalAddress(StubRoutines::crc_table_addr()), table); 3608 3609 not1(crc); // ~c 3610 clruwu(crc); // clear upper 32 bits of crc 3611 3612 // Check if below cutoff, proceed directly to cleanup code 3613 mov(31, G4); 3614 cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_check); 3615 3616 // Align buffer to 8 byte boundry 3617 mov(8, O5); 3618 and3(buf, 0x7, O4); 3619 sub(O5, O4, O5); 3620 and3(O5, 0x7, O5); 3621 sub(len, O5, len); 3622 ba(L_align_check); 3623 delayed()->nop(); 3624 3625 // Alignment loop, table look up method for up to 7 bytes 3626 bind(L_align_loop); 3627 ldub(buf, 0, O4); 3628 inc(buf); 3629 dec(O5); 3630 xor3(O4, crc, O4); 3631 and3(O4, 0xFF, O4); 3632 sllx(O4, 2, O4); 3633 lduw(table, O4, O4); 3634 srlx(crc, 8, crc); 3635 xor3(O4, crc, crc); 3636 bind(L_align_check); 3637 nop(); 3638 cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_align_loop); 3639 3640 // Aligned on 64-bit (8-byte) boundry at this point 3641 // Check if still above cutoff (31-bytes) 3642 mov(31, G4); 3643 cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_check); 3644 // At least 32 bytes left to process 3645 3646 // Free up registers by storing them to FP registers 3647 for (int i = 0; i < CRC32_TMP_REG_NUM; i++) { 3648 movxtod(tmp[i], as_FloatRegister(2*i)); 3649 } 3650 3651 // Determine which loop to enter 3652 // Shared prologue 3653 ldxl(buf, G0, tmp[0]); 3654 inc(buf, 8); 3655 ldxl(buf, G0, tmp[1]); 3656 inc(buf, 8); 3657 xor3(tmp[0], crc, tmp[0]); // Fold CRC into first few bytes 3658 and3(crc, 0, crc); // Clear out the crc register 3659 // Main loop needs 128-bytes at least 3660 mov(128, G4); 3661 mov(64, tmp[2]); 3662 cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_main_loop_prologue); 3663 // Less than 64 bytes 3664 nop(); 3665 cmp_and_br_short(len, tmp[2], Assembler::lessUnsigned, Assembler::pt, L_fold_tail); 3666 // Between 64 and 127 bytes 3667 set64(CRC32_CONST_96, const_96, tmp[8]); 3668 set64(CRC32_CONST_160, const_160, tmp[9]); 3669 fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[2], tmp[3], buf, 0); 3670 fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[4], tmp[5], buf, 16); 3671 fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[6], tmp[7], buf, 32); 3672 dec(len, 48); 3673 ba(L_fold_tail); 3674 delayed()->nop(); 3675 3676 bind(L_main_loop_prologue); 3677 for (int i = 2; i < 8; i++) { 3678 ldxl(buf, G0, tmp[i]); 3679 inc(buf, 8); 3680 } 3681 3682 // Fold total 512 bits of polynomial on each iteration, 3683 // 128 bits per each of 4 parallel streams 3684 set64(CRC32_CONST_480, const_480, tmp[8]); 3685 set64(CRC32_CONST_544, const_544, tmp[9]); 3686 3687 mov(128, G4); 3688 bind(L_fold_512b_loop); 3689 fold_128bit_crc32(tmp[1], tmp[0], const_480, const_544, tmp[9], tmp[8], buf, 0); 3690 fold_128bit_crc32(tmp[3], tmp[2], const_480, const_544, tmp[11], tmp[10], buf, 16); 3691 fold_128bit_crc32(tmp[5], tmp[4], const_480, const_544, tmp[13], tmp[12], buf, 32); 3692 fold_128bit_crc32(tmp[7], tmp[6], const_480, const_544, tmp[15], tmp[14], buf, 64); 3693 dec(len, 64); 3694 cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_fold_512b_loop); 3695 3696 // Fold 512 bits to 128 bits 3697 bind(L_fold_512b); 3698 set64(CRC32_CONST_96, const_96, tmp[8]); 3699 set64(CRC32_CONST_160, const_160, tmp[9]); 3700 3701 fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[3], tmp[2]); 3702 fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[5], tmp[4]); 3703 fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[7], tmp[6]); 3704 dec(len, 48); 3705 3706 // Fold the rest of 128 bits data chunks 3707 bind(L_fold_tail); 3708 mov(32, G4); 3709 cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_fold_128b); 3710 3711 set64(CRC32_CONST_96, const_96, tmp[8]); 3712 set64(CRC32_CONST_160, const_160, tmp[9]); 3713 3714 bind(L_fold_tail_loop); 3715 fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[2], tmp[3], buf, 0); 3716 sub(len, 16, len); 3717 cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_fold_tail_loop); 3718 3719 // Fold the 128 bits in tmps 0 - 1 into tmp 1 3720 bind(L_fold_128b); 3721 3722 set64(CRC32_CONST_64, const_64, tmp[4]); 3723 3724 xmulx(const_64, tmp[0], tmp[2]); 3725 xmulxhi(const_64, tmp[0], tmp[3]); 3726 3727 srl(tmp[2], G0, tmp[4]); 3728 xmulx(const_64, tmp[4], tmp[4]); 3729 3730 srlx(tmp[2], 32, tmp[2]); 3731 sllx(tmp[3], 32, tmp[3]); 3732 or3(tmp[2], tmp[3], tmp[2]); 3733 3734 xor3(tmp[4], tmp[1], tmp[4]); 3735 xor3(tmp[4], tmp[2], tmp[1]); 3736 dec(len, 8); 3737 3738 // Use table lookup for the 8 bytes left in tmp[1] 3739 dec(len, 8); 3740 3741 // 8 8-bit folds to compute 32-bit CRC. 3742 for (int j = 0; j < 4; j++) { 3743 fold_8bit_crc32(tmp[1], table, tmp[2], tmp[3]); 3744 } 3745 srl(tmp[1], G0, crc); // move 32 bits to general register 3746 for (int j = 0; j < 4; j++) { 3747 fold_8bit_crc32(crc, table, tmp[3]); 3748 } 3749 3750 bind(L_8byte_fold_check); 3751 3752 // Restore int registers saved in FP registers 3753 for (int i = 0; i < CRC32_TMP_REG_NUM; i++) { 3754 movdtox(as_FloatRegister(2*i), tmp[i]); 3755 } 3756 3757 ba(L_cleanup_check); 3758 delayed()->nop(); 3759 3760 // Table look-up method for the remaining few bytes 3761 bind(L_cleanup_loop); 3762 ldub(buf, 0, O4); 3763 inc(buf); 3764 dec(len); 3765 xor3(O4, crc, O4); 3766 and3(O4, 0xFF, O4); 3767 sllx(O4, 2, O4); 3768 lduw(table, O4, O4); 3769 srlx(crc, 8, crc); 3770 xor3(O4, crc, crc); 3771 bind(L_cleanup_check); 3772 nop(); 3773 cmp_and_br_short(len, 0, Assembler::greaterUnsigned, Assembler::pt, L_cleanup_loop); 3774 3775 not1(crc); 3776 } 3777 3778 #define CHUNK_LEN 128 /* 128 x 8B = 1KB */ 3779 #define CHUNK_K1 0x1307a0206 /* reverseBits(pow(x, CHUNK_LEN*8*8*3 - 32) mod P(x)) << 1 */ 3780 #define CHUNK_K2 0x1a0f717c4 /* reverseBits(pow(x, CHUNK_LEN*8*8*2 - 32) mod P(x)) << 1 */ 3781 #define CHUNK_K3 0x0170076fa /* reverseBits(pow(x, CHUNK_LEN*8*8*1 - 32) mod P(x)) << 1 */ 3782 3783 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, Register table) { 3784 3785 Label L_crc32c_head, L_crc32c_aligned; 3786 Label L_crc32c_parallel, L_crc32c_parallel_loop; 3787 Label L_crc32c_serial, L_crc32c_x32_loop, L_crc32c_x8, L_crc32c_x8_loop; 3788 Label L_crc32c_done, L_crc32c_tail, L_crc32c_return; 3789 3790 set(ExternalAddress(StubRoutines::crc32c_table_addr()), table); 3791 3792 cmp_and_br_short(len, 0, Assembler::lessEqual, Assembler::pn, L_crc32c_return); 3793 3794 // clear upper 32 bits of crc 3795 clruwu(crc); 3796 3797 and3(buf, 7, G4); 3798 cmp_and_brx_short(G4, 0, Assembler::equal, Assembler::pt, L_crc32c_aligned); 3799 3800 mov(8, G1); 3801 sub(G1, G4, G4); 3802 3803 // ------ process the misaligned head (7 bytes or less) ------ 3804 bind(L_crc32c_head); 3805 3806 // crc = (crc >>> 8) ^ byteTable[(crc ^ b) & 0xFF]; 3807 ldub(buf, 0, G1); 3808 update_byte_crc32(crc, G1, table); 3809 3810 inc(buf); 3811 dec(len); 3812 cmp_and_br_short(len, 0, Assembler::equal, Assembler::pn, L_crc32c_return); 3813 dec(G4); 3814 cmp_and_br_short(G4, 0, Assembler::greater, Assembler::pt, L_crc32c_head); 3815 3816 // ------ process the 8-byte-aligned body ------ 3817 bind(L_crc32c_aligned); 3818 nop(); 3819 cmp_and_br_short(len, 8, Assembler::less, Assembler::pn, L_crc32c_tail); 3820 3821 // reverse the byte order of lower 32 bits to big endian, and move to FP side 3822 movitof_revbytes(crc, F0, G1, G3); 3823 3824 set(CHUNK_LEN*8*4, G4); 3825 cmp_and_br_short(len, G4, Assembler::less, Assembler::pt, L_crc32c_serial); 3826 3827 // ------ process four 1KB chunks in parallel ------ 3828 bind(L_crc32c_parallel); 3829 3830 fzero(FloatRegisterImpl::D, F2); 3831 fzero(FloatRegisterImpl::D, F4); 3832 fzero(FloatRegisterImpl::D, F6); 3833 3834 mov(CHUNK_LEN - 1, G4); 3835 bind(L_crc32c_parallel_loop); 3836 // schedule ldf's ahead of crc32c's to hide the load-use latency 3837 ldf(FloatRegisterImpl::D, buf, 0, F8); 3838 ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*8, F10); 3839 ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*16, F12); 3840 ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*24, F14); 3841 crc32c(F0, F8, F0); 3842 crc32c(F2, F10, F2); 3843 crc32c(F4, F12, F4); 3844 crc32c(F6, F14, F6); 3845 inc(buf, 8); 3846 dec(G4); 3847 cmp_and_br_short(G4, 0, Assembler::greater, Assembler::pt, L_crc32c_parallel_loop); 3848 3849 ldf(FloatRegisterImpl::D, buf, 0, F8); 3850 ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*8, F10); 3851 ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*16, F12); 3852 crc32c(F0, F8, F0); 3853 crc32c(F2, F10, F2); 3854 crc32c(F4, F12, F4); 3855 3856 inc(buf, CHUNK_LEN*24); 3857 ldfl(FloatRegisterImpl::D, buf, G0, F14); // load in little endian 3858 inc(buf, 8); 3859 3860 prefetch(buf, 0, Assembler::severalReads); 3861 prefetch(buf, CHUNK_LEN*8, Assembler::severalReads); 3862 prefetch(buf, CHUNK_LEN*16, Assembler::severalReads); 3863 prefetch(buf, CHUNK_LEN*24, Assembler::severalReads); 3864 3865 // move to INT side, and reverse the byte order of lower 32 bits to little endian 3866 movftoi_revbytes(F0, O4, G1, G4); 3867 movftoi_revbytes(F2, O5, G1, G4); 3868 movftoi_revbytes(F4, G5, G1, G4); 3869 3870 // combine the results of 4 chunks 3871 set64(CHUNK_K1, G3, G1); 3872 xmulx(O4, G3, O4); 3873 set64(CHUNK_K2, G3, G1); 3874 xmulx(O5, G3, O5); 3875 set64(CHUNK_K3, G3, G1); 3876 xmulx(G5, G3, G5); 3877 3878 movdtox(F14, G4); 3879 xor3(O4, O5, O5); 3880 xor3(G5, O5, O5); 3881 xor3(G4, O5, O5); 3882 3883 // reverse the byte order to big endian, via stack, and move to FP side 3884 // TODO: use new revb instruction 3885 add(SP, -8, G1); 3886 srlx(G1, 3, G1); 3887 sllx(G1, 3, G1); 3888 stx(O5, G1, G0); 3889 ldfl(FloatRegisterImpl::D, G1, G0, F2); // load in little endian 3890 3891 crc32c(F6, F2, F0); 3892 3893 set(CHUNK_LEN*8*4, G4); 3894 sub(len, G4, len); 3895 cmp_and_br_short(len, G4, Assembler::greaterEqual, Assembler::pt, L_crc32c_parallel); 3896 nop(); 3897 cmp_and_br_short(len, 0, Assembler::equal, Assembler::pt, L_crc32c_done); 3898 3899 bind(L_crc32c_serial); 3900 3901 mov(32, G4); 3902 cmp_and_br_short(len, G4, Assembler::less, Assembler::pn, L_crc32c_x8); 3903 3904 // ------ process 32B chunks ------ 3905 bind(L_crc32c_x32_loop); 3906 ldf(FloatRegisterImpl::D, buf, 0, F2); 3907 crc32c(F0, F2, F0); 3908 ldf(FloatRegisterImpl::D, buf, 8, F2); 3909 crc32c(F0, F2, F0); 3910 ldf(FloatRegisterImpl::D, buf, 16, F2); 3911 crc32c(F0, F2, F0); 3912 ldf(FloatRegisterImpl::D, buf, 24, F2); 3913 inc(buf, 32); 3914 crc32c(F0, F2, F0); 3915 dec(len, 32); 3916 cmp_and_br_short(len, G4, Assembler::greaterEqual, Assembler::pt, L_crc32c_x32_loop); 3917 3918 bind(L_crc32c_x8); 3919 nop(); 3920 cmp_and_br_short(len, 8, Assembler::less, Assembler::pt, L_crc32c_done); 3921 3922 // ------ process 8B chunks ------ 3923 bind(L_crc32c_x8_loop); 3924 ldf(FloatRegisterImpl::D, buf, 0, F2); 3925 inc(buf, 8); 3926 crc32c(F0, F2, F0); 3927 dec(len, 8); 3928 cmp_and_br_short(len, 8, Assembler::greaterEqual, Assembler::pt, L_crc32c_x8_loop); 3929 3930 bind(L_crc32c_done); 3931 3932 // move to INT side, and reverse the byte order of lower 32 bits to little endian 3933 movftoi_revbytes(F0, crc, G1, G3); 3934 3935 cmp_and_br_short(len, 0, Assembler::equal, Assembler::pt, L_crc32c_return); 3936 3937 // ------ process the misaligned tail (7 bytes or less) ------ 3938 bind(L_crc32c_tail); 3939 3940 // crc = (crc >>> 8) ^ byteTable[(crc ^ b) & 0xFF]; 3941 ldub(buf, 0, G1); 3942 update_byte_crc32(crc, G1, table); 3943 3944 inc(buf); 3945 dec(len); 3946 cmp_and_br_short(len, 0, Assembler::greater, Assembler::pt, L_crc32c_tail); 3947 3948 bind(L_crc32c_return); 3949 nop(); 3950 }