/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER2
#include "opto/intrinsicnode.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

// Convert the raw encoding form into the form expected by the
// constructor for Address.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  assert(scale == 0, "not supported");
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }

  Register rindex = as_Register(index);
  if (rindex != G0) {
    Address madr(as_Register(base), rindex);
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), disp);
    madr._rspec = rspec;
    return madr;
  }
}

Address Argument::address_in_frame() const {
  // Warning: In LP64 mode disp will occupy more than 10 bits, but
  //          opcodes such as ld or ldx only access disp() to get
  //          their simm13 argument.
  int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
  if (is_in())
    return Address(FP, disp);  // In argument.
  else
    return Address(SP, disp);  // Out argument.
}
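
// Worked example (illustrative sketch, not in the original source; assumes
// Argument::n_register_parameters == 6, i.e. O0..O5 carry register arguments):
// the first stack-passed argument has _number == 6, giving
//   disp = (6 - 6 + frame::memory_parameter_word_sp_offset) * BytesPerWord
//          + STACK_BIAS,
// i.e. the first memory-parameter slot, addressed off FP for incoming
// arguments and off SP for outgoing ones.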

static const char* argumentNames[][2] = {
  {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
  {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
  {"A(n>9)","P(n>9)"}
};

const char* Argument::name() const {
  int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
  int num = number();
  if (num >= nofArgs)  num = nofArgs - 1;
  return argumentNames[num][is_in() ? 1 : 0];
}

#ifdef ASSERT
// On RISC, there's no benefit to verifying instruction boundaries.
bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

// Patch instruction inst at offset inst_pos to refer to dest_pos
// and return the resulting instruction.
// We should have pcs, not offsets, but since all is relative, it will work out
// OK.
int MacroAssembler::patched_branch(int dest_pos, int inst, int inst_pos) {
  int m; // mask for displacement field
  int v; // new value for displacement field
  const int word_aligned_ones = -4;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:    m = wdisp(word_aligned_ones, 0, 30);  v = wdisp(dest_pos, inst_pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:  m = wdisp(word_aligned_ones, 0, 19);  v = wdisp(dest_pos, inst_pos, 19); break;
      case bp_op2:   m = wdisp(word_aligned_ones, 0, 19);  v = wdisp(dest_pos, inst_pos, 19); break;
      case fb_op2:   m = wdisp(word_aligned_ones, 0, 22);  v = wdisp(dest_pos, inst_pos, 22); break;
      case br_op2:   m = wdisp(word_aligned_ones, 0, 22);  v = wdisp(dest_pos, inst_pos, 22); break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          m = wdisp10(word_aligned_ones, 0);
          v = wdisp10(dest_pos, inst_pos);
        } else {
          m = wdisp16(word_aligned_ones, 0);
          v = wdisp16(dest_pos, inst_pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return (inst & ~m) | v;
}

// Return the offset of the branch destination of instruction inst
// at offset pos.
// Should have pcs, but since all is relative, it works out.
int MacroAssembler::branch_destination(int inst, int pos) {
  int r;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:        r = inv_wdisp(inst, pos, 30);  break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:    r = inv_wdisp(inst, pos, 19);  break;
      case bp_op2:     r = inv_wdisp(inst, pos, 19);  break;
      case fb_op2:     r = inv_wdisp(inst, pos, 22);  break;
      case br_op2:     r = inv_wdisp(inst, pos, 22);  break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          r = inv_wdisp10(inst, pos);
        } else {
          r = inv_wdisp16(inst, pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return r;
}
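
// Illustrative sketch of the mask-and-merge patching above (restating the
// code, not adding to it): for a bp_op2 branch with a 19-bit word
// displacement, m = wdisp(-4, 0, 19) selects exactly the displacement field,
// v = wdisp(dest_pos, inst_pos, 19) encodes (dest_pos - inst_pos) >> 2, and
//   patched = (inst & ~m) | v;
// branch_destination() is the inverse: inv_wdisp(patched, inst_pos, 19)
// recovers dest_pos.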

void MacroAssembler::resolve_jobject(Register value, Register tmp) {
  Label done, not_weak;
  br_null(value, false, Assembler::pn, done); // Use NULL as-is.
  delayed()->andcc(value, JNIHandles::weak_tag_mask, G0); // Test for jweak
  brx(Assembler::zero, true, Assembler::pt, not_weak);
  delayed()->nop();
  access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
                 Address(value, -JNIHandles::weak_tag_value), value, tmp);
  verify_oop(value);
  br(Assembler::always, true, Assembler::pt, done);
  delayed()->nop();
  bind(not_weak);
  access_load_at(T_OBJECT, IN_NATIVE, Address(value, 0), value, tmp);
  verify_oop(value);
  bind(done);
}

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check((intptr_t)offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any registers
    ld_ptr(reg, 0, G0);
  }
  else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}

// Ring buffer jumps


void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line) {
  assert_not_delayed();
  jmpl(r1, r2, G0);
}
void MacroAssembler::jmp(Register r1, int offset, const char* file, int line) {
  assert_not_delayed();
  jmp(r1, offset);
}

// This code sequence is relocatable to any address, even on LP64.
void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  Address a(temp, addrlit.low10() + offset);  // Add the offset to the displacement.
  jmpl(a.base(), a.disp(), d);
}

void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
  jumpl(addrlit, temp, G0, offset, file, line);
}


// Conditional breakpoint (for assertion checks in assembly code)
void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
  trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
}

// We want to use ST_BREAKPOINT here, but the debugger is confused by it.
void MacroAssembler::breakpoint_trap() {
  trap(ST_RESERVED_FOR_USER_0);
}

// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  srl(thread, os::get_serialize_page_shift_count(), tmp2);
  if (Assembler::is_simm13(os::vm_page_size())) {
    and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
  }
  else {
    set((os::vm_page_size() - sizeof(int)), tmp1);
    and3(tmp2, tmp1, tmp2);
  }
  set(os::get_memory_serialize_page(), tmp1);
  st(G0, tmp1, tmp2);
}
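
// Worked example (illustrative; assumes os::vm_page_size() == 8192 and
// sizeof(int) == 4): serialize_memory() above stores G0 to
//   serialize_page + ((thread >> shift_count) & (8192 - 4)),
// an int-aligned, thread-specific slot, so concurrent threads tend to write
// different cache lines of the serialization page.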

void MacroAssembler::safepoint_poll(Label& slow_path, bool a, Register thread_reg, Register temp_reg) {
  if (SafepointMechanism::uses_thread_local_poll()) {
    ldx(Address(thread_reg, Thread::polling_page_offset()), temp_reg, 0);
    // Armed page has poll bit set.
    and3(temp_reg, SafepointMechanism::poll_bit(), temp_reg);
    br_notnull(temp_reg, a, Assembler::pn, slow_path);
  } else {
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    load_contents(sync_state, temp_reg);
    cmp(temp_reg, SafepointSynchronize::_not_synchronized);
    br(Assembler::notEqual, a, Assembler::pn, slow_path);
  }
}

void MacroAssembler::enter() {
  Unimplemented();
}

void MacroAssembler::leave() {
  Unimplemented();
}

// Calls to C land

#ifdef ASSERT
// a hook for debugging
static Thread* reinitialize_thread() {
  return Thread::current();
}
#else
#define reinitialize_thread Thread::current
#endif

#ifdef ASSERT
address last_get_thread = NULL;
#endif

// call this when G2_thread is not known to be valid
void MacroAssembler::get_thread() {
  save_frame(0);                // to avoid clobbering O0
  mov(G1, L0);                  // avoid clobbering G1
  mov(G5_method, L1);           // avoid clobbering G5
  mov(G3, L2);                  // avoid clobbering G3 also
  mov(G4, L5);                  // avoid clobbering G4
#ifdef ASSERT
  AddressLiteral last_get_thread_addrlit(&last_get_thread);
  set(last_get_thread_addrlit, L3);
  rdpc(L4);
  inc(L4, 3 * BytesPerInstWord); // skip rdpc + inc + st_ptr to point L4 at call
  st_ptr(L4, L3, 0);
#endif
  call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
  delayed()->nop();
  mov(L0, G1);
  mov(L1, G5_method);
  mov(L2, G3);
  mov(L5, G4);
  restore(O0, 0, G2_thread);
}

static Thread* verify_thread_subroutine(Thread* gthread_value) {
  Thread* correct_value = Thread::current();
  guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
  return correct_value;
}

void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    // NOTE: this chops off the heads of the 64-bit O registers.
    // make sure G2_thread contains the right value
    save_frame_and_mov(0, Lmethod, Lmethod);   // to avoid clobbering O0 (and propagate Lmethod)
    mov(G1, L1);                // avoid clobbering G1
    // G2 saved below
    mov(G3, L3);                // avoid clobbering G3
    mov(G4, L4);                // avoid clobbering G4
    mov(G5_method, L5);         // avoid clobbering G5_method
    call(CAST_FROM_FN_PTR(address, verify_thread_subroutine), relocInfo::runtime_call_type);
    delayed()->mov(G2_thread, O0);

    mov(L1, G1);                // Restore G1
    // G2 restored below
    mov(L3, G3);                // restore G3
    mov(L4, G4);                // restore G4
    mov(L5, G5_method);         // restore G5_method
    restore(O0, 0, G2_thread);
  }
}


void MacroAssembler::save_thread(const Register thread_cache) {
  verify_thread();
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(G2_thread, thread_cache);
  }
  if (VerifyThread) {
    // smash G2_thread, as if the VM were about to anyway
    set(0x67676767, G2_thread);
  }
}


void MacroAssembler::restore_thread(const Register thread_cache) {
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(thread_cache, G2_thread);
    verify_thread();
  } else {
    // do it the slow way
    get_thread();
  }
}
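
// Typical pairing (illustrative usage sketch, not from the original source):
// a stub that calls out to C several times can cache the thread in a local
// or in register, e.g.
//   save_thread(L7);     // G2_thread -> L7; G2 smashed under VerifyThread
//   ...                  // call out to C
//   restore_thread(L7);  // L7 -> G2_thread, then verify_thread()
// Passing noreg instead makes restore_thread() fall back to the slow
// get_thread() path.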

// %%% maybe get rid of [re]set_last_Java_frame
void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
  assert_not_delayed();
  Address flags(G2_thread, JavaThread::frame_anchor_offset() +
                           JavaFrameAnchor::flags_offset());
  Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset());

  // Always set last_Java_pc and flags first because once last_Java_sp is visible,
  // has_last_Java_frame is true and users will look at the rest of the fields.
  // (Note: flags should always be zero before we get here so doesn't need to be set.)

#ifdef ASSERT
  // Verify that last_Java_pc was zeroed on return to Java
  Label PcOk;
  save_frame(0);                // to avoid clobbering O0
  ld_ptr(pc_addr, L0);
  br_null_short(L0, Assembler::pt, PcOk);
  STOP("last_Java_pc not zeroed before leaving Java");
  bind(PcOk);

  // Verify that flags was zeroed on return to Java
  Label FlagsOk;
  ld(flags, L0);
  tst(L0);
  br(Assembler::zero, false, Assembler::pt, FlagsOk);
  delayed()->restore();
  STOP("flags not zeroed before leaving Java");
  bind(FlagsOk);
#endif /* ASSERT */
  //
  // When returning from calling out from Java mode the frame anchor's last_Java_pc
  // will always be set to NULL. It is set here so that if we are doing a call to
  // native (not VM) that we capture the known pc and don't have to rely on the
  // native call having a standard frame linkage where we can find the pc.

  if (last_Java_pc->is_valid()) {
    st_ptr(last_Java_pc, pc_addr);
  }

#ifdef ASSERT
  // Make sure that we have an odd stack
  Label StackOk;
  andcc(last_java_sp, 0x01, G0);
  br(Assembler::notZero, false, Assembler::pt, StackOk);
  delayed()->nop();
  STOP("Stack Not Biased in set_last_Java_frame");
  bind(StackOk);
#endif // ASSERT
  assert(last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
  add(last_java_sp, STACK_BIAS, G4_scratch);
  st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
}

void MacroAssembler::reset_last_Java_frame(void) {
  assert_not_delayed();

  Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset());
  Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
  Address flags  (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

#ifdef ASSERT
  // check that it WAS previously set
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod to helper frame
  ld_ptr(sp_addr, L0);
  tst(L0);
  breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
  restore();
#endif // ASSERT

  st_ptr(G0, sp_addr);
  // Always return last_Java_pc to zero
  st_ptr(G0, pc_addr);
  // Always null flags after return to Java
  st(G0, flags);
}

void MacroAssembler::call_VM_base(
  Register        oop_result,
  Register        thread_cache,
  Register        last_java_sp,
  address         entry_point,
  int             number_of_arguments,
  bool            check_exceptions)
{
  assert_not_delayed();

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");

  // 64-bit last_java_sp is biased!
  set_last_Java_frame(last_java_sp, noreg);
  if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread)
    delayed()->mov(G2_thread, O0);  // pass thread as first argument
  else
    delayed()->nop();               // (thread already passed)
  restore_thread(thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions. use Gtemp as scratch register.
  if (check_exceptions) {
    check_and_forward_exception(Gtemp);
  }

#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }
}

void MacroAssembler::check_and_forward_exception(Register scratch_reg)
{
  Label L;

  check_and_handle_popframe(scratch_reg);
  check_and_handle_earlyret(scratch_reg);

  Address exception_addr(G2_thread, Thread::pending_exception_offset());
  ld_ptr(exception_addr, scratch_reg);
  br_null_short(scratch_reg, pt, L);
  // we use O7 linkage so that forward_exception_entry has the issuing PC
  call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  delayed()->nop();
  bind(L);
}


void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
}


void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, entry_point, 3, check_exceptions);
}



// Note: The following call_VM overloadings are useful when a "save"
// has already been performed by a stub, and the last Java frame is
// the previous one.  In that case, last_java_sp must be passed as FP
// instead of SP.

void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}



void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
  assert_not_delayed();
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  delayed()->nop();
  restore_thread(thread_cache);
#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
  call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
  mov(arg_1, O0);
  call_VM_leaf(thread_cache, entry_point, 1);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 2);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 3);
}


void MacroAssembler::get_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  ld_ptr(vm_result_addr, oop_result);
  st_ptr(G0, vm_result_addr);
  verify_oop(oop_result);
}


void MacroAssembler::get_vm_result_2(Register metadata_result) {
  verify_thread();
  Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
  ld_ptr(vm_result_addr_2, metadata_result);
  st_ptr(G0, vm_result_addr_2);
}
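
// Division of labor between the two families above: call_VM_base() publishes
// a last-Java-frame and (optionally) checks for pending exceptions, while
// call_VM_leaf_base() only saves/restores the cached thread around the call.
// Leaf calls are therefore only suitable for C code that cannot block, GC,
// or throw.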

// We require that C code which does not return a value in vm_result will
// leave it undisturbed.
void MacroAssembler::set_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  verify_oop(oop_result);

# ifdef ASSERT
  // Check that we are not overwriting any other oop.
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod
  ld_ptr(vm_result_addr, L0);
  tst(L0);
  restore();
  breakpoint_trap(notZero, Assembler::ptr_cc);
# endif

  st_ptr(oop_result, vm_result_addr);
}


void MacroAssembler::ic_call(address entry, bool emit_delay, jint method_index) {
  RelocationHolder rspec = virtual_call_Relocation::spec(pc(), method_index);
  patchable_set((intptr_t)Universe::non_oop_word(), G5_inline_cache_reg);
  relocate(rspec);
  call(entry, relocInfo::none);
  if (emit_delay) {
    delayed()->nop();
  }
}


void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  address save_pc;
  int shiftcnt;
#ifdef VALIDATE_PIPELINE
  assert_no_delay("Cannot put two instructions in delay-slot.");
#endif
  v9_dep();
  save_pc = pc();

  int msb32 = (int) (addrlit.value() >> 32);
  int lsb32 = (int) (addrlit.value());

  if (msb32 == 0 && lsb32 >= 0) {
    Assembler::sethi(lsb32, d, addrlit.rspec());
  }
  else if (msb32 == -1) {
    Assembler::sethi(~lsb32, d, addrlit.rspec());
    xor3(d, ~low10(~0), d);
  }
  else {
    Assembler::sethi(msb32, d, addrlit.rspec());  // msb 22-bits
    if (msb32 & 0x3ff)                            // Any bits?
      or3(d, msb32 & 0x3ff, d);                   // msb 32-bits are now in lsb 32
    if (lsb32 & 0xFFFFFC00) {                     // done?
      if ((lsb32 >> 20) & 0xfff) {                // Any bits set?
        sllx(d, 12, d);                           // Make room for next 12 bits
        or3(d, (lsb32 >> 20) & 0xfff, d);         // Or in next 12
        shiftcnt = 0;                             // We already shifted
      }
      else
        shiftcnt = 12;
      if ((lsb32 >> 10) & 0x3ff) {
        sllx(d, shiftcnt + 10, d);                // Make room for last 10 bits
        or3(d, (lsb32 >> 10) & 0x3ff, d);         // Or in next 10
        shiftcnt = 0;
      }
      else
        shiftcnt = 10;
      sllx(d, shiftcnt + 10, d);                  // Shift leaving disp field 0'd
    }
    else
      sllx(d, 32, d);
  }
  // Pad out the instruction sequence so it can be patched later.
  if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
                           addrlit.rtype() != relocInfo::runtime_call_type)) {
    while (pc() < (save_pc + (7 * BytesPerInstWord)))
      nop();
  }
}
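
// Worked example (illustrative): when msb32 is neither 0 nor -1, the general
// path above emits at most seven instructions:
//   sethi(msb32, d)                   // upper 22 bits of msb32
//   or3(d, msb32 & 0x3ff, d)          // low 10 bits of msb32
//   sllx(d, 12, d); or3(d, ..., d)    // next 12 bits of lsb32
//   sllx(d, 10, d); or3(d, ..., d)    // next 10 bits of lsb32
//   sllx(d, 10, d)                    // leave the low 10 bits zero
// internal_set() supplies the low 10 bits with a final add, which is why
// relocatable sequences are padded out to 7 * BytesPerInstWord.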

void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, false);
}


void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, true);
}


int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
  if (worst_case)  return 7;
  intptr_t iaddr = (intptr_t) a;
  int msb32 = (int) (iaddr >> 32);
  int lsb32 = (int) (iaddr);
  int count;
  if (msb32 == 0 && lsb32 >= 0)
    count = 1;
  else if (msb32 == -1)
    count = 2;
  else {
    count = 2;
    if (msb32 & 0x3ff)
      count++;
    if (lsb32 & 0xFFFFFC00) {
      if ((lsb32 >> 20) & 0xfff)  count += 2;
      if ((lsb32 >> 10) & 0x3ff)  count += 2;
    }
  }
  return count;
}

int MacroAssembler::worst_case_insts_for_set() {
  return insts_for_sethi(NULL, true) + 1;
}


// Keep in sync with MacroAssembler::insts_for_internal_set
void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  intptr_t value = addrlit.value();

  if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) {
    // can optimize
    if (-4096 <= value && value <= 4095) {
      or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
      return;
    }
    if (inv_hi22(hi22(value)) == value) {
      sethi(addrlit, d);
      return;
    }
  }
  assert_no_delay("Cannot put two instructions in delay-slot.");
  internal_sethi(addrlit, d, ForceRelocatable);
  if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
    add(d, addrlit.low10(), d, addrlit.rspec());
  }
}

// Keep in sync with MacroAssembler::internal_set
int MacroAssembler::insts_for_internal_set(intptr_t value) {
  // can optimize
  if (-4096 <= value && value <= 4095) {
    return 1;
  }
  if (inv_hi22(hi22(value)) == value) {
    return insts_for_sethi((address) value);
  }
  int count = insts_for_sethi((address) value);
  AddressLiteral al(value);
  if (al.low10() != 0) {
    count++;
  }
  return count;
}

void MacroAssembler::set(const AddressLiteral& al, Register d) {
  internal_set(al, d, false);
}

void MacroAssembler::set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, false);
}

void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) {
  AddressLiteral al(addr, rspec);
  internal_set(al, d, false);
}

void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) {
  internal_set(al, d, true);
}

void MacroAssembler::patchable_set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, true);
}
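
// Usage sketch (illustrative): without relocation, internal_set() collapses
// to a single instruction for small or sethi-representable constants, e.g.
//   set(4095, d)         ==> or3(G0, 4095, d)       // simm13 range
//   set(0x12345400, d)   ==> sethi(%hi(0x12345400), d)
// (the second case applies because inv_hi22(hi22(value)) == value when the
// low 10 bits are zero and the value fits the sethi form).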

void MacroAssembler::set64(jlong value, Register d, Register tmp) {
  assert_not_delayed();
  v9_dep();

  int hi = (int)(value >> 32);
  int lo = (int)(value & ~0);
  int bits_33to2 = (int)((value >> 2) & ~0);
  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    or3(G0, lo, d);
  } else if (hi == 0) {
    Assembler::sethi(lo, d);   // hardware version zero-extends to upper 32
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
  }
  else if ((hi >> 2) == 0) {
    Assembler::sethi(bits_33to2, d);  // hardware version zero-extends to upper 32
    sllx(d, 2, d);
    if (low12(lo) != 0)
      or3(d, low12(lo), d);
  }
  else if (hi == -1) {
    Assembler::sethi(~lo, d);  // hardware version zero-extends to upper 32
    xor3(d, low10(lo) ^ ~low10(~0), d);
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      or3(G0, hi, d);
    } else {
      Assembler::sethi(hi, d);   // hardware version zero-extends to upper 32
      if (low10(hi) != 0)
        or3(d, low10(hi), d);
    }
    sllx(d, 32, d);
  }
  else {
    Assembler::sethi(hi, tmp);
    Assembler::sethi(lo, d); // macro assembler version sign-extends
    if (low10(hi) != 0)
      or3(tmp, low10(hi), tmp);
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
    sllx(tmp, 32, tmp);
    or3(d, tmp, d);
  }
}

int MacroAssembler::insts_for_set64(jlong value) {
  v9_dep();

  int hi = (int) (value >> 32);
  int lo = (int) (value & ~0);
  int count = 0;

  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    count++;
  } else if (hi == 0) {
    count++;
    if (low10(lo) != 0)
      count++;
  }
  else if (hi == -1) {
    count += 2;
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      count++;
    } else {
      count++;
      if (low10(hi) != 0)
        count++;
    }
    count++;
  }
  else {
    count += 2;
    if (low10(hi) != 0)
      count++;
    if (low10(lo) != 0)
      count++;
    count += 2;
  }
  return count;
}

// compute size in bytes of sparc frame, given
// number of extraWords
int MacroAssembler::total_frame_size_in_bytes(int extraWords) {

  int nWords = frame::memory_parameter_word_sp_offset;

  nWords += extraWords;

  if (nWords & 1) ++nWords; // round up to double-word

  return nWords * BytesPerWord;
}


// save_frame: given number of "extra" words in frame,
// issue approp. save instruction (p 200, v8 manual)

void MacroAssembler::save_frame(int extraWords) {
  int delta = -total_frame_size_in_bytes(extraWords);
  if (is_simm13(delta)) {
    save(SP, delta, SP);
  } else {
    set(delta, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_c1(int size_in_bytes) {
  if (is_simm13(-size_in_bytes)) {
    save(SP, -size_in_bytes, SP);
  } else {
    set(-size_in_bytes, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}
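
// Sizing note (restating the helpers above): total_frame_size_in_bytes()
// rounds an odd word count up by one so the frame stays double-word aligned,
// and save_frame() emits a single save(SP, -size, SP) when the delta fits a
// simm13, otherwise it materializes the delta in G3_scratch first.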

void MacroAssembler::save_frame_and_mov(int extraWords,
                                        Register s1, Register d1,
                                        Register s2, Register d2) {
  assert_not_delayed();

  // The trick here is to use precisely the same memory word
  // that trap handlers also use to save the register.
  // This word cannot be used for any other purpose, but
  // it works fine to save the register's value, whether or not
  // an interrupt flushes register windows at any given moment!
  Address s1_addr;
  if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
    s1_addr = s1->address_in_saved_window();
    st_ptr(s1, s1_addr);
  }

  Address s2_addr;
  if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
    s2_addr = s2->address_in_saved_window();
    st_ptr(s2, s2_addr);
  }

  save_frame(extraWords);

  if (s1_addr.base() == SP) {
    ld_ptr(s1_addr.after_save(), d1);
  } else if (s1->is_valid()) {
    mov(s1->after_save(), d1);
  }

  if (s2_addr.base() == SP) {
    ld_ptr(s2_addr.after_save(), d2);
  } else if (s2->is_valid()) {
    mov(s2->after_save(), d2);
  }
}


AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}


AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
#ifdef ASSERT
  {
    ThreadInVMfromUnknown tiv;
    assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
    assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
  }
#endif
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(obj, oop_Relocation::spec(oop_index));
}

void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(0x3fffff, d);
  emit_int32(op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff));
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, 0x3ff, d);
}
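
// Patch-format note (restating set_narrow_oop() above): the emitted pair
//   sethi %hi(0x3fffff), d   ! all-ones 22-bit placeholder, special reloc
//   add   d, 0x3ff, d        ! low-bits placeholder, no reloc of its own
// is later rewritten with the real compressed oop during relocation
// processing of the sethi (see relocInfo_sparc.hpp).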

void MacroAssembler::set_narrow_klass(Klass* k, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  narrowOop encoded_k = Klass::encode_klass(k);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(encoded_k, d);
  emit_int32(op(branch_op) | rd(d) | op2(sethi_op2) | hi22(encoded_k));
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, low10(encoded_k), d);
}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) nop();
}

void RegistersForDebugging::print(outputStream* s) {
  FlagSetting fs(Debugging, true);
  int j;
  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("i%d = ", j); os::print_location(s, i[j]); }
    else        { s->print("fp = ");     os::print_location(s, i[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("l%d = ", j); os::print_location(s, l[j]);
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("o%d = ", j); os::print_location(s, o[j]); }
    else        { s->print("sp = ");     os::print_location(s, o[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("g%d = ", j); os::print_location(s, g[j]);
  }
  s->cr();

  // print out floats with compression
  for (j = 0; j < 32; ) {
    jfloat val = f[j];
    int last = j;
    for ( ; last + 1 < 32; ++last) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", f[last + 1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("f%d", j);
    if (j != last)  s->print(" - f%d", last);
    s->print(" = %f", val);
    s->fill_to(25);
    s->print_cr(" (0x%x)", *(int*)&val);
    j = last + 1;
  }
  s->cr();

  // and doubles (evens only)
  for (j = 0; j < 32; ) {
    jdouble val = d[j];
    int last = j;
    for ( ; last + 1 < 32; ++last) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", d[last + 1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("d%d", 2 * j);
    if (j != last)  s->print(" - d%d", last);
    s->print(" = %f", val);
    s->fill_to(30);
    s->print("(0x%x)", *(int*)&val);
    s->fill_to(42);
    s->print_cr("(0x%x)", *(1 + (int*)&val));
    j = last + 1;
  }
  s->cr();
}

void RegistersForDebugging::save_registers(MacroAssembler* a) {
  a->sub(FP, align_up(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
  a->flushw();
  int i;
  for (i = 0; i < 8; ++i) {
    a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr(L1, O0, i_offset(i));
    a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr(L1, O0, l_offset(i));
    a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
    a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
  }
  for (i = 0; i < 32; ++i) {
    a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
  }
  for (i = 0; i < 64; i += 2) {
    a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
  }
}

void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
  for (int i = 1; i < 8; ++i) {
    a->ld_ptr(r, g_offset(i), as_gRegister(i));
  }
  for (int j = 0; j < 32; ++j) {
    a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
  }
  for (int k = 0; k < 64; k += 2) {
    a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
  }
}
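
// Layout sketch (illustrative): save_registers() points O0 at a
// RegistersForDebugging block just below FP, flushes the register windows,
// and spills the i/l/o/g integer state plus all single and even double FP
// registers into it, so RegistersForDebugging::print() can later walk the
// arrays from the debug() handler.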

// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
void MacroAssembler::push_fTOS() {
  // %%%%%% need to implement this
}

// pops double TOS element from CPU stack and pushes on FPU stack
void MacroAssembler::pop_fTOS() {
  // %%%%%% need to implement this
}

void MacroAssembler::empty_FPU_stack() {
  // %%%%%% need to implement this
}

void MacroAssembler::_verify_oop(Register reg, const char* msg, const char* file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  if (reg == G0) return; // always NULL, which is always an oop

  BLOCK_COMMENT("verify_oop {");
  char buffer[64];
#ifdef COMPILER1
  if (CommentedAssembly) {
    snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
    block_comment(buffer);
  }
#endif

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at offset %d (%s:%d)", msg, offset(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP, -8*8, SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0, SP, frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1, SP, frame::register_save_words*wordSize+STACK_BIAS+1*8);
  mov(reg, O0); // Move arg into O0; arg might be in O7 which is about to be crushed
  stx(O7, SP, frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8, SP);
  BLOCK_COMMENT("} verify_oop");
}

void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char* file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP, -8*8, SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0, SP, frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1, SP, frame::register_save_words*wordSize+STACK_BIAS+1*8);
  ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
  stx(O7, SP, frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8, SP);
}

// side-door communication with signalHandler in os_solaris.cpp
address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };

// This macro is expanded just once; it creates shared code.  Contract:
// receives an oop in O0.  Must restore O0 & O7 from TLS.  Must not smash ANY
// registers, including flags.  May not use a register 'save', as this blows
// the high bits of the O-regs if they contain Long values.  Acts as a 'leaf'
// call.
void MacroAssembler::verify_oop_subroutine() {
  // Leaf call; no frame.
  Label succeed, fail, null_or_fail;

  // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home).
  // O0 is now the oop to be checked.  O7 is the return address.
  Register O0_obj = O0;

  // Save some more registers for temps.
  stx(O2, SP, frame::register_save_words*wordSize+STACK_BIAS+2*8);
  stx(O3, SP, frame::register_save_words*wordSize+STACK_BIAS+3*8);
  stx(O4, SP, frame::register_save_words*wordSize+STACK_BIAS+4*8);
  stx(O5, SP, frame::register_save_words*wordSize+STACK_BIAS+5*8);

  // Save flags
  Register O5_save_flags = O5;
  rdccr(O5_save_flags);

  { // count number of verifies
    Register O2_adr   = O2;
    Register O3_accum = O3;
    inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum);
  }

  Register O2_mask = O2;
  Register O3_bits = O3;
  Register O4_temp = O4;

  // mark lower end of faulting range
  assert(_verify_oop_implicit_branch[0] == NULL, "set once");
  _verify_oop_implicit_branch[0] = pc();

  // We can't check the mark oop because it could be in the process of
  // locking or unlocking while this is running.
  set(Universe::verify_oop_mask(), O2_mask);
  set(Universe::verify_oop_bits(), O3_bits);

  // assert((obj & oop_mask) == oop_bits);
  and3(O0_obj, O2_mask, O4_temp);
  cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, null_or_fail);

  if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) {
    // the null_or_fail case is useless; must test for null separately
    br_null_short(O0_obj, pn, succeed);
  }

  // Check the Klass* of this object for being in the right area of memory.
  // Cannot do the load in the delay slot above in case O0 is null
  load_klass(O0_obj, O0_obj);
  // assert((klass != NULL)
  br_null_short(O0_obj, pn, fail);

  wrccr(O5_save_flags); // Restore CCR's

  // mark upper end of faulting range
  _verify_oop_implicit_branch[1] = pc();

  //-----------------------
  // all tests pass
  bind(succeed);

  // Restore prior 64-bit registers
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+0*8, O0);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+1*8, O1);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+2*8, O2);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+3*8, O3);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+4*8, O4);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+5*8, O5);

  retl();  // Leaf return; restore prior O7 in delay slot
  delayed()->ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+7*8, O7);

  //-----------------------
  bind(null_or_fail);  // nulls are less common but OK
  br_null(O0_obj, false, pt, succeed);
  delayed()->wrccr(O5_save_flags); // Restore CCR's

  //-----------------------
  // report failure:
  bind(fail);
  _verify_oop_implicit_branch[2] = pc();

  wrccr(O5_save_flags); // Restore CCR's

  save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  mov(I1, O1);

  // Restore prior 64-bit registers
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+0*8, I0);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+1*8, I1);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+2*8, I2);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+3*8, I3);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+4*8, I4);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+5*8, I5);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(al, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();
}


void MacroAssembler::stop(const char* msg) {
  // save frame first to get O7 for return address
  // add one word to size in case struct is odd number of words long
  // It must be doubleword-aligned for storing doubles into it.

  save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O1);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(a, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();

  breakpoint_trap();   // make stop actually stop rather than writing
                       // unnoticeable results in the output files.

  // restore(); done in callee to save space!
}

void MacroAssembler::warn(const char* msg) {
  save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2));
  RegistersForDebugging::save_registers(this);
  mov(O0, L0);
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O0);
  call(CAST_FROM_FN_PTR(address, warning));
  delayed()->nop();
//  ret();
//  delayed()->restore();
  RegistersForDebugging::restore_registers(this, L0);
  restore();
}


void MacroAssembler::untested(const char* what) {
  // We must be able to turn interactive prompting off
  // in order to run automated test scripts on the VM.
  // Use the flag ShowMessageBoxOnError.

  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("untested: %s", what);
    b = code_string(ss.as_string());
  }
  if (ShowMessageBoxOnError) { STOP(b); }
  else                       { warn(b); }
}


void MacroAssembler::unimplemented(const char* what) {
  const char* buf = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("unimplemented: %s", what);
    buf = code_string(ss.as_string());
  }
  stop(buf);
}


void MacroAssembler::stop_subroutine() {
  RegistersForDebugging::save_registers(this);

  // for the sake of the debugger, stick a PC on the current frame
  // (this assumes that the caller has performed an extra "save")
  mov(I7, L7);
  add(O7, -7 * BytesPerInt, I7);

  save_frame();     // one more save to free up another O7 register
  mov(I0, O1);      // addr of reg save area

  // We expect pointer to message in I1. Caller must set it up in O1
  mov(I1, O0);      // get msg
  call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
  delayed()->nop();

  restore();

  RegistersForDebugging::restore_registers(this, O0);

  save_frame(0);
  call(CAST_FROM_FN_PTR(address, breakpoint));
  delayed()->nop();
  restore();

  mov(L7, I7);
  retl();
  delayed()->restore(); // see stop above
}


void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
    {
      // In order to get locks to work, we need to fake an in_VM state
      ttyLocker ttyl;
      ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
      if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
        BytecodeCounter::print();
      }
      if (os::message_box(msg, "Execution stopped, print registers?"))
        regs->print(::tty);
    }
    BREAKPOINT;
    ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
  }
  else {
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
  }
  assert(false, "DEBUG MESSAGE: %s", msg);
}

void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) {
  subcc(Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
  Label no_extras;
  br(negative, true, pt, no_extras);  // if neg, clear reg
  delayed()->set(0, Rresult);         // annulled, so only if taken
  bind(no_extras);
}


void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
  add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
  bclr(1, Rresult);
  sll(Rresult, LogBytesPerWord, Rresult);  // Rresult has total frame bytes
}


void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) {
  calc_frame_size(Rextra_words, Rresult);
  neg(Rresult);
  save(SP, Rresult, SP);
}


// ---------------------------------------------------------
Assembler::RCondition cond2rcond(Assembler::Condition c) {
  switch (c) {
    /*case zero: */
    case Assembler::equal:        return Assembler::rc_z;
    case Assembler::lessEqual:    return Assembler::rc_lez;
    case Assembler::less:         return Assembler::rc_lz;
    /*case notZero:*/
    case Assembler::notEqual:     return Assembler::rc_nz;
    case Assembler::greater:      return Assembler::rc_gz;
    case Assembler::greaterEqual: return Assembler::rc_gez;
  }
  ShouldNotReachHere();
  return Assembler::rc_z;
}

// compares (32 bit) register with zero and branches.  NOT FOR USE WITH 64-bit POINTERS
void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a, Predict p) {
  tst(s1);
  br(c, a, p, L);
}

// Compares a pointer register with zero and branches on null.
// Does a test & branch on 32-bit systems and a register-branch on 64-bit.
void MacroAssembler::br_null(Register s1, bool a, Predict p, Label& L) {
  assert_not_delayed();
  bpr(rc_z, a, p, s1, L);
}

void MacroAssembler::br_notnull(Register s1, bool a, Predict p, Label& L) {
  assert_not_delayed();
  bpr(rc_nz, a, p, s1, L);
}

// Compare registers and branch with nop in delay slot or cbcond without delay slot.

// Compare integer (32 bit) values (icc only).
void MacroAssembler::cmp_and_br_short(Register s1, Register s2, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, s2, L);
  } else {
    cmp(s1, s2);
    br(c, false, p, L);
    delayed()->nop();
  }
}
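
// Branch-shape note (restating use_cbcond() above): when the label is in
// range and cbcond is available, the short form is a single
// compare-and-branch with no delay slot,
//   cbcond(c, icc, s1, s2, L);
// versus the classic three-instruction idiom
//   cmp(s1, s2); br(c, false, p, L); delayed()->nop();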

// Compare integer (32 bit) values (icc only).
void MacroAssembler::cmp_and_br_short(Register s1, int simm13a, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (is_simm(simm13a, 5) && use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, simm13a, L);
  } else {
    cmp(s1, simm13a);
    br(c, false, p, L);
    delayed()->nop();
  }
}

// Branch that tests xcc in LP64 and icc in !LP64
void MacroAssembler::cmp_and_brx_short(Register s1, Register s2, Condition c,
                                       Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, ptr_cc, s1, s2, L);
  } else {
    cmp(s1, s2);
    brx(c, false, p, L);
    delayed()->nop();
  }
}

// Branch that tests xcc in LP64 and icc in !LP64
void MacroAssembler::cmp_and_brx_short(Register s1, int simm13a, Condition c,
                                       Predict p, Label& L) {
  assert_not_delayed();
  if (is_simm(simm13a, 5) && use_cbcond(L)) {
    Assembler::cbcond(c, ptr_cc, s1, simm13a, L);
  } else {
    cmp(s1, simm13a);
    brx(c, false, p, L);
    delayed()->nop();
  }
}

// Short branch versions for comparing a pointer with zero.

void MacroAssembler::br_null_short(Register s1, Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(zero, ptr_cc, s1, 0, L);
  } else {
    br_null(s1, false, p, L);
    delayed()->nop();
  }
}

void MacroAssembler::br_notnull_short(Register s1, Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(notZero, ptr_cc, s1, 0, L);
  } else {
    br_notnull(s1, false, p, L);
    delayed()->nop();
  }
}

// Unconditional short branch
void MacroAssembler::ba_short(Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(equal, icc, G0, G0, L);
  } else {
    br(always, false, pt, L);
    delayed()->nop();
  }
}

// Branch if 'icc' says zero or not (i.e. icc.z == 1|0).

void MacroAssembler::br_icc_zero(bool iszero, Predict p, Label& L) {
  assert_not_delayed();
  Condition cf = (iszero ? Assembler::zero : Assembler::notZero);
  br(cf, false, p, L);
  delayed()->nop();
}

// instruction sequences factored across compiler & interpreter

void MacroAssembler::lcmp(Register Ra_hi, Register Ra_low,
                          Register Rb_hi, Register Rb_low,
                          Register Rresult) {

  Label check_low_parts, done;

  cmp(Ra_hi, Rb_hi);  // compare hi parts
  br(equal, true, pt, check_low_parts);
  delayed()->cmp(Ra_low, Rb_low); // test low parts

  // And, with an unsigned comparison, it does not matter if the numbers
  // are negative or not.
  // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff.
  // The second one is bigger (unsignedly).

  // Other notes: The first move in each triplet can be unconditional
  // (and therefore probably prefetchable).
  // And the equals case for the high part does not need testing,
  // since that triplet is reached only after finding the high halves differ.

  mov(-1, Rresult);
  ba(done);
  delayed()->movcc(greater, false, icc, 1, Rresult);

  bind(check_low_parts);

  mov(-1, Rresult);
  movcc(equal,           false, icc, 0, Rresult);
  movcc(greaterUnsigned, false, icc, 1, Rresult);

  bind(done);
}

void MacroAssembler::lneg(Register Rhi, Register Rlow) {
  subcc(G0, Rlow, Rlow);
  subc (G0, Rhi,  Rhi);
}

void MacroAssembler::lshl(Register Rin_high, Register Rin_low,
                          Register Rcount,
                          Register Rout_high, Register Rout_low,
                          Register Rtemp) {


  Register Ralt_count = Rtemp;
  Register Rxfer_bits = Rtemp;

  assert(Ralt_count != Rin_high
      && Ralt_count != Rin_low
      && Ralt_count != Rcount
      && Rxfer_bits != Rin_low
      && Rxfer_bits != Rin_high
      && Rxfer_bits != Rcount
      && Rxfer_bits != Rout_low
      && Rout_low   != Rin_high,
         "register alias checks");

  Label big_shift, done;

  // This code can be optimized to use the 64 bit shifts in V9.
  // Here we use the 32 bit shifts.

  and3(Rcount, 0x3f, Rcount); // take least significant 6 bits
  subcc(Rcount, 31, Ralt_count);
  br(greater, true, pn, big_shift);
  delayed()->dec(Ralt_count);

  // shift < 32 bits, Ralt_count = Rcount-31

  // We get the transfer bits by shifting right by 32-count the low
  // register. This is done by shifting right by 31-count and then by one
  // more to take care of the special (rare) case where count is zero
  // (shifting by 32 would not work).

  neg(Ralt_count);

  // The order of the next two instructions is critical in the case where
  // Rin and Rout are the same and should not be reversed.

  srl(Rin_low, Ralt_count, Rxfer_bits); // shift right by 31-count
  if (Rcount != Rout_low) {
    sll(Rin_low, Rcount, Rout_low); // low half
  }
  sll(Rin_high, Rcount, Rout_high);
  if (Rcount == Rout_low) {
    sll(Rin_low, Rcount, Rout_low); // low half
  }
  srl(Rxfer_bits, 1, Rxfer_bits);  // shift right by one more
  ba(done);
  delayed()->or3(Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low

  // shift >= 32 bits, Ralt_count = Rcount-32
  bind(big_shift);
  sll(Rin_low, Ralt_count, Rout_high);
  clr(Rout_low);

  bind(done);
}
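
// Worked example (illustrative, count == 4 on the small-shift path of lshl):
//   Ralt_count = -(4 - 31) = 27
//   Rxfer_bits = (Rin_low >> 27) >> 1 = Rin_low >> 28  // high 4 bits of low
//   Rout_low   = Rin_low  << 4
//   Rout_high  = (Rin_high << 4) | Rxfer_bits
// The extra one-bit shift is what makes count == 0 safe, where a direct
// shift by 32 would not work on the 32-bit shifter.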
This is done by shifting left by 31-count and then by one 1782 // more to take care of the special (rare) case where count is zero 1783 // (shifting by 32 would not work). 1784 1785 neg(Ralt_count); 1786 if (Rcount != Rout_low) { 1787 srl(Rin_low, Rcount, Rout_low); 1788 } 1789 1790 // The order of the next two instructions is critical in the case where 1791 // Rin and Rout are the same and should not be reversed. 1792 1793 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count 1794 sra(Rin_high, Rcount, Rout_high ); // high half 1795 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more 1796 if (Rcount == Rout_low) { 1797 srl(Rin_low, Rcount, Rout_low); 1798 } 1799 ba(done); 1800 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high 1801 1802 // shift >= 32 bits, Ralt_count = Rcount-32 1803 bind(big_shift); 1804 1805 sra(Rin_high, Ralt_count, Rout_low); 1806 sra(Rin_high, 31, Rout_high); // sign into hi 1807 1808 bind( done ); 1809 } 1810 1811 1812 1813 void MacroAssembler::lushr( Register Rin_high, Register Rin_low, 1814 Register Rcount, 1815 Register Rout_high, Register Rout_low, 1816 Register Rtemp ) { 1817 1818 Register Ralt_count = Rtemp; 1819 Register Rxfer_bits = Rtemp; 1820 1821 assert( Ralt_count != Rin_high 1822 && Ralt_count != Rin_low 1823 && Ralt_count != Rcount 1824 && Rxfer_bits != Rin_low 1825 && Rxfer_bits != Rin_high 1826 && Rxfer_bits != Rcount 1827 && Rxfer_bits != Rout_high 1828 && Rout_high != Rin_low, 1829 "register alias checks"); 1830 1831 Label big_shift, done; 1832 1833 // This code can be optimized to use the 64 bit shifts in V9. 1834 // Here we use the 32 bit shifts. 1835 1836 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits 1837 subcc(Rcount, 31, Ralt_count); 1838 br(greater, true, pn, big_shift); 1839 delayed()->dec(Ralt_count); 1840 1841 // shift < 32 bits, Ralt_count = Rcount-31 1842 1843 // We get the transfer bits by shifting left by 32-count the high 1844 // register. This is done by shifting left by 31-count and then by one 1845 // more to take care of the special (rare) case where count is zero 1846 // (shifting by 32 would not work). 1847 1848 neg(Ralt_count); 1849 if (Rcount != Rout_low) { 1850 srl(Rin_low, Rcount, Rout_low); 1851 } 1852 1853 // The order of the next two instructions is critical in the case where 1854 // Rin and Rout are the same and should not be reversed. 1855 1856 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count 1857 srl(Rin_high, Rcount, Rout_high ); // high half 1858 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more 1859 if (Rcount == Rout_low) { 1860 srl(Rin_low, Rcount, Rout_low); 1861 } 1862 ba(done); 1863 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high 1864 1865 // shift >= 32 bits, Ralt_count = Rcount-32 1866 bind(big_shift); 1867 1868 srl(Rin_high, Ralt_count, Rout_low); 1869 clr(Rout_high); 1870 1871 bind( done ); 1872 } 1873 1874 void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) { 1875 cmp(Ra, Rb); 1876 mov(-1, Rresult); 1877 movcc(equal, false, xcc, 0, Rresult); 1878 movcc(greater, false, xcc, 1, Rresult); 1879 } 1880 1881 1882 void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) { 1883 switch (size_in_bytes) { 1884 case 8: ld_long(src, dst); break; 1885 case 4: ld( src, dst); break; 1886 case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break; 1887 case 1: is_signed ? 
ldsb(src, dst) : ldub(src, dst); break; 1888 default: ShouldNotReachHere(); 1889 } 1890 } 1891 1892 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) { 1893 switch (size_in_bytes) { 1894 case 8: st_long(src, dst); break; 1895 case 4: st( src, dst); break; 1896 case 2: sth( src, dst); break; 1897 case 1: stb( src, dst); break; 1898 default: ShouldNotReachHere(); 1899 } 1900 } 1901 1902 1903 void MacroAssembler::float_cmp( bool is_float, int unordered_result, 1904 FloatRegister Fa, FloatRegister Fb, 1905 Register Rresult) { 1906 if (is_float) { 1907 fcmp(FloatRegisterImpl::S, fcc0, Fa, Fb); 1908 } else { 1909 fcmp(FloatRegisterImpl::D, fcc0, Fa, Fb); 1910 } 1911 1912 if (unordered_result == 1) { 1913 mov( -1, Rresult); 1914 movcc(f_equal, true, fcc0, 0, Rresult); 1915 movcc(f_unorderedOrGreater, true, fcc0, 1, Rresult); 1916 } else { 1917 mov( -1, Rresult); 1918 movcc(f_equal, true, fcc0, 0, Rresult); 1919 movcc(f_greater, true, fcc0, 1, Rresult); 1920 } 1921 } 1922 1923 1924 void MacroAssembler::save_all_globals_into_locals() { 1925 mov(G1,L1); 1926 mov(G2,L2); 1927 mov(G3,L3); 1928 mov(G4,L4); 1929 mov(G5,L5); 1930 mov(G6,L6); 1931 mov(G7,L7); 1932 } 1933 1934 void MacroAssembler::restore_globals_from_locals() { 1935 mov(L1,G1); 1936 mov(L2,G2); 1937 mov(L3,G3); 1938 mov(L4,G4); 1939 mov(L5,G5); 1940 mov(L6,G6); 1941 mov(L7,G7); 1942 } 1943 1944 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, 1945 Register tmp, 1946 int offset) { 1947 intptr_t value = *delayed_value_addr; 1948 if (value != 0) 1949 return RegisterOrConstant(value + offset); 1950 1951 // load indirectly to solve generation ordering problem 1952 AddressLiteral a(delayed_value_addr); 1953 load_ptr_contents(a, tmp); 1954 1955 #ifdef ASSERT 1956 tst(tmp); 1957 breakpoint_trap(zero, xcc); 1958 #endif 1959 1960 if (offset != 0) 1961 add(tmp, offset, tmp); 1962 1963 return RegisterOrConstant(tmp); 1964 } 1965 1966 1967 RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 1968 assert(d.register_or_noreg() != G0, "lost side effect"); 1969 if ((s2.is_constant() && s2.as_constant() == 0) || 1970 (s2.is_register() && s2.as_register() == G0)) { 1971 // Do nothing, just move value. 1972 if (s1.is_register()) { 1973 if (d.is_constant()) d = temp; 1974 mov(s1.as_register(), d.as_register()); 1975 return d; 1976 } else { 1977 return s1; 1978 } 1979 } 1980 1981 if (s1.is_register()) { 1982 assert_different_registers(s1.as_register(), temp); 1983 if (d.is_constant()) d = temp; 1984 andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 1985 return d; 1986 } else { 1987 if (s2.is_register()) { 1988 assert_different_registers(s2.as_register(), temp); 1989 if (d.is_constant()) d = temp; 1990 set(s1.as_constant(), temp); 1991 andn(temp, s2.as_register(), d.as_register()); 1992 return d; 1993 } else { 1994 intptr_t res = s1.as_constant() & ~s2.as_constant(); 1995 return res; 1996 } 1997 } 1998 } 1999 2000 RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 2001 assert(d.register_or_noreg() != G0, "lost side effect"); 2002 if ((s2.is_constant() && s2.as_constant() == 0) || 2003 (s2.is_register() && s2.as_register() == G0)) { 2004 // Do nothing, just move value. 
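// (Sketch: s2 is statically zero here -- either the constant 0 or the hardwired-zero register G0 -- so s1 + s2 == s1; we either move s1 into a real destination register or hand the constant back unchanged.)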
2005 if (s1.is_register()) { 2006 if (d.is_constant()) d = temp; 2007 mov(s1.as_register(), d.as_register()); 2008 return d; 2009 } else { 2010 return s1; 2011 } 2012 } 2013 2014 if (s1.is_register()) { 2015 assert_different_registers(s1.as_register(), temp); 2016 if (d.is_constant()) d = temp; 2017 add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2018 return d; 2019 } else { 2020 if (s2.is_register()) { 2021 assert_different_registers(s2.as_register(), temp); 2022 if (d.is_constant()) d = temp; 2023 add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register()); 2024 return d; 2025 } else { 2026 intptr_t res = s1.as_constant() + s2.as_constant(); 2027 return res; 2028 } 2029 } 2030 } 2031 2032 RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 2033 assert(d.register_or_noreg() != G0, "lost side effect"); 2034 if (!is_simm13(s2.constant_or_zero())) 2035 s2 = (s2.as_constant() & 0xFF); 2036 if ((s2.is_constant() && s2.as_constant() == 0) || 2037 (s2.is_register() && s2.as_register() == G0)) { 2038 // Do nothing, just move value. 2039 if (s1.is_register()) { 2040 if (d.is_constant()) d = temp; 2041 mov(s1.as_register(), d.as_register()); 2042 return d; 2043 } else { 2044 return s1; 2045 } 2046 } 2047 2048 if (s1.is_register()) { 2049 assert_different_registers(s1.as_register(), temp); 2050 if (d.is_constant()) d = temp; 2051 sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2052 return d; 2053 } else { 2054 if (s2.is_register()) { 2055 assert_different_registers(s2.as_register(), temp); 2056 if (d.is_constant()) d = temp; 2057 set(s1.as_constant(), temp); 2058 sll_ptr(temp, s2.as_register(), d.as_register()); 2059 return d; 2060 } else { 2061 intptr_t res = s1.as_constant() << s2.as_constant(); 2062 return res; 2063 } 2064 } 2065 } 2066 2067 2068 // Look up the method for a megamorphic invokeinterface call. 2069 // The target method is determined by <intf_klass, itable_index>. 2070 // The receiver klass is in recv_klass. 2071 // On success, the result will be in method_result, and execution falls through. 2072 // On failure, execution transfers to the given label. 2073 void MacroAssembler::lookup_interface_method(Register recv_klass, 2074 Register intf_klass, 2075 RegisterOrConstant itable_index, 2076 Register method_result, 2077 Register scan_temp, 2078 Register sethi_temp, 2079 Label& L_no_such_interface, 2080 bool return_method) { 2081 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp); 2082 assert(!return_method || itable_index.is_constant() || itable_index.as_register() == method_result, 2083 "caller must use same register for non-constant itable index as for method"); 2084 2085 Label L_no_such_interface_restore; 2086 bool did_save = false; 2087 if (scan_temp == noreg || sethi_temp == noreg) { 2088 Register recv_2 = recv_klass->is_global() ? recv_klass : L0; 2089 Register intf_2 = intf_klass->is_global() ? 
intf_klass : L1; 2090 assert(method_result->is_global(), "must be able to return value"); 2091 scan_temp = L2; 2092 sethi_temp = L3; 2093 save_frame_and_mov(0, recv_klass, recv_2, intf_klass, intf_2); 2094 recv_klass = recv_2; 2095 intf_klass = intf_2; 2096 did_save = true; 2097 } 2098 2099 // Compute start of first itableOffsetEntry (which is at the end of the vtable) 2100 int vtable_base = in_bytes(Klass::vtable_start_offset()); 2101 int scan_step = itableOffsetEntry::size() * wordSize; 2102 int vte_size = vtableEntry::size_in_bytes(); 2103 2104 lduw(recv_klass, in_bytes(Klass::vtable_length_offset()), scan_temp); 2105 // %%% We should store the aligned, prescaled offset in the klassoop. 2106 // Then the next several instructions would fold away. 2107 2108 int itb_offset = vtable_base; 2109 int itb_scale = exact_log2(vtableEntry::size_in_bytes()); 2110 sll(scan_temp, itb_scale, scan_temp); 2111 add(scan_temp, itb_offset, scan_temp); 2112 add(recv_klass, scan_temp, scan_temp); 2113 2114 if (return_method) { 2115 // Adjust recv_klass by scaled itable_index, so we can free itable_index. 2116 RegisterOrConstant itable_offset = itable_index; 2117 itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset); 2118 itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset); 2119 add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass); 2120 } 2121 2122 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) { 2123 // if (scan->interface() == intf) { 2124 // result = (klass + scan->offset() + itable_index); 2125 // } 2126 // } 2127 Label L_search, L_found_method; 2128 2129 for (int peel = 1; peel >= 0; peel--) { 2130 // %%%% Could load both offset and interface in one ldx, if they were 2131 // in the opposite order. This would save a load. 2132 ld_ptr(scan_temp, itableOffsetEntry::interface_offset_in_bytes(), method_result); 2133 2134 // Check that this entry is non-null. A null entry means that 2135 // the receiver class doesn't implement the interface, and wasn't the 2136 // same as when the caller was compiled. 2137 bpr(Assembler::rc_z, false, Assembler::pn, method_result, did_save ? L_no_such_interface_restore : L_no_such_interface); 2138 delayed()->cmp(method_result, intf_klass); 2139 2140 if (peel) { 2141 brx(Assembler::equal, false, Assembler::pt, L_found_method); 2142 } else { 2143 brx(Assembler::notEqual, false, Assembler::pn, L_search); 2144 // (invert the test to fall through to found_method...) 2145 } 2146 delayed()->add(scan_temp, scan_step, scan_temp); 2147 2148 if (!peel) break; 2149 2150 bind(L_search); 2151 } 2152 2153 bind(L_found_method); 2154 2155 if (return_method) { 2156 // Got a hit. 
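// Itable layout this epilog relies on (a sketch; klassVtable.hpp is authoritative): // the itable is a sequence of { interface, offset } itableOffsetEntry pairs, where // offset locates that interface's block of itableMethodEntry slots relative to the // receiver klass. scan_temp has just stepped past the matching pair, and recv_klass // was pre-adjusted above by the scaled itable_index.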
2157 int ito_offset = itableOffsetEntry::offset_offset_in_bytes(); 2158 // scan_temp[-scan_step] points to the vtable offset we need 2159 ito_offset -= scan_step; 2160 lduw(scan_temp, ito_offset, scan_temp); 2161 ld_ptr(recv_klass, scan_temp, method_result); 2162 } 2163 2164 if (did_save) { 2165 Label L_done; 2166 ba(L_done); 2167 delayed()->restore(); 2168 2169 bind(L_no_such_interface_restore); 2170 ba(L_no_such_interface); 2171 delayed()->restore(); 2172 2173 bind(L_done); 2174 } 2175 } 2176 2177 2178 // virtual method calling 2179 void MacroAssembler::lookup_virtual_method(Register recv_klass, 2180 RegisterOrConstant vtable_index, 2181 Register method_result) { 2182 assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg()); 2183 Register sethi_temp = method_result; 2184 const int base = in_bytes(Klass::vtable_start_offset()) + 2185 // method pointer offset within the vtable entry: 2186 vtableEntry::method_offset_in_bytes(); 2187 RegisterOrConstant vtable_offset = vtable_index; 2188 // Each of the following three lines potentially generates an instruction. 2189 // But the total number of address formation instructions will always be 2190 // at most two, and will often be zero. In any case, it will be optimal. 2191 // If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x). 2192 // If vtable_index is a constant, we will have at most (set B+X<<N,t; ld_ptr k,t). 2193 vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size_in_bytes()), vtable_offset); 2194 vtable_offset = regcon_inc_ptr(vtable_offset, base, vtable_offset, sethi_temp); 2195 Address vtable_entry_addr(recv_klass, ensure_simm13_or_reg(vtable_offset, sethi_temp)); 2196 ld_ptr(vtable_entry_addr, method_result); 2197 } 2198 2199 2200 void MacroAssembler::check_klass_subtype(Register sub_klass, 2201 Register super_klass, 2202 Register temp_reg, 2203 Register temp2_reg, 2204 Label& L_success) { 2205 Register sub_2 = sub_klass; 2206 Register sup_2 = super_klass; 2207 if (!sub_2->is_global()) sub_2 = L0; 2208 if (!sup_2->is_global()) sup_2 = L1; 2209 bool did_save = false; 2210 if (temp_reg == noreg || temp2_reg == noreg) { 2211 temp_reg = L2; 2212 temp2_reg = L3; 2213 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 2214 sub_klass = sub_2; 2215 super_klass = sup_2; 2216 did_save = true; 2217 } 2218 Label L_failure, L_pop_to_failure, L_pop_to_success; 2219 check_klass_subtype_fast_path(sub_klass, super_klass, 2220 temp_reg, temp2_reg, 2221 (did_save ? &L_pop_to_success : &L_success), 2222 (did_save ? 
&L_pop_to_failure : &L_failure), NULL); 2223 2224 if (!did_save) 2225 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 2226 check_klass_subtype_slow_path(sub_2, sup_2, 2227 L2, L3, L4, L5, 2228 NULL, &L_pop_to_failure); 2229 2230 // on success: 2231 bind(L_pop_to_success); 2232 restore(); 2233 ba_short(L_success); 2234 2235 // on failure: 2236 bind(L_pop_to_failure); 2237 restore(); 2238 bind(L_failure); 2239 } 2240 2241 2242 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 2243 Register super_klass, 2244 Register temp_reg, 2245 Register temp2_reg, 2246 Label* L_success, 2247 Label* L_failure, 2248 Label* L_slow_path, 2249 RegisterOrConstant super_check_offset) { 2250 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 2251 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2252 2253 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 2254 bool need_slow_path = (must_load_sco || 2255 super_check_offset.constant_or_zero() == sco_offset); 2256 2257 assert_different_registers(sub_klass, super_klass, temp_reg); 2258 if (super_check_offset.is_register()) { 2259 assert_different_registers(sub_klass, super_klass, temp_reg, 2260 super_check_offset.as_register()); 2261 } else if (must_load_sco) { 2262 assert(temp2_reg != noreg, "supply either a temp or a register offset"); 2263 } 2264 2265 Label L_fallthrough; 2266 int label_nulls = 0; 2267 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 2268 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 2269 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } 2270 assert(label_nulls <= 1 || 2271 (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path), 2272 "at most one NULL in the batch, usually"); 2273 2274 // If the pointers are equal, we are done (e.g., String[] elements). 2275 // This self-check enables sharing of secondary supertype arrays among 2276 // non-primary types such as array-of-interface. Otherwise, each such 2277 // type would need its own customized SSA. 2278 // We move this check to the front of the fast path because many 2279 // type checks are in fact trivially successful in this manner, 2280 // so we get a nicely predicted branch right at the start of the check. 2281 cmp(super_klass, sub_klass); 2282 brx(Assembler::equal, false, Assembler::pn, *L_success); 2283 delayed()->nop(); 2284 2285 // Check the supertype display: 2286 if (must_load_sco) { 2287 // The super check offset is always positive... 2288 lduw(super_klass, sco_offset, temp2_reg); 2289 super_check_offset = RegisterOrConstant(temp2_reg); 2290 // super_check_offset is register. 2291 assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register()); 2292 } 2293 ld_ptr(sub_klass, super_check_offset, temp_reg); 2294 cmp(super_klass, temp_reg); 2295 2296 // This check has worked decisively for primary supers. 2297 // Secondary supers are sought in the super_cache ('super_cache_addr'). 2298 // (Secondary supers are interfaces and very deeply nested subtypes.) 2299 // This works in the same check above because of a tricky aliasing 2300 // between the super_cache and the primary super display elements. 2301 // (The 'super_check_addr' can address either, as the case requires.) 2302 // Note that the cache is updated below if it does not help us find 2303 // what we need immediately. 2304 // So if it was a primary super, we can just fail immediately. 
2305 // Otherwise, it's the slow path for us (no success at this point). 2306 2307 // Hacked ba(), which may only be used just before L_fallthrough. 2308 #define FINAL_JUMP(label) \ 2309 if (&(label) != &L_fallthrough) { \ 2310 ba(label); delayed()->nop(); \ 2311 } 2312 2313 if (super_check_offset.is_register()) { 2314 brx(Assembler::equal, false, Assembler::pn, *L_success); 2315 delayed()->cmp(super_check_offset.as_register(), sc_offset); 2316 2317 if (L_failure == &L_fallthrough) { 2318 brx(Assembler::equal, false, Assembler::pt, *L_slow_path); 2319 delayed()->nop(); 2320 } else { 2321 brx(Assembler::notEqual, false, Assembler::pn, *L_failure); 2322 delayed()->nop(); 2323 FINAL_JUMP(*L_slow_path); 2324 } 2325 } else if (super_check_offset.as_constant() == sc_offset) { 2326 // Need a slow path; fast failure is impossible. 2327 if (L_slow_path == &L_fallthrough) { 2328 brx(Assembler::equal, false, Assembler::pt, *L_success); 2329 delayed()->nop(); 2330 } else { 2331 brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path); 2332 delayed()->nop(); 2333 FINAL_JUMP(*L_success); 2334 } 2335 } else { 2336 // No slow path; it's a fast decision. 2337 if (L_failure == &L_fallthrough) { 2338 brx(Assembler::equal, false, Assembler::pt, *L_success); 2339 delayed()->nop(); 2340 } else { 2341 brx(Assembler::notEqual, false, Assembler::pn, *L_failure); 2342 delayed()->nop(); 2343 FINAL_JUMP(*L_success); 2344 } 2345 } 2346 2347 bind(L_fallthrough); 2348 2349 #undef FINAL_JUMP 2350 } 2351 2352 2353 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 2354 Register super_klass, 2355 Register count_temp, 2356 Register scan_temp, 2357 Register scratch_reg, 2358 Register coop_reg, 2359 Label* L_success, 2360 Label* L_failure) { 2361 assert_different_registers(sub_klass, super_klass, 2362 count_temp, scan_temp, scratch_reg, coop_reg); 2363 2364 Label L_fallthrough, L_loop; 2365 int label_nulls = 0; 2366 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 2367 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 2368 assert(label_nulls <= 1, "at most one NULL in the batch"); 2369 2370 // a couple of useful fields in sub_klass: 2371 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 2372 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 2373 2374 // Do a linear scan of the secondary super-klass chain. 2375 // This code is rarely used, so simplicity is a virtue here. 2376 2377 #ifndef PRODUCT 2378 int* pst_counter = &SharedRuntime::_partial_subtype_ctr; 2379 inc_counter((address) pst_counter, count_temp, scan_temp); 2380 #endif 2381 2382 // We will consult the secondary-super array. 2383 ld_ptr(sub_klass, ss_offset, scan_temp); 2384 2385 Register search_key = super_klass; 2386 2387 // Load the array length. (A positive 32-bit load does the right thing on LP64.) 2388 lduw(scan_temp, Array<Klass*>::length_offset_in_bytes(), count_temp); 2389 2390 // Check for empty secondary super list 2391 tst(count_temp); 2392 2393 // In the array of super classes, elements are pointer sized. 2394 int element_size = wordSize; 2395 2396 // Top of search loop 2397 bind(L_loop); 2398 br(Assembler::equal, false, Assembler::pn, *L_failure); 2399 delayed()->add(scan_temp, element_size, scan_temp); 2400 2401 // Skip the array header in all array accesses.
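// The scan below is, in effect (illustrative sketch, not emitted code): // for (; count != 0; count--, scan += wordSize) // if (scan[base_offset - wordSize] == search_key) goto hit; // where the -wordSize term compensates for the pre-increment in the delay slot above.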
2402 int elem_offset = Array<Klass*>::base_offset_in_bytes(); 2403 elem_offset -= element_size; // the scan pointer was pre-incremented also 2404 2405 // Load next super to check 2406 ld_ptr( scan_temp, elem_offset, scratch_reg ); 2407 2408 // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list 2409 cmp(scratch_reg, search_key); 2410 2411 // A miss means we are NOT a subtype and need to keep looping 2412 brx(Assembler::notEqual, false, Assembler::pn, L_loop); 2413 delayed()->deccc(count_temp); // decrement trip counter in delay slot 2414 2415 // Success. Cache the super we found and proceed in triumph. 2416 st_ptr(super_klass, sub_klass, sc_offset); 2417 2418 if (L_success != &L_fallthrough) { 2419 ba(*L_success); 2420 delayed()->nop(); 2421 } 2422 2423 bind(L_fallthrough); 2424 } 2425 2426 2427 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot, 2428 Register temp_reg, 2429 int extra_slot_offset) { 2430 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 2431 int stackElementSize = Interpreter::stackElementSize; 2432 int offset = extra_slot_offset * stackElementSize; 2433 if (arg_slot.is_constant()) { 2434 offset += arg_slot.as_constant() * stackElementSize; 2435 return offset; 2436 } else { 2437 assert(temp_reg != noreg, "must specify"); 2438 sll_ptr(arg_slot.as_register(), exact_log2(stackElementSize), temp_reg); 2439 if (offset != 0) 2440 add(temp_reg, offset, temp_reg); 2441 return temp_reg; 2442 } 2443 } 2444 2445 2446 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 2447 Register temp_reg, 2448 int extra_slot_offset) { 2449 return Address(Gargs, argument_offset(arg_slot, temp_reg, extra_slot_offset)); 2450 } 2451 2452 2453 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, 2454 Register temp_reg, 2455 Label& done, Label* slow_case, 2456 BiasedLockingCounters* counters) { 2457 assert(UseBiasedLocking, "why call this otherwise?"); 2458 2459 if (PrintBiasedLockingStatistics) { 2460 assert_different_registers(obj_reg, mark_reg, temp_reg, O7); 2461 if (counters == NULL) 2462 counters = BiasedLocking::counters(); 2463 } 2464 2465 Label cas_label; 2466 2467 // Biased locking 2468 // See whether the lock is currently biased toward our thread and 2469 // whether the epoch is still valid 2470 // Note that the runtime guarantees sufficient alignment of JavaThread 2471 // pointers to allow age to be placed into low bits 2472 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout"); 2473 and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); 2474 cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label); 2475 2476 load_klass(obj_reg, temp_reg); 2477 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2478 or3(G2_thread, temp_reg, temp_reg); 2479 xor3(mark_reg, temp_reg, temp_reg); 2480 andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg); 2481 if (counters != NULL) { 2482 cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg); 2483 // Reload mark_reg as we may need it later 2484 ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg); 2485 } 2486 brx(Assembler::equal, true, Assembler::pt, done); 2487 delayed()->nop(); 2488 2489 Label try_revoke_bias; 2490 Label try_rebias; 2491 Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes()); 2492 
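// Reminder of the biased mark word layout this code relies on (roughly; markOop.hpp // is the authoritative definition): // [ JavaThread* bias owner | epoch:2 | age:4 | biased_lock:1 | lock:2 ] // biased_lock_mask_in_place covers the low three bias/lock bits.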
assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2493 2494 // At this point we know that the header has the bias pattern and 2495 // that we are not the bias owner in the current epoch. We need to 2496 // figure out more details about the state of the header in order to 2497 // know what operations can be legally performed on the object's 2498 // header. 2499 2500 // If the low three bits in the xor result aren't clear, that means 2501 // the prototype header is no longer biased and we have to revoke 2502 // the bias on this object. 2503 btst(markOopDesc::biased_lock_mask_in_place, temp_reg); 2504 brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias); 2505 2506 // Biasing is still enabled for this data type. See whether the 2507 // epoch of the current bias is still valid, meaning that the epoch 2508 // bits of the mark word are equal to the epoch bits of the 2509 // prototype header. (Note that the prototype header's epoch bits 2510 // only change at a safepoint.) If not, attempt to rebias the object 2511 // toward the current thread. Note that we must be absolutely sure 2512 // that the current epoch is invalid in order to do this because 2513 // otherwise the manipulations it performs on the mark word are 2514 // illegal. 2515 delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg); 2516 brx(Assembler::notZero, false, Assembler::pn, try_rebias); 2517 2518 // The epoch of the current bias is still valid but we know nothing 2519 // about the owner; it might be set or it might be clear. Try to 2520 // acquire the bias of the object using an atomic operation. If this 2521 // fails we will go into the runtime to revoke the object's bias. 2522 // Note that we first construct the presumed unbiased header so we 2523 // don't accidentally blow away another thread's valid bias. 2524 delayed()->and3(mark_reg, 2525 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place, 2526 mark_reg); 2527 or3(G2_thread, mark_reg, temp_reg); 2528 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2529 // If the biasing toward our thread failed, this means that 2530 // another thread succeeded in biasing it toward itself and we 2531 // need to revoke that bias. The revocation will occur in the 2532 // interpreter runtime in the slow case. 2533 cmp(mark_reg, temp_reg); 2534 if (counters != NULL) { 2535 cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg); 2536 } 2537 if (slow_case != NULL) { 2538 brx(Assembler::notEqual, true, Assembler::pn, *slow_case); 2539 delayed()->nop(); 2540 } 2541 ba_short(done); 2542 2543 bind(try_rebias); 2544 // At this point we know the epoch has expired, meaning that the 2545 // current "bias owner", if any, is actually invalid. Under these 2546 // circumstances _only_, we are allowed to use the current header's 2547 // value as the comparison value when doing the cas to acquire the 2548 // bias in the current epoch. In other words, we allow transfer of 2549 // the bias from one thread to another directly in this situation. 2550 // 2551 // FIXME: due to a lack of registers we currently blow away the age 2552 // bits in this situation. Should attempt to preserve them.
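// Rebias sketch (illustrative): temp_reg = klass->prototype_header | G2_thread, then // CAS(obj->mark, expected = mark_reg, new = temp_reg); success transfers the bias to // this thread in the new epoch, failure falls out to the slow case below.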
2553 load_klass(obj_reg, temp_reg); 2554 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2555 or3(G2_thread, temp_reg, temp_reg); 2556 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2557 // If the biasing toward our thread failed, this means that 2558 // another thread succeeded in biasing it toward itself and we 2559 // need to revoke that bias. The revocation will occur in the 2560 // interpreter runtime in the slow case. 2561 cmp(mark_reg, temp_reg); 2562 if (counters != NULL) { 2563 cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg); 2564 } 2565 if (slow_case != NULL) { 2566 brx(Assembler::notEqual, true, Assembler::pn, *slow_case); 2567 delayed()->nop(); 2568 } 2569 ba_short(done); 2570 2571 bind(try_revoke_bias); 2572 // The prototype mark in the klass doesn't have the bias bit set any 2573 // more, indicating that objects of this data type are not supposed 2574 // to be biased any more. We are going to try to reset the mark of 2575 // this object to the prototype value and fall through to the 2576 // CAS-based locking scheme. Note that if our CAS fails, it means 2577 // that another thread raced us for the privilege of revoking the 2578 // bias of this particular object, so it's okay to continue in the 2579 // normal locking code. 2580 // 2581 // FIXME: due to a lack of registers we currently blow away the age 2582 // bits in this situation. Should attempt to preserve them. 2583 load_klass(obj_reg, temp_reg); 2584 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2585 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2586 // Fall through to the normal CAS-based lock, because no matter what 2587 // the result of the above CAS, some thread must have succeeded in 2588 // removing the bias bit from the object's header. 2589 if (counters != NULL) { 2590 cmp(mark_reg, temp_reg); 2591 cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg); 2592 } 2593 2594 bind(cas_label); 2595 } 2596 2597 void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done, 2598 bool allow_delay_slot_filling) { 2599 // Check for biased locking unlock case, which is a no-op 2600 // Note: we do not have to check the thread ID for two reasons. 2601 // First, the interpreter checks for IllegalMonitorStateException at 2602 // a higher level. Second, if the bias was revoked while we held the 2603 // lock, the object could not be rebiased toward another thread, so 2604 // the bias bit would be clear. 2605 ld_ptr(mark_addr, temp_reg); 2606 and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); 2607 cmp(temp_reg, markOopDesc::biased_lock_pattern); 2608 brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done); 2609 delayed(); 2610 if (!allow_delay_slot_filling) { 2611 nop(); 2612 } 2613 } 2614 2615 2616 // compiler_lock_object() and compiler_unlock_object() are direct transliterations 2617 // of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments. 2618 // The code could be tightened up considerably. 2619 // 2620 // box->dhw disposition - post-conditions at DONE_LABEL. 2621 // - Successful inflated lock: box->dhw != 0. 2622 // Any non-zero value suffices. 2623 // Consider G2_thread, rsp, boxReg, or markOopDesc::unused_mark() 2624 // - Successful Stack-lock: box->dhw == mark. 2625 // box->dhw must contain the displaced mark word value 2626 // - Failure -- icc.ZFlag == 0 and box->dhw is undefined. 
2627 // The slow-path fast_enter() and slow_enter() operators 2628 // are responsible for setting box->dhw = NonZero (typically markOopDesc::unused_mark()). 2629 // - Biased: box->dhw is undefined 2630 // 2631 // SPARC refworkload performance - specifically jetstream and scimark - is 2632 // extremely sensitive to the size of the code emitted by compiler_lock_object 2633 // and compiler_unlock_object. Critically, the key factor is code size, not path 2634 // length. (Simple experiments to pad CLO with unexecuted NOPs demonstrate the 2635 // effect). 2636 2637 2638 void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark, 2639 Register Rbox, Register Rscratch, 2640 BiasedLockingCounters* counters, 2641 bool try_bias) { 2642 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes()); 2643 2644 verify_oop(Roop); 2645 Label done ; 2646 2647 if (counters != NULL) { 2648 inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch); 2649 } 2650 2651 if (EmitSync & 1) { 2652 mov(3, Rscratch); 2653 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2654 cmp(SP, G0); 2655 return ; 2656 } 2657 2658 if (EmitSync & 2) { 2659 2660 // Fetch object's markword 2661 ld_ptr(mark_addr, Rmark); 2662 2663 if (try_bias) { 2664 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters); 2665 } 2666 2667 // Save Rbox in Rscratch to be used for the cas operation 2668 mov(Rbox, Rscratch); 2669 2670 // set Rmark to markOop | markOopDesc::unlocked_value 2671 or3(Rmark, markOopDesc::unlocked_value, Rmark); 2672 2673 // Initialize the box. (Must happen before we update the object mark!) 2674 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2675 2676 // compare object markOop with Rmark and if equal exchange Rscratch with object markOop 2677 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2678 cas_ptr(mark_addr.base(), Rmark, Rscratch); 2679 2680 // if compare/exchange succeeded we found an unlocked object and we now have locked it 2681 // hence we are done 2682 cmp(Rmark, Rscratch); 2683 sub(Rscratch, STACK_BIAS, Rscratch); 2684 brx(Assembler::equal, false, Assembler::pt, done); 2685 delayed()->sub(Rscratch, SP, Rscratch); //pull next instruction into delay slot 2686 2687 // we did not find an unlocked object so see if this is a recursive case 2688 // sub(Rscratch, SP, Rscratch); 2689 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); 2690 andcc(Rscratch, 0xfffff003, Rscratch); 2691 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2692 bind (done); 2693 return ; 2694 } 2695 2696 Label Egress ; 2697 2698 if (EmitSync & 256) { 2699 Label IsInflated ; 2700 2701 ld_ptr(mark_addr, Rmark); // fetch obj->mark 2702 // Triage: biased, stack-locked, neutral, inflated 2703 if (try_bias) { 2704 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters); 2705 // Invariant: if control reaches this point in the emitted stream 2706 // then Rmark has not been modified. 2707 } 2708 2709 // Store mark into displaced mark field in the on-stack basic-lock "box" 2710 // Critically, this must happen before the CAS 2711 // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty. 2712 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2713 andcc(Rmark, 2, G0); 2714 brx(Assembler::notZero, false, Assembler::pn, IsInflated); 2715 delayed()-> 2716 2717 // Try stack-lock acquisition.
2718 // Beware: the 1st instruction is in a delay slot 2719 mov(Rbox, Rscratch); 2720 or3(Rmark, markOopDesc::unlocked_value, Rmark); 2721 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2722 cas_ptr(mark_addr.base(), Rmark, Rscratch); 2723 cmp(Rmark, Rscratch); 2724 brx(Assembler::equal, false, Assembler::pt, done); 2725 delayed()->sub(Rscratch, SP, Rscratch); 2726 2727 // Stack-lock attempt failed - check for recursive stack-lock. 2728 // See the comments below about how we might remove this case. 2729 sub(Rscratch, STACK_BIAS, Rscratch); 2730 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); 2731 andcc(Rscratch, 0xfffff003, Rscratch); 2732 br(Assembler::always, false, Assembler::pt, done); 2733 delayed()-> st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2734 2735 bind(IsInflated); 2736 if (EmitSync & 64) { 2737 // If m->owner != null goto IsLocked 2738 // Pessimistic form: Test-and-CAS vs CAS 2739 // The optimistic form avoids RTS->RTO cache line upgrades. 2740 ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rscratch); 2741 andcc(Rscratch, Rscratch, G0); 2742 brx(Assembler::notZero, false, Assembler::pn, done); 2743 delayed()->nop(); 2744 // m->owner == null : it's unlocked. 2745 } 2746 2747 // Try to CAS m->owner from null to Self 2748 // Invariant: if we acquire the lock then _recursions should be 0. 2749 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark); 2750 mov(G2_thread, Rscratch); 2751 cas_ptr(Rmark, G0, Rscratch); 2752 cmp(Rscratch, G0); 2753 // Intentional fall-through into done 2754 } else { 2755 // Aggressively avoid the Store-before-CAS penalty 2756 // Defer the store into box->dhw until after the CAS 2757 Label IsInflated, Recursive ; 2758 2759 // Anticipate CAS -- Avoid RTS->RTO upgrade 2760 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); 2761 2762 ld_ptr(mark_addr, Rmark); // fetch obj->mark 2763 // Triage: biased, stack-locked, neutral, inflated 2764 2765 if (try_bias) { 2766 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters); 2767 // Invariant: if control reaches this point in the emitted stream 2768 // then Rmark has not been modified. 2769 } 2770 andcc(Rmark, 2, G0); 2771 brx(Assembler::notZero, false, Assembler::pn, IsInflated); 2772 delayed()-> // Beware - dangling delay-slot 2773 2774 // Try stack-lock acquisition. 2775 // Transiently install BUSY (0) encoding in the mark word. 2776 // if the CAS of 0 into the mark was successful then we execute: 2777 // ST box->dhw = mark -- save fetched mark in on-stack basiclock box 2778 // ST obj->mark = box -- overwrite transient 0 value 2779 // This presumes TSO, of course. 2780 2781 mov(0, Rscratch); 2782 or3(Rmark, markOopDesc::unlocked_value, Rmark); 2783 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2784 cas_ptr(mark_addr.base(), Rmark, Rscratch); 2785 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); 2786 cmp(Rscratch, Rmark); 2787 brx(Assembler::notZero, false, Assembler::pn, Recursive); 2788 delayed()->st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2789 if (counters != NULL) { 2790 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch); 2791 } 2792 ba(done); 2793 delayed()->st_ptr(Rbox, mark_addr); 2794 2795 bind(Recursive); 2796 // Stack-lock attempt failed - check for recursive stack-lock. 2797 // Tests show that we can remove the recursive case with no impact 2798 // on refworkload 0.83. 
If we need to reduce the size of the code 2799 // emitted by compiler_lock_object() the recursive case is a perfect 2800 // candidate. 2801 // 2802 // A more extreme idea is to always inflate on stack-lock recursion. 2803 // This lets us eliminate the recursive checks in compiler_lock_object 2804 // and compiler_unlock_object and the (box->dhw == 0) encoding. 2805 // A brief experiment - requiring changes to synchronizer.cpp and the 2806 // interpreter - showed a performance *increase*. In the same experiment I eliminated 2807 // the fast-path stack-lock code from the interpreter and always passed 2808 // control to the "slow" operators in synchronizer.cpp. 2809 2810 // RScratch contains the fetched obj->mark value from the failed CAS. 2811 sub(Rscratch, STACK_BIAS, Rscratch); 2812 sub(Rscratch, SP, Rscratch); 2813 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); 2814 andcc(Rscratch, 0xfffff003, Rscratch); 2815 if (counters != NULL) { 2816 // Accounting needs the Rscratch register 2817 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2818 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch); 2819 ba_short(done); 2820 } else { 2821 ba(done); 2822 delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2823 } 2824 2825 bind (IsInflated); 2826 2827 // Try to CAS m->owner from null to Self 2828 // Invariant: if we acquire the lock then _recursions should be 0. 2829 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark); 2830 mov(G2_thread, Rscratch); 2831 cas_ptr(Rmark, G0, Rscratch); 2832 andcc(Rscratch, Rscratch, G0); // set ICCs for done: icc.zf iff success 2833 // set icc.zf : 1=success 0=failure 2834 // ST box->displaced_header = NonZero. 2835 // Any non-zero value suffices: 2836 // markOopDesc::unused_mark(), G2_thread, RBox, RScratch, rsp, etc. 2837 st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2838 // Intentional fall-through into done 2839 } 2840 2841 bind (done); 2842 } 2843 2844 void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark, 2845 Register Rbox, Register Rscratch, 2846 bool try_bias) { 2847 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes()); 2848 2849 Label done ; 2850 2851 if (EmitSync & 4) { 2852 cmp(SP, G0); 2853 return ; 2854 } 2855 2856 if (EmitSync & 8) { 2857 if (try_bias) { 2858 biased_locking_exit(mark_addr, Rscratch, done); 2859 } 2860 2861 // Test first if it is a fast recursive unlock 2862 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark); 2863 br_null_short(Rmark, Assembler::pt, done); 2864 2865 // Check if it is still a lightweight lock, this is true if we see 2866 // the stack address of the basicLock in the markOop of the object 2867 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2868 cas_ptr(mark_addr.base(), Rbox, Rmark); 2869 ba(done); 2870 delayed()->cmp(Rbox, Rmark); 2871 bind(done); 2872 return ; 2873 } 2874 2875 // Beware ... If the aggregate size of the code emitted by CLO and CUO 2876 // is too large, performance rolls abruptly off a cliff. 2877 // This could be related to inlining policies, code cache management, or 2878 // I$ effects.
2879 Label LStacked ; 2880 2881 if (try_bias) { 2882 // TODO: eliminate redundant LDs of obj->mark 2883 biased_locking_exit(mark_addr, Rscratch, done); 2884 } 2885 2886 ld_ptr(Roop, oopDesc::mark_offset_in_bytes(), Rmark); 2887 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch); 2888 andcc(Rscratch, Rscratch, G0); 2889 brx(Assembler::zero, false, Assembler::pn, done); 2890 delayed()->nop(); // consider: relocate fetch of mark, above, into this DS 2891 andcc(Rmark, 2, G0); 2892 brx(Assembler::zero, false, Assembler::pt, LStacked); 2893 delayed()->nop(); 2894 2895 // It's inflated 2896 // Conceptually we need a #loadstore|#storestore "release" MEMBAR before 2897 // the ST of 0 into _owner which releases the lock. This prevents loads 2898 // and stores within the critical section from reordering (floating) 2899 // past the store that releases the lock. But TSO is a strong memory model 2900 // and that particular flavor of barrier is a noop, so we can safely elide it. 2901 // Note that we use 1-0 locking by default for the inflated case. We 2902 // close the resultant (and rare) race by having contended threads in 2903 // monitorenter periodically poll _owner. 2904 2905 if (EmitSync & 1024) { 2906 // Emit code to check that _owner == Self 2907 // We could fold the _owner test into subsequent code more efficiently 2908 // than using a stand-alone check, but since _owner checking is off by 2909 // default we don't bother. We also might consider predicating the 2910 // _owner==Self check on Xcheck:jni or running on a debug build. 2911 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), Rscratch); 2912 orcc(Rscratch, G0, G0); 2913 brx(Assembler::notZero, false, Assembler::pn, done); 2914 delayed()->nop(); 2915 } 2916 2917 if (EmitSync & 512) { 2918 // classic lock release code absent 1-0 locking 2919 // m->Owner = null; 2920 // membar #storeload 2921 // if (m->cxq|m->EntryList) == null goto Success 2922 // if (m->succ != null) goto Success 2923 // if CAS (&m->Owner,0,Self) != 0 goto Success 2924 // goto SlowPath 2925 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), Rbox); 2926 orcc(Rbox, G0, G0); 2927 brx(Assembler::notZero, false, Assembler::pn, done); 2928 delayed()->nop(); 2929 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 2930 if (os::is_MP()) { membar(StoreLoad); } 2931 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)), Rscratch); 2932 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)), Rbox); 2933 orcc(Rbox, Rscratch, G0); 2934 brx(Assembler::zero, false, Assembler::pt, done); 2935 delayed()-> 2936 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch); 2937 andcc(Rscratch, Rscratch, G0); 2938 brx(Assembler::notZero, false, Assembler::pt, done); 2939 delayed()->andcc(G0, G0, G0); 2940 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark); 2941 mov(G2_thread, Rscratch); 2942 cas_ptr(Rmark, G0, Rscratch); 2943 cmp(Rscratch, G0); 2944 // invert icc.zf and goto done 2945 brx(Assembler::notZero, false, Assembler::pt, done); 2946 delayed()->cmp(G0, G0); 2947 br(Assembler::always, false, Assembler::pt, done); 2948 delayed()->cmp(G0, 1); 2949 } else { 2950 // 1-0 form : avoids CAS and MEMBAR in the common case 2951 // Do not bother to ratify that m->Owner == Self. 
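// 1-0 exit sketch (illustrative; compare the classic form spelled out above): // if (m->recursions != 0) goto SlowPath // m->Owner = null -- the release; no CAS and, in the common case, no membar yet // if ((m->cxq | m->EntryList) == null) goto Success // membar #storeload // if (m->succ != null) goto Success // if (CAS(&m->Owner, null, Self) != null) goto Success // goto SlowPath -- we reacquired the lock and must perform a full exit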
2952 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), Rbox); 2953 orcc(Rbox, G0, G0); 2954 brx(Assembler::notZero, false, Assembler::pn, done); 2955 delayed()-> 2956 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)), Rscratch); 2957 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)), Rbox); 2958 orcc(Rbox, Rscratch, G0); 2959 if (EmitSync & 16384) { 2960 // As an optional optimization, if (EntryList|cxq) != null and _succ is null then 2961 // we should transfer control directly to the slow-path. 2962 // This test makes the reacquire operation below very infrequent. 2963 // The logic is equivalent to : 2964 // if (cxq|EntryList) == null : Owner=null; goto Success 2965 // if succ == null : goto SlowPath 2966 // Owner=null; membar #storeload 2967 // if succ != null : goto Success 2968 // if CAS(&Owner,null,Self) != null goto Success 2969 // goto SlowPath 2970 brx(Assembler::zero, true, Assembler::pt, done); 2971 delayed()-> 2972 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 2973 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch); 2974 andcc(Rscratch, Rscratch, G0) ; 2975 brx(Assembler::zero, false, Assembler::pt, done); 2976 delayed()->orcc(G0, 1, G0); 2977 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 2978 } else { 2979 brx(Assembler::zero, false, Assembler::pt, done); 2980 delayed()-> 2981 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 2982 } 2983 if (os::is_MP()) { membar(StoreLoad); } 2984 // Check that _succ is (or remains) non-zero 2985 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch); 2986 andcc(Rscratch, Rscratch, G0); 2987 brx(Assembler::notZero, false, Assembler::pt, done); 2988 delayed()->andcc(G0, G0, G0); 2989 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark); 2990 mov(G2_thread, Rscratch); 2991 cas_ptr(Rmark, G0, Rscratch); 2992 cmp(Rscratch, G0); 2993 // invert icc.zf and goto done 2994 // A slightly better v8+/v9 idiom would be the following: 2995 // movrnz Rscratch,1,Rscratch 2996 // ba done 2997 // xorcc Rscratch,1,G0 2998 // In v8+ mode the idiom would be valid IFF Rscratch was a G or O register 2999 brx(Assembler::notZero, false, Assembler::pt, done); 3000 delayed()->cmp(G0, G0); 3001 br(Assembler::always, false, Assembler::pt, done); 3002 delayed()->cmp(G0, 1); 3003 } 3004 3005 bind (LStacked); 3006 // Consider: we could replace the expensive CAS in the exit 3007 // path with a simple ST of the displaced mark value fetched from 3008 // the on-stack basiclock box. That admits a race where a thread T2 3009 // in the slow lock path -- inflating with monitor M -- could race a 3010 // thread T1 in the fast unlock path, resulting in a missed wakeup for T2. 3011 // More precisely T1 in the stack-lock unlock path could "stomp" the 3012 // inflated mark value M installed by T2, resulting in an orphan 3013 // object monitor M and T2 becoming stranded. We can remedy that situation 3014 // by having T2 periodically poll the object's mark word using timed wait 3015 // operations. If T2 discovers that a stomp has occurred it vacates 3016 // the monitor M and wakes any other threads stranded on the now-orphan M. 3017 // In addition the monitor scavenger, which performs deflation, 3018 // would also need to check for orphan monitors and stranded threads. 3019 // 3020 // Finally, inflation is also used when T2 needs to assign a hashCode 3021 // to O and O is stack-locked by T1.
The "stomp" race could cause 3022 // an assigned hashCode value to be lost. We can avoid that condition 3023 // and provide the necessary hashCode stability invariants by ensuring 3024 // that hashCode generation is idempotent between copying GCs. 3025 // For example we could compute the hashCode of an object O as 3026 // O's heap address XOR some high quality RNG value that is refreshed 3027 // at GC-time. The monitor scavenger would install the hashCode 3028 // found in any orphan monitors. Again, the mechanism admits a 3029 // lost-update "stomp" WAW race but detects and recovers as needed. 3030 // 3031 // A prototype implementation showed excellent results, although 3032 // the scavenger and timeout code was rather involved. 3033 3034 cas_ptr(mark_addr.base(), Rbox, Rscratch); 3035 cmp(Rbox, Rscratch); 3036 // Intentional fall through into done ... 3037 3038 bind(done); 3039 } 3040 3041 3042 3043 void MacroAssembler::print_CPU_state() { 3044 // %%%%% need to implement this 3045 } 3046 3047 void MacroAssembler::verify_FPU(int stack_depth, const char* s) { 3048 // %%%%% need to implement this 3049 } 3050 3051 void MacroAssembler::push_IU_state() { 3052 // %%%%% need to implement this 3053 } 3054 3055 3056 void MacroAssembler::pop_IU_state() { 3057 // %%%%% need to implement this 3058 } 3059 3060 3061 void MacroAssembler::push_FPU_state() { 3062 // %%%%% need to implement this 3063 } 3064 3065 3066 void MacroAssembler::pop_FPU_state() { 3067 // %%%%% need to implement this 3068 } 3069 3070 3071 void MacroAssembler::push_CPU_state() { 3072 // %%%%% need to implement this 3073 } 3074 3075 3076 void MacroAssembler::pop_CPU_state() { 3077 // %%%%% need to implement this 3078 } 3079 3080 3081 3082 void MacroAssembler::verify_tlab() { 3083 #ifdef ASSERT 3084 if (UseTLAB && VerifyOops) { 3085 Label next, next2, ok; 3086 Register t1 = L0; 3087 Register t2 = L1; 3088 Register t3 = L2; 3089 3090 save_frame(0); 3091 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 3092 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2); 3093 or3(t1, t2, t3); 3094 cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next); 3095 STOP("assert(top >= start)"); 3096 should_not_reach_here(); 3097 3098 bind(next); 3099 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 3100 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2); 3101 or3(t3, t2, t3); 3102 cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2); 3103 STOP("assert(top <= end)"); 3104 should_not_reach_here(); 3105 3106 bind(next2); 3107 and3(t3, MinObjAlignmentInBytesMask, t3); 3108 cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok); 3109 STOP("assert(aligned)"); 3110 should_not_reach_here(); 3111 3112 bind(ok); 3113 restore(); 3114 } 3115 #endif 3116 } 3117 3118 3119 void MacroAssembler::eden_allocate( 3120 Register obj, // result: pointer to object after successful allocation 3121 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 3122 int con_size_in_bytes, // object size in bytes if known at compile time 3123 Register t1, // temp register 3124 Register t2, // temp register 3125 Label& slow_case // continuation point if fast allocation fails 3126 ){ 3127 // make sure arguments make sense 3128 assert_different_registers(obj, var_size_in_bytes, t1, t2); 3129 assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size"); 3130 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment"); 3131 3132 if (!Universe::heap()->supports_inline_contig_alloc()) { 3133 // No allocation in the shared eden. 3134 ba(slow_case); 3135 delayed()->nop(); 3136 } else { 3137 // get eden boundaries 3138 // note: we need both top & top_addr! 3139 const Register top_addr = t1; 3140 const Register end = t2; 3141 3142 CollectedHeap* ch = Universe::heap(); 3143 set((intx)ch->top_addr(), top_addr); 3144 intx delta = (intx)ch->end_addr() - (intx)ch->top_addr(); 3145 ld_ptr(top_addr, delta, end); 3146 ld_ptr(top_addr, 0, obj); 3147 3148 // try to allocate 3149 Label retry; 3150 bind(retry); 3151 #ifdef ASSERT 3152 // make sure eden top is properly aligned 3153 { 3154 Label L; 3155 btst(MinObjAlignmentInBytesMask, obj); 3156 br(Assembler::zero, false, Assembler::pt, L); 3157 delayed()->nop(); 3158 STOP("eden top is not properly aligned"); 3159 bind(L); 3160 } 3161 #endif // ASSERT 3162 const Register free = end; 3163 sub(end, obj, free); // compute amount of free space 3164 if (var_size_in_bytes->is_valid()) { 3165 // size is unknown at compile time 3166 cmp(free, var_size_in_bytes); 3167 brx(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case 3168 delayed()->add(obj, var_size_in_bytes, end); 3169 } else { 3170 // size is known at compile time 3171 cmp(free, con_size_in_bytes); 3172 brx(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case 3173 delayed()->add(obj, con_size_in_bytes, end); 3174 } 3175 // Compare obj with the value at top_addr; if still equal, swap the value of 3176 // end with the value at top_addr. If not equal, read the value at top_addr 3177 // into end. 3178 cas_ptr(top_addr, obj, end); 3179 // if someone beat us on the allocation, try again, otherwise continue 3180 cmp(obj, end); 3181 brx(Assembler::notEqual, false, Assembler::pn, retry); 3182 delayed()->mov(end, obj); // nop if successful since obj == end 3183 3184 #ifdef ASSERT 3185 // make sure eden top is properly aligned 3186 { 3187 Label L; 3188 const Register top_addr = t1; 3189 3190 set((intx)ch->top_addr(), top_addr); 3191 ld_ptr(top_addr, 0, top_addr); 3192 btst(MinObjAlignmentInBytesMask, top_addr); 3193 br(Assembler::zero, false, Assembler::pt, L); 3194 delayed()->nop(); 3195 STOP("eden top is not properly aligned"); 3196 bind(L); 3197 } 3198 #endif // ASSERT 3199 } 3200 } 3201 3202 3203 void MacroAssembler::tlab_allocate( 3204 Register obj, // result: pointer to object after successful allocation 3205 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 3206 int con_size_in_bytes, // object size in bytes if known at compile time 3207 Register t1, // temp register 3208 Label& slow_case // continuation point if fast allocation fails 3209 ){ 3210 // make sure arguments make sense 3211 assert_different_registers(obj, var_size_in_bytes, t1); 3212 assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size"); 3213 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment"); 3214 3215 const Register free = t1; 3216 3217 verify_tlab(); 3218 3219 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj); 3220 3221 // calculate amount of free space 3222 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free); 3223 sub(free, obj, free); 3224 3225 Label done; 3226 if (var_size_in_bytes == noreg) { 3227 cmp(free, con_size_in_bytes); 3228 } else { 3229
cmp(free, var_size_in_bytes); 3230 } 3231 br(Assembler::less, false, Assembler::pn, slow_case); 3232 // calculate the new top pointer 3233 if (var_size_in_bytes == noreg) { 3234 delayed()->add(obj, con_size_in_bytes, free); 3235 } else { 3236 delayed()->add(obj, var_size_in_bytes, free); 3237 } 3238 3239 bind(done); 3240 3241 #ifdef ASSERT 3242 // make sure new free pointer is properly aligned 3243 { 3244 Label L; 3245 btst(MinObjAlignmentInBytesMask, free); 3246 br(Assembler::zero, false, Assembler::pt, L); 3247 delayed()->nop(); 3248 STOP("updated TLAB free is not properly aligned"); 3249 bind(L); 3250 } 3251 #endif // ASSERT 3252 3253 // update the tlab top pointer 3254 st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset())); 3255 verify_tlab(); 3256 } 3257 3258 void MacroAssembler::zero_memory(Register base, Register index) { 3259 assert_different_registers(base, index); 3260 Label loop; 3261 bind(loop); 3262 subcc(index, HeapWordSize, index); 3263 brx(Assembler::greaterEqual, true, Assembler::pt, loop); 3264 delayed()->st_ptr(G0, base, index); 3265 } 3266 3267 void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes, 3268 Register t1, Register t2) { 3269 // Bump total bytes allocated by this thread 3270 assert(t1->is_global(), "must be global reg"); // so all 64 bits are saved on a context switch 3271 assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2); 3272 // v8 support has gone the way of the dodo 3273 ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1); 3274 add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1); 3275 stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset())); 3276 } 3277 3278 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) { 3279 switch (cond) { 3280 // Note some conditions are synonyms for others 3281 case Assembler::never: return Assembler::always; 3282 case Assembler::zero: return Assembler::notZero; 3283 case Assembler::lessEqual: return Assembler::greater; 3284 case Assembler::less: return Assembler::greaterEqual; 3285 case Assembler::lessEqualUnsigned: return Assembler::greaterUnsigned; 3286 case Assembler::lessUnsigned: return Assembler::greaterEqualUnsigned; 3287 case Assembler::negative: return Assembler::positive; 3288 case Assembler::overflowSet: return Assembler::overflowClear; 3289 case Assembler::always: return Assembler::never; 3290 case Assembler::notZero: return Assembler::zero; 3291 case Assembler::greater: return Assembler::lessEqual; 3292 case Assembler::greaterEqual: return Assembler::less; 3293 case Assembler::greaterUnsigned: return Assembler::lessEqualUnsigned; 3294 case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned; 3295 case Assembler::positive: return Assembler::negative; 3296 case Assembler::overflowClear: return Assembler::overflowSet; 3297 } 3298 3299 ShouldNotReachHere(); return Assembler::overflowClear; 3300 } 3301 3302 void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr, 3303 Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) { 3304 Condition negated_cond = negate_condition(cond); 3305 Label L; 3306 brx(negated_cond, false, Assembler::pt, L); 3307 delayed()->nop(); 3308 inc_counter(counter_ptr, Rtmp1, Rtmp2); 3309 bind(L); 3310 } 3311 3312 void MacroAssembler::inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2) { 3313 AddressLiteral addrlit(counter_addr); 3314 sethi(addrlit, Rtmp1); // Move hi22 bits into temporary register. 
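// SPARC address-formation sketch: sethi has deposited bits 31..10 of counter_addr // into Rtmp1; the remaining low 10 bits ride along as the simm13 displacement below, // so the effective address of the ld/st is Rtmp1 + low10(counter_addr).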
3315 Address addr(Rtmp1, addrlit.low10()); // Build an address with low10 bits. 3316 ld(addr, Rtmp2); 3317 inc(Rtmp2); 3318 st(Rtmp2, addr); 3319 } 3320 3321 void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) { 3322 inc_counter((address) counter_addr, Rtmp1, Rtmp2); 3323 } 3324 3325 SkipIfEqual::SkipIfEqual( 3326 MacroAssembler* masm, Register temp, const bool* flag_addr, 3327 Assembler::Condition condition) { 3328 _masm = masm; 3329 AddressLiteral flag(flag_addr); 3330 _masm->sethi(flag, temp); 3331 _masm->ldub(temp, flag.low10(), temp); 3332 _masm->tst(temp); 3333 _masm->br(condition, false, Assembler::pt, _label); 3334 _masm->delayed()->nop(); 3335 } 3336 3337 SkipIfEqual::~SkipIfEqual() { 3338 _masm->bind(_label); 3339 } 3340 3341 void MacroAssembler::bang_stack_with_offset(int offset) { 3342 // stack grows down, caller passes positive offset 3343 assert(offset > 0, "must bang with negative offset"); 3344 set((-offset)+STACK_BIAS, G3_scratch); 3345 st(G0, SP, G3_scratch); 3346 } 3347 3348 // Writes to stack successive pages until offset reached to check for 3349 // stack overflow + shadow pages. This clobbers tsp and scratch. 3350 void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp, 3351 Register Rscratch) { 3352 // Use stack pointer in temp stack pointer 3353 mov(SP, Rtsp); 3354 3355 // Bang stack for total size given plus stack shadow page size. 3356 // Bang one page at a time because a large size can overflow yellow and 3357 // red zones (the bang will fail but stack overflow handling can't tell that 3358 // it was a stack overflow bang vs a regular segv). 3359 int offset = os::vm_page_size(); 3360 Register Roffset = Rscratch; 3361 3362 Label loop; 3363 bind(loop); 3364 set((-offset)+STACK_BIAS, Rscratch); 3365 st(G0, Rtsp, Rscratch); 3366 set(offset, Roffset); 3367 sub(Rsize, Roffset, Rsize); 3368 cmp(Rsize, G0); 3369 br(Assembler::greater, false, Assembler::pn, loop); 3370 delayed()->sub(Rtsp, Roffset, Rtsp); 3371 3372 // Bang down shadow pages too. 3373 // At this point, (tmp-0) is the last address touched, so don't 3374 // touch it again. (It was touched as (tmp-pagesize) but then tmp 3375 // was post-decremented.) Skip this address by starting at i=1, and 3376 // touch a few more pages below. N.B. It is important to touch all 3377 // the way down to and including i=StackShadowPages. 3378 for (int i = 1; i < JavaThread::stack_shadow_zone_size() / os::vm_page_size(); i++) { 3379 set((-i*offset)+STACK_BIAS, Rscratch); 3380 st(G0, Rtsp, Rscratch); 3381 } 3382 } 3383 3384 void MacroAssembler::reserved_stack_check() { 3385 // testing if reserved zone needs to be enabled 3386 Label no_reserved_zone_enabling; 3387 3388 ld_ptr(G2_thread, JavaThread::reserved_stack_activation_offset(), G4_scratch); 3389 cmp_and_brx_short(SP, G4_scratch, Assembler::lessUnsigned, Assembler::pt, no_reserved_zone_enabling); 3390 3391 call_VM_leaf(L0, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), G2_thread); 3392 3393 AddressLiteral stub(StubRoutines::throw_delayed_StackOverflowError_entry()); 3394 jump_to(stub, G4_scratch); 3395 delayed()->restore(); 3396 3397 should_not_reach_here(); 3398 3399 bind(no_reserved_zone_enabling); 3400 } 3401 // ((OopHandle)result).resolve(); 3402 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) { 3403 // OopHandle::resolve is an indirection. 
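// In plain C++ terms (illustrative only) this is
//   result = *(oop*)result;
// with the load routed through the GC access barrier by access_load_at below.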
3404 access_load_at(T_OBJECT, IN_NATIVE, Address(result, 0), result, tmp); 3405 } 3406 3407 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) { 3408 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 3409 ld_ptr(method, in_bytes(Method::const_offset()), mirror); 3410 ld_ptr(mirror, in_bytes(ConstMethod::constants_offset()), mirror); 3411 ld_ptr(mirror, ConstantPool::pool_holder_offset_in_bytes(), mirror); 3412 ld_ptr(mirror, mirror_offset, mirror); 3413 resolve_oop_handle(mirror, tmp); 3414 } 3415 3416 void MacroAssembler::load_klass(Register src_oop, Register klass) { 3417 // The number of bytes in this code is used by 3418 // MachCallDynamicJavaNode::ret_addr_offset() 3419 // if this changes, change that. 3420 if (UseCompressedClassPointers) { 3421 lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass); 3422 decode_klass_not_null(klass); 3423 } else { 3424 ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass); 3425 } 3426 } 3427 3428 void MacroAssembler::store_klass(Register klass, Register dst_oop) { 3429 if (UseCompressedClassPointers) { 3430 assert(dst_oop != klass, "not enough registers"); 3431 encode_klass_not_null(klass); 3432 st(klass, dst_oop, oopDesc::klass_offset_in_bytes()); 3433 } else { 3434 st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes()); 3435 } 3436 } 3437 3438 void MacroAssembler::store_klass_gap(Register s, Register d) { 3439 if (UseCompressedClassPointers) { 3440 assert(s != d, "not enough registers"); 3441 st(s, d, oopDesc::klass_gap_offset_in_bytes()); 3442 } 3443 } 3444 3445 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, 3446 Register src, Address dst, Register tmp) { 3447 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 3448 decorators = AccessInternal::decorator_fixup(decorators); 3449 bool as_raw = (decorators & AS_RAW) != 0; 3450 if (as_raw) { 3451 bs->BarrierSetAssembler::store_at(this, decorators, type, src, dst, tmp); 3452 } else { 3453 bs->store_at(this, decorators, type, src, dst, tmp); 3454 } 3455 } 3456 3457 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, 3458 Address src, Register dst, Register tmp) { 3459 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 3460 decorators = AccessInternal::decorator_fixup(decorators); 3461 bool as_raw = (decorators & AS_RAW) != 0; 3462 if (as_raw) { 3463 bs->BarrierSetAssembler::load_at(this, decorators, type, src, dst, tmp); 3464 } else { 3465 bs->load_at(this, decorators, type, src, dst, tmp); 3466 } 3467 } 3468 3469 void MacroAssembler::load_heap_oop(const Address& s, Register d, Register tmp, DecoratorSet decorators) { 3470 access_load_at(T_OBJECT, IN_HEAP | decorators, s, d, tmp); 3471 } 3472 3473 void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d, Register tmp, DecoratorSet decorators) { 3474 access_load_at(T_OBJECT, IN_HEAP | decorators, Address(s1, s2), d, tmp); 3475 } 3476 3477 void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d, Register tmp, DecoratorSet decorators) { 3478 access_load_at(T_OBJECT, IN_HEAP | decorators, Address(s1, simm13a), d, tmp); 3479 } 3480 3481 void MacroAssembler::load_heap_oop(Register s1, RegisterOrConstant s2, Register d, Register tmp, DecoratorSet decorators) { 3482 if (s2.is_constant()) { 3483 access_load_at(T_OBJECT, IN_HEAP | decorators, Address(s1, s2.as_constant()), d, tmp); 3484 } else { 3485 access_load_at(T_OBJECT, IN_HEAP | decorators, Address(s1, s2.as_register()), 
d, tmp);
3486 }
3487 }
3488
3489 void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2, Register tmp, DecoratorSet decorators) {
3490 access_store_at(T_OBJECT, IN_HEAP | decorators, d, Address(s1, s2), tmp);
3491 }
3492
3493 void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a, Register tmp, DecoratorSet decorators) {
3494 access_store_at(T_OBJECT, IN_HEAP | decorators, d, Address(s1, simm13a), tmp);
3495 }
3496
3497 void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset, Register tmp, DecoratorSet decorators) {
3498 if (a.has_index()) {
3499 assert(!a.has_disp(), "not supported yet");
3500 assert(offset == 0, "not supported yet");
3501 access_store_at(T_OBJECT, IN_HEAP | decorators, d, Address(a.base(), a.index()), tmp);
3502 } else {
3503 access_store_at(T_OBJECT, IN_HEAP | decorators, d, Address(a.base(), a.disp() + offset), tmp);
3504 }
3505 }
3506
3507
3508 void MacroAssembler::encode_heap_oop(Register src, Register dst) {
3509 assert (UseCompressedOops, "must be compressed");
3510 assert (Universe::heap() != NULL, "java heap should be initialized");
3511 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3512 verify_oop(src);
3513 if (Universe::narrow_oop_base() == NULL) {
3514 srlx(src, LogMinObjAlignmentInBytes, dst);
3515 return;
3516 }
3517 Label done;
3518 if (src == dst) {
3519 // optimize for frequent case src == dst
3520 bpr(rc_nz, true, Assembler::pt, src, done);
3521 delayed() -> sub(src, G6_heapbase, dst); // annulled if not taken
3522 bind(done);
3523 srlx(src, LogMinObjAlignmentInBytes, dst);
3524 } else {
3525 bpr(rc_z, false, Assembler::pn, src, done);
3526 delayed() -> mov(G0, dst);
3527 // could be moved before the branch with an annulled delay slot,
3528 // but that may add some unneeded work when decoding null
3529 sub(src, G6_heapbase, dst);
3530 srlx(dst, LogMinObjAlignmentInBytes, dst);
3531 bind(done);
3532 }
3533 }
3534
3535
3536 void MacroAssembler::encode_heap_oop_not_null(Register r) {
3537 assert (UseCompressedOops, "must be compressed");
3538 assert (Universe::heap() != NULL, "java heap should be initialized");
3539 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3540 verify_oop(r);
3541 if (Universe::narrow_oop_base() != NULL)
3542 sub(r, G6_heapbase, r);
3543 srlx(r, LogMinObjAlignmentInBytes, r);
3544 }
3545
3546 void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) {
3547 assert (UseCompressedOops, "must be compressed");
3548 assert (Universe::heap() != NULL, "java heap should be initialized");
3549 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3550 verify_oop(src);
3551 if (Universe::narrow_oop_base() == NULL) {
3552 srlx(src, LogMinObjAlignmentInBytes, dst);
3553 } else {
3554 sub(src, G6_heapbase, dst);
3555 srlx(dst, LogMinObjAlignmentInBytes, dst);
3556 }
3557 }
3558
3559 // Same algorithm as oops.inline.hpp decode_heap_oop.
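
// As an illustrative sketch (not compiled), the compressed-oop mapping that
// the encode_heap_oop* routines above and the decode_heap_oop* routines below
// implement, written as plain C. Here 'base' stands for
// Universe::narrow_oop_base() (cached in G6_heapbase) and 'shift' for
// Universe::narrow_oop_shift() (== LogMinObjAlignmentInBytes):
//
//   uint32_t encode(uintptr_t oop, uintptr_t base, int shift) {
//     if (oop == 0) return 0;                   // null encodes to null
//     return (uint32_t)((oop - base) >> shift);
//   }
//   uintptr_t decode(uint32_t narrow, uintptr_t base, int shift) {
//     uintptr_t v = (uintptr_t)narrow << shift;
//     return v == 0 ? 0 : v + base;             // add base only if non-null
//   }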
3560 void MacroAssembler::decode_heap_oop(Register src, Register dst) {
3561 assert (UseCompressedOops, "must be compressed");
3562 assert (Universe::heap() != NULL, "java heap should be initialized");
3563 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3564 sllx(src, LogMinObjAlignmentInBytes, dst);
3565 if (Universe::narrow_oop_base() != NULL) {
3566 Label done;
3567 bpr(rc_nz, true, Assembler::pt, dst, done);
3568 delayed() -> add(dst, G6_heapbase, dst); // annulled if not taken
3569 bind(done);
3570 }
3571 verify_oop(dst);
3572 }
3573
3574 void MacroAssembler::decode_heap_oop_not_null(Register r) {
3575 // Do not add assert code to this unless you change vtableStubs_sparc.cpp
3576 // pd_code_size_limit.
3577 // Also do not verify_oop as this is called by verify_oop.
3578 assert (UseCompressedOops, "must be compressed");
3579 assert (Universe::heap() != NULL, "java heap should be initialized");
3580 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3581 sllx(r, LogMinObjAlignmentInBytes, r);
3582 if (Universe::narrow_oop_base() != NULL)
3583 add(r, G6_heapbase, r);
3584 }
3585
3586 void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
3587 // Do not add assert code to this unless you change vtableStubs_sparc.cpp
3588 // pd_code_size_limit.
3589 // Also do not verify_oop as this is called by verify_oop.
3590 assert (UseCompressedOops, "must be compressed");
3591 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3592 sllx(src, LogMinObjAlignmentInBytes, dst);
3593 if (Universe::narrow_oop_base() != NULL)
3594 add(dst, G6_heapbase, dst);
3595 }
3596
3597 void MacroAssembler::encode_klass_not_null(Register r) {
3598 assert (UseCompressedClassPointers, "must be compressed");
3599 if (Universe::narrow_klass_base() != NULL) {
3600 assert(r != G6_heapbase, "bad register choice");
3601 set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
3602 sub(r, G6_heapbase, r);
3603 if (Universe::narrow_klass_shift() != 0) {
3604 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
3605 srlx(r, LogKlassAlignmentInBytes, r);
3606 }
3607 reinit_heapbase();
3608 } else {
3609 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
3610 srlx(r, Universe::narrow_klass_shift(), r);
3611 }
3612 }
3613
3614 void MacroAssembler::encode_klass_not_null(Register src, Register dst) {
3615 if (src == dst) {
3616 encode_klass_not_null(src);
3617 } else {
3618 assert (UseCompressedClassPointers, "must be compressed");
3619 if (Universe::narrow_klass_base() != NULL) {
3620 set((intptr_t)Universe::narrow_klass_base(), dst);
3621 sub(src, dst, dst);
3622 if (Universe::narrow_klass_shift() != 0) {
3623 srlx(dst, LogKlassAlignmentInBytes, dst);
3624 }
3625 } else {
3626 // shift src into dst
3627 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
3628 srlx(src, Universe::narrow_klass_shift(), dst);
3629 }
3630 }
3631 }
3632
3633 // Function instr_size_for_decode_klass_not_null() counts the instructions
3634 // generated by decode_klass_not_null() and reinit_heapbase(). Hence, if
3635 // the instructions they generate change, then this method needs to be updated.
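
// Worked example (illustrative; assuming each internal set() of a base
// pointer expands to 2 instructions and narrow_klass_shift() != 0):
// num_instrs = 1 (add) + 2 (set of klass base) + 2 (set in reinit_heapbase)
// + 1 (sllx) = 6 instructions, i.e. 6 * BytesPerInstWord = 24 bytes of code.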
3636 int MacroAssembler::instr_size_for_decode_klass_not_null() { 3637 assert (UseCompressedClassPointers, "only for compressed klass ptrs"); 3638 int num_instrs = 1; // shift src,dst or add 3639 if (Universe::narrow_klass_base() != NULL) { 3640 // set + add + set 3641 num_instrs += insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + 3642 insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base()); 3643 if (Universe::narrow_klass_shift() != 0) { 3644 num_instrs += 1; // sllx 3645 } 3646 } 3647 return num_instrs * BytesPerInstWord; 3648 } 3649 3650 // !!! If the instructions that get generated here change then function 3651 // instr_size_for_decode_klass_not_null() needs to get updated. 3652 void MacroAssembler::decode_klass_not_null(Register r) { 3653 // Do not add assert code to this unless you change vtableStubs_sparc.cpp 3654 // pd_code_size_limit. 3655 assert (UseCompressedClassPointers, "must be compressed"); 3656 if (Universe::narrow_klass_base() != NULL) { 3657 assert(r != G6_heapbase, "bad register choice"); 3658 set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); 3659 if (Universe::narrow_klass_shift() != 0) 3660 sllx(r, LogKlassAlignmentInBytes, r); 3661 add(r, G6_heapbase, r); 3662 reinit_heapbase(); 3663 } else { 3664 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); 3665 sllx(r, Universe::narrow_klass_shift(), r); 3666 } 3667 } 3668 3669 void MacroAssembler::decode_klass_not_null(Register src, Register dst) { 3670 if (src == dst) { 3671 decode_klass_not_null(src); 3672 } else { 3673 // Do not add assert code to this unless you change vtableStubs_sparc.cpp 3674 // pd_code_size_limit. 3675 assert (UseCompressedClassPointers, "must be compressed"); 3676 if (Universe::narrow_klass_base() != NULL) { 3677 if (Universe::narrow_klass_shift() != 0) { 3678 assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice"); 3679 set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); 3680 sllx(src, LogKlassAlignmentInBytes, dst); 3681 add(dst, G6_heapbase, dst); 3682 reinit_heapbase(); 3683 } else { 3684 set((intptr_t)Universe::narrow_klass_base(), dst); 3685 add(src, dst, dst); 3686 } 3687 } else { 3688 // shift/mov src into dst. 3689 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); 3690 sllx(src, Universe::narrow_klass_shift(), dst); 3691 } 3692 } 3693 } 3694 3695 void MacroAssembler::reinit_heapbase() { 3696 if (UseCompressedOops || UseCompressedClassPointers) { 3697 if (Universe::heap() != NULL) { 3698 set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase); 3699 } else { 3700 AddressLiteral base(Universe::narrow_ptrs_base_addr()); 3701 load_ptr_contents(base, G6_heapbase); 3702 } 3703 } 3704 } 3705 3706 #ifdef COMPILER2 3707 3708 // Compress char[] to byte[] by compressing 16 bytes at once. Return 0 on failure. 
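
// A scalar sketch (illustrative only, not compiled) of what the VIS3 fast
// path below computes per 16-byte block: fail if any char has a bit set in
// its high byte (the 0xff00ff00ff00ff00 mask test), otherwise keep only the
// low byte of each char (the bshuffle byte-pick):
//
//   static int compress_chars(const uint16_t* src, uint8_t* dst, int n) {
//     for (int i = 0; i < n; i++) {
//       if (src[i] > 0xff) return 0;  // non-latin1 character -> fail
//       dst[i] = (uint8_t)src[i];
//     }
//     return n;
//   }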
3709 void MacroAssembler::string_compress_16(Register src, Register dst, Register cnt, Register result, 3710 Register tmp1, Register tmp2, Register tmp3, Register tmp4, 3711 FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, Label& Ldone) { 3712 Label Lloop, Lslow; 3713 assert(UseVIS >= 3, "VIS3 is required"); 3714 assert_different_registers(src, dst, cnt, tmp1, tmp2, tmp3, tmp4, result); 3715 assert_different_registers(ftmp1, ftmp2, ftmp3); 3716 3717 // Check if cnt >= 8 (= 16 bytes) 3718 cmp(cnt, 8); 3719 br(Assembler::less, false, Assembler::pn, Lslow); 3720 delayed()->mov(cnt, result); // copy count 3721 3722 // Check for 8-byte alignment of src and dst 3723 or3(src, dst, tmp1); 3724 andcc(tmp1, 7, G0); 3725 br(Assembler::notZero, false, Assembler::pn, Lslow); 3726 delayed()->nop(); 3727 3728 // Set mask for bshuffle instruction 3729 Register mask = tmp4; 3730 set(0x13579bdf, mask); 3731 bmask(mask, G0, G0); 3732 3733 // Set mask to 0xff00 ff00 ff00 ff00 to check for non-latin1 characters 3734 Assembler::sethi(0xff00fc00, mask); // mask = 0x0000 0000 ff00 fc00 3735 add(mask, 0x300, mask); // mask = 0x0000 0000 ff00 ff00 3736 sllx(mask, 32, tmp1); // tmp1 = 0xff00 ff00 0000 0000 3737 or3(mask, tmp1, mask); // mask = 0xff00 ff00 ff00 ff00 3738 3739 // Load first 8 bytes 3740 ldx(src, 0, tmp1); 3741 3742 bind(Lloop); 3743 // Load next 8 bytes 3744 ldx(src, 8, tmp2); 3745 3746 // Check for non-latin1 character by testing if the most significant byte of a char is set. 3747 // Although we have to move the data between integer and floating point registers, this is 3748 // still faster than the corresponding VIS instructions (ford/fand/fcmpd). 3749 or3(tmp1, tmp2, tmp3); 3750 btst(tmp3, mask); 3751 // annul zeroing if branch is not taken to preserve original count 3752 brx(Assembler::notZero, true, Assembler::pn, Ldone); 3753 delayed()->mov(G0, result); // 0 - failed 3754 3755 // Move bytes into float register 3756 movxtod(tmp1, ftmp1); 3757 movxtod(tmp2, ftmp2); 3758 3759 // Compress by copying one byte per char from ftmp1 and ftmp2 to ftmp3 3760 bshuffle(ftmp1, ftmp2, ftmp3); 3761 stf(FloatRegisterImpl::D, ftmp3, dst, 0); 3762 3763 // Increment addresses and decrement count 3764 inc(src, 16); 3765 inc(dst, 8); 3766 dec(cnt, 8); 3767 3768 cmp(cnt, 8); 3769 // annul LDX if branch is not taken to prevent access past end of string 3770 br(Assembler::greaterEqual, true, Assembler::pt, Lloop); 3771 delayed()->ldx(src, 0, tmp1); 3772 3773 // Fallback to slow version 3774 bind(Lslow); 3775 } 3776 3777 // Compress char[] to byte[]. Return 0 on failure. 3778 void MacroAssembler::string_compress(Register src, Register dst, Register cnt, Register result, Register tmp, Label& Ldone) { 3779 Label Lloop; 3780 assert_different_registers(src, dst, cnt, tmp, result); 3781 3782 lduh(src, 0, tmp); 3783 3784 bind(Lloop); 3785 inc(src, sizeof(jchar)); 3786 cmp(tmp, 0xff); 3787 // annul zeroing if branch is not taken to preserve original count 3788 br(Assembler::greater, true, Assembler::pn, Ldone); // don't check xcc 3789 delayed()->mov(G0, result); // 0 - failed 3790 deccc(cnt); 3791 stb(tmp, dst, 0); 3792 inc(dst); 3793 // annul LDUH if branch is not taken to prevent access past end of string 3794 br(Assembler::notZero, true, Assembler::pt, Lloop); 3795 delayed()->lduh(src, 0, tmp); // hoisted 3796 } 3797 3798 // Inflate byte[] to char[] by inflating 16 bytes at once. 
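
// A scalar sketch (illustrative only, not compiled) of the VIS3 fast path
// below: each byte is zero-extended to a 16-bit char, which fpmerge with a
// zeroed register achieves eight bytes at a time:
//
//   static void inflate_bytes(const uint8_t* src, uint16_t* dst, int n) {
//     for (int i = 0; i < n; i++) {
//       dst[i] = src[i];  // the high byte is the interleaved zero
//     }
//   }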
3799 void MacroAssembler::string_inflate_16(Register src, Register dst, Register cnt, Register tmp,
3800 FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, FloatRegister ftmp4, Label& Ldone) {
3801 Label Lloop, Lslow;
3802 assert(UseVIS >= 3, "VIS3 is required");
3803 assert_different_registers(src, dst, cnt, tmp);
3804 assert_different_registers(ftmp1, ftmp2, ftmp3, ftmp4);
3805
3806 // Check if cnt >= 8 (= 16 bytes)
3807 cmp(cnt, 8);
3808 br(Assembler::less, false, Assembler::pn, Lslow);
3809 delayed()->nop();
3810
3811 // Check for 8-byte alignment of src and dst
3812 or3(src, dst, tmp);
3813 andcc(tmp, 7, G0);
3814 br(Assembler::notZero, false, Assembler::pn, Lslow);
3815 // Initialize float register to zero
3816 FloatRegister zerof = ftmp4;
3817 delayed()->fzero(FloatRegisterImpl::D, zerof);
3818
3819 // Load first 8 bytes
3820 ldf(FloatRegisterImpl::D, src, 0, ftmp1);
3821
3822 bind(Lloop);
3823 inc(src, 8);
3824 dec(cnt, 8);
3825
3826 // Inflate the string by interleaving each byte from the source array
3827 // with a zero byte and storing the result in the destination array.
3828 fpmerge(zerof, ftmp1->successor(), ftmp2);
3829 stf(FloatRegisterImpl::D, ftmp2, dst, 8);
3830 fpmerge(zerof, ftmp1, ftmp3);
3831 stf(FloatRegisterImpl::D, ftmp3, dst, 0);
3832
3833 inc(dst, 16);
3834
3835 cmp(cnt, 8);
3836 // annul LDX if branch is not taken to prevent access past end of string
3837 br(Assembler::greaterEqual, true, Assembler::pt, Lloop);
3838 delayed()->ldf(FloatRegisterImpl::D, src, 0, ftmp1);
3839
3840 // Fallback to slow version
3841 bind(Lslow);
3842 }
3843
3844 // Inflate byte[] to char[].
3845 void MacroAssembler::string_inflate(Register src, Register dst, Register cnt, Register tmp, Label& Ldone) {
3846 Label Loop;
3847 assert_different_registers(src, dst, cnt, tmp);
3848
3849 ldub(src, 0, tmp);
3850 bind(Loop);
3851 inc(src);
3852 deccc(cnt);
3853 sth(tmp, dst, 0);
3854 inc(dst, sizeof(jchar));
3855 // annul LDUB if branch is not taken to prevent access past end of string
3856 br(Assembler::notZero, true, Assembler::pt, Loop);
3857 delayed()->ldub(src, 0, tmp); // hoisted
3858 }
3859
3860 void MacroAssembler::string_compare(Register str1, Register str2,
3861 Register cnt1, Register cnt2,
3862 Register tmp1, Register tmp2,
3863 Register result, int ae) {
3864 Label Ldone, Lloop;
3865 assert_different_registers(str1, str2, cnt1, cnt2, tmp1, result);
3866 int stride1, stride2;
3867
3868 // Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a)
3869 // we interchange str1 and str2 in the UL case and negate the result.
3870 // This way, str1 is always latin1 encoded, except for the UU case.
3871
3872 if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
3873 srl(cnt2, 1, cnt2);
3874 }
3875
3876 // See if the lengths are different, and calculate min in cnt1.
3877 // Save diff in case we need it for a tie-breaker.
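// In C terms (illustrative only): diff = cnt1 - cnt2; cnt1 = min(cnt1, cnt2);
// if the strings match over the first min chars, 'diff' (halved for UU,
// negated for UL) becomes the comparison result.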
3878 Label Lskip; 3879 Register diff = tmp1; 3880 subcc(cnt1, cnt2, diff); 3881 br(Assembler::greater, true, Assembler::pt, Lskip); 3882 // cnt2 is shorter, so use its count: 3883 delayed()->mov(cnt2, cnt1); 3884 bind(Lskip); 3885 3886 // Rename registers 3887 Register limit1 = cnt1; 3888 Register limit2 = limit1; 3889 Register chr1 = result; 3890 Register chr2 = cnt2; 3891 if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { 3892 // We need an additional register to keep track of two limits 3893 assert_different_registers(str1, str2, cnt1, cnt2, tmp1, tmp2, result); 3894 limit2 = tmp2; 3895 } 3896 3897 // Is the minimum length zero? 3898 cmp(limit1, (int)0); // use cast to resolve overloading ambiguity 3899 br(Assembler::equal, true, Assembler::pn, Ldone); 3900 // result is difference in lengths 3901 if (ae == StrIntrinsicNode::UU) { 3902 delayed()->sra(diff, 1, result); // Divide by 2 to get number of chars 3903 } else { 3904 delayed()->mov(diff, result); 3905 } 3906 3907 // Load first characters 3908 if (ae == StrIntrinsicNode::LL) { 3909 stride1 = stride2 = sizeof(jbyte); 3910 ldub(str1, 0, chr1); 3911 ldub(str2, 0, chr2); 3912 } else if (ae == StrIntrinsicNode::UU) { 3913 stride1 = stride2 = sizeof(jchar); 3914 lduh(str1, 0, chr1); 3915 lduh(str2, 0, chr2); 3916 } else { 3917 stride1 = sizeof(jbyte); 3918 stride2 = sizeof(jchar); 3919 ldub(str1, 0, chr1); 3920 lduh(str2, 0, chr2); 3921 } 3922 3923 // Compare first characters 3924 subcc(chr1, chr2, chr1); 3925 br(Assembler::notZero, false, Assembler::pt, Ldone); 3926 assert(chr1 == result, "result must be pre-placed"); 3927 delayed()->nop(); 3928 3929 // Check if the strings start at same location 3930 cmp(str1, str2); 3931 brx(Assembler::equal, true, Assembler::pn, Ldone); 3932 delayed()->mov(G0, result); // result is zero 3933 3934 // We have no guarantee that on 64 bit the higher half of limit is 0 3935 signx(limit1); 3936 3937 // Get limit 3938 if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { 3939 sll(limit1, 1, limit2); 3940 subcc(limit2, stride2, chr2); 3941 } 3942 subcc(limit1, stride1, chr1); 3943 br(Assembler::zero, true, Assembler::pn, Ldone); 3944 // result is difference in lengths 3945 if (ae == StrIntrinsicNode::UU) { 3946 delayed()->sra(diff, 1, result); // Divide by 2 to get number of chars 3947 } else { 3948 delayed()->mov(diff, result); 3949 } 3950 3951 // Shift str1 and str2 to the end of the arrays, negate limit 3952 add(str1, limit1, str1); 3953 add(str2, limit2, str2); 3954 neg(chr1, limit1); // limit1 = -(limit1-stride1) 3955 if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { 3956 neg(chr2, limit2); // limit2 = -(limit2-stride2) 3957 } 3958 3959 // Compare the rest of the characters 3960 load_sized_value(Address(str1, limit1), chr1, (ae == StrIntrinsicNode::UU) ? 2 : 1, false); 3961 3962 bind(Lloop); 3963 load_sized_value(Address(str2, limit2), chr2, (ae == StrIntrinsicNode::LL) ? 1 : 2, false); 3964 3965 subcc(chr1, chr2, chr1); 3966 br(Assembler::notZero, false, Assembler::pt, Ldone); 3967 assert(chr1 == result, "result must be pre-placed"); 3968 delayed()->inccc(limit1, stride1); 3969 if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { 3970 inccc(limit2, stride2); 3971 } 3972 3973 // annul LDUB if branch is not taken to prevent access past end of string 3974 br(Assembler::notZero, true, Assembler::pt, Lloop); 3975 delayed()->load_sized_value(Address(str1, limit1), chr1, (ae == StrIntrinsicNode::UU) ? 
2 : 1, false); 3976 3977 // If strings are equal up to min length, return the length difference. 3978 if (ae == StrIntrinsicNode::UU) { 3979 // Divide by 2 to get number of chars 3980 sra(diff, 1, result); 3981 } else { 3982 mov(diff, result); 3983 } 3984 3985 // Otherwise, return the difference between the first mismatched chars. 3986 bind(Ldone); 3987 if(ae == StrIntrinsicNode::UL) { 3988 // Negate result (see note above) 3989 neg(result); 3990 } 3991 } 3992 3993 void MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2, 3994 Register limit, Register tmp, Register result, bool is_byte) { 3995 Label Ldone, Lloop, Lremaining; 3996 assert_different_registers(ary1, ary2, limit, tmp, result); 3997 3998 int length_offset = arrayOopDesc::length_offset_in_bytes(); 3999 int base_offset = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR); 4000 assert(base_offset % 8 == 0, "Base offset must be 8-byte aligned"); 4001 4002 if (is_array_equ) { 4003 // return true if the same array 4004 cmp(ary1, ary2); 4005 brx(Assembler::equal, true, Assembler::pn, Ldone); 4006 delayed()->mov(1, result); // equal 4007 4008 br_null(ary1, true, Assembler::pn, Ldone); 4009 delayed()->clr(result); // not equal 4010 4011 br_null(ary2, true, Assembler::pn, Ldone); 4012 delayed()->clr(result); // not equal 4013 4014 // load the lengths of arrays 4015 ld(Address(ary1, length_offset), limit); 4016 ld(Address(ary2, length_offset), tmp); 4017 4018 // return false if the two arrays are not equal length 4019 cmp(limit, tmp); 4020 br(Assembler::notEqual, true, Assembler::pn, Ldone); 4021 delayed()->clr(result); // not equal 4022 } 4023 4024 cmp_zero_and_br(Assembler::zero, limit, Ldone, true, Assembler::pn); 4025 delayed()->mov(1, result); // zero-length arrays are equal 4026 4027 if (is_array_equ) { 4028 // load array addresses 4029 add(ary1, base_offset, ary1); 4030 add(ary2, base_offset, ary2); 4031 // set byte count 4032 if (!is_byte) { 4033 sll(limit, exact_log2(sizeof(jchar)), limit); 4034 } 4035 } else { 4036 // We have no guarantee that on 64 bit the higher half of limit is 0 4037 signx(limit); 4038 } 4039 4040 #ifdef ASSERT 4041 // Sanity check for doubleword (8-byte) alignment of ary1 and ary2. 4042 // Guaranteed on 64-bit systems (see arrayOopDesc::header_size_in_bytes()). 4043 Label Laligned; 4044 or3(ary1, ary2, tmp); 4045 andcc(tmp, 7, tmp); 4046 br_null_short(tmp, Assembler::pn, Laligned); 4047 STOP("First array element is not 8-byte aligned."); 4048 should_not_reach_here(); 4049 bind(Laligned); 4050 #endif 4051 4052 // Shift ary1 and ary2 to the end of the arrays, negate limit 4053 add(ary1, limit, ary1); 4054 add(ary2, limit, ary2); 4055 neg(limit, limit); 4056 4057 // MAIN LOOP 4058 // Load and compare array elements of size 'byte_width' until the elements are not 4059 // equal or we reached the end of the arrays. If the size of the arrays is not a 4060 // multiple of 'byte_width', we simply read over the end of the array, bail out and 4061 // compare the remaining bytes below by skipping the garbage bytes. 
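// Illustrative sketch (not compiled) of that tail handling: on this
// big-endian machine the over-read garbage sits in the low-order bytes of
// the final 8-byte words, so shifting both words right by 8 * garbage_bytes
// bits discards it before the last comparison:
//
//   int garbage_bits = 8 * garbage_bytes;
//   equal = (word1 >> garbage_bits) == (word2 >> garbage_bits);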
4062 ldx(ary1, limit, result);
4063 bind(Lloop);
4064 ldx(ary2, limit, tmp);
4065 inccc(limit, 8);
4066 // Bail out if we reached the end (but still do the comparison)
4067 br(Assembler::positive, false, Assembler::pn, Lremaining);
4068 delayed()->cmp(result, tmp);
4069 // Check equality of elements
4070 brx(Assembler::equal, false, Assembler::pt, target(Lloop));
4071 delayed()->ldx(ary1, limit, result);
4072
4073 ba(Ldone);
4074 delayed()->clr(result); // not equal
4075
4076 // TAIL COMPARISON
4077 // We got here because we reached the end of the arrays. 'limit' is the number of
4078 // garbage bytes we may have compared by reading over the end of the arrays. Shift
4079 // out the garbage and compare the remaining elements.
4080 bind(Lremaining);
4081 // Optimistic shortcut: elements potentially including garbage are equal
4082 brx(Assembler::equal, true, Assembler::pt, target(Ldone));
4083 delayed()->mov(1, result); // equal
4084 // Shift 'limit' bytes to the right and compare
4085 sll(limit, 3, limit); // bytes to bits
4086 srlx(result, limit, result);
4087 srlx(tmp, limit, tmp);
4088 cmp(result, tmp);
4089 clr(result);
4090 movcc(Assembler::equal, false, xcc, 1, result);
4091
4092 bind(Ldone);
4093 }
4094
4095 void MacroAssembler::has_negatives(Register inp, Register size, Register result, Register t2, Register t3, Register t4, Register t5) {
4096
4097 // test for negative bytes in input string of a given size
4098 // result 1 if found, 0 otherwise.
4099
4100 Label Lcore, Ltail, Lreturn, Lcore_rpt;
4101
4102 assert_different_registers(inp, size, t2, t3, t4, t5, result);
4103
4104 Register i = result; // result used as integer index i until very end
4105 Register lmask = t2; // t2 is aliased to lmask
4106
4107 // INITIALIZATION
4108 // ===========================================================
4109 // initialize highbits mask -> lmask = 0x8080808080808080 (8B/64b)
4110 // compute unaligned offset -> i
4111 // compute core end index -> t5
4112 Assembler::sethi(0x80808000, t2); //! sethi macro fails to emit optimal
4113 add(t2, 0x80, t2);
4114 sllx(t2, 32, t3);
4115 or3(t3, t2, lmask); // 0x8080808080808080 -> lmask
4116 sra(size,0,size);
4117 andcc(inp, 0x7, i); // unaligned offset -> i
4118 br(Assembler::zero, true, Assembler::pn, Lcore); // starts 8B aligned?
4119 delayed()->add(size, -8, t5); // (annulled) core end index -> t5
4120
4121 // ===========================================================
4122
4123 // UNALIGNED HEAD
4124 // ===========================================================
4125 // * unaligned head handling: grab aligned 8B containing unaligned inp(ut)
4126 // * obliterate (ignore) bytes outside string by shifting off reg ends
4127 // * compare with bitmask, short circuit return true if one or more high
4128 // bits set.
4129 cmp(size, 0);
4130 br(Assembler::zero, true, Assembler::pn, Lreturn); // short-circuit?
4131 delayed()->mov(0,result); // annulled so i not clobbered for following
4132 neg(i, t4);
4133 add(i, size, t5);
4134 ldx(inp, t4, t3); // raw aligned 8B containing unaligned head -> t3
4135 mov(8, t4);
4136 sub(t4, t5, t4);
4137 sra(t4, 31, t5);
4138 andn(t4, t5, t5);
4139 add(i, t5, t4);
4140 sll(t5, 3, t5);
4141 sll(t4, 3, t4); // # bits to shift right, left -> t5,t4
4142 srlx(t3, t5, t3);
4143 sllx(t3, t4, t3); // bytes outside string in 8B header obliterated -> t3
4144 andcc(lmask, t3, G0);
4145 brx(Assembler::notZero, true, Assembler::pn, Lreturn); // short circuit?
4146 delayed()->mov(1,result); // annulled so i not clobbered for following
4147 add(size, -8, t5); // core end index -> t5
4148 mov(8, t4);
4149 sub(t4, i, i); // # bytes examined in unaligned head (<8) -> i
4150 // ===========================================================
4151
4152 // ALIGNED CORE
4153 // ===========================================================
4154 // * iterate index i over aligned 8B sections of core, comparing with
4155 // bitmask, short circuit return true if one or more high bits set
4156 // t5 contains core end index/loop limit which is the index
4157 // of the MSB of last (unaligned) 8B fully contained in the string.
4158 // inp contains address of first byte in string/array
4159 // lmask contains 8B high bit mask for comparison
4160 // i contains next index to be processed (adr. inp+i is on 8B boundary)
4161 bind(Lcore);
4162 cmp_and_br_short(i, t5, Assembler::greater, Assembler::pn, Ltail);
4163 bind(Lcore_rpt);
4164 ldx(inp, i, t3);
4165 andcc(t3, lmask, G0);
4166 brx(Assembler::notZero, true, Assembler::pn, Lreturn);
4167 delayed()->mov(1, result); // annulled so i not clobbered for following
4168 add(i, 8, i);
4169 cmp_and_br_short(i, t5, Assembler::lessEqual, Assembler::pn, Lcore_rpt);
4170 // ===========================================================
4171
4172 // ALIGNED TAIL (<8B)
4173 // ===========================================================
4174 // handle aligned tail of 7B or less as complete 8B, obliterating end of
4175 // string bytes by shifting them off end, compare what's left with bitmask
4176 // inp contains address of first byte in string/array
4177 // lmask contains 8B high bit mask for comparison
4178 // i contains next index to be processed (adr. inp+i is on 8B boundary)
4179 bind(Ltail);
4180 subcc(size, i, t4); // # of remaining bytes in string -> t4
4181 // return 0 if no more remaining bytes
4182 br(Assembler::lessEqual, true, Assembler::pn, Lreturn);
4183 delayed()->mov(0, result); // annulled so i not clobbered for following
4184 ldx(inp, i, t3); // load final 8B (aligned) containing tail -> t3
4185 mov(8, t5);
4186 sub(t5, t4, t4);
4187 mov(0, result); // ** i clobbered at this point
4188 sll(t4, 3, t4); // bits beyond end of string -> t4
4189 srlx(t3, t4, t3); // bytes beyond end now obliterated -> t3
4190 andcc(lmask, t3, G0);
4191 movcc(Assembler::notZero, false, xcc, 1, result);
4192 bind(Lreturn);
4193 }
4194
4195 #endif
4196
4197
4198 // Use BIS for zeroing (count is in bytes).
4199 void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) {
4200 assert(UseBlockZeroing && VM_Version::has_blk_zeroing(), "only works with BIS zeroing");
4201 Register end = count;
4202 int cache_line_size = VM_Version::prefetch_data_size();
4203 assert(cache_line_size > 0, "cache line size should be known for this code");
4204 // Minimum count when BIS zeroing can be used since
4205 // it needs membar which is expensive.
4206 int block_zero_size = MAX2(cache_line_size*3, (int)BlockZeroingLowLimit);
4207
4208 Label small_loop;
4209 // Check if count is negative (dead code) or zero.
4210 // Note, count uses 64bit in 64 bit VM.
4211 cmp_and_brx_short(count, 0, Assembler::lessEqual, Assembler::pn, Ldone);
4212
4213 // Use BIS zeroing only for big arrays since it requires membar.
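// Shape of the code below in pseudo-C (illustrative only):
//   zero up to the next cache-line boundary with ordinary stx stores;
//   while (to < end - 2*cache_line_size) BIS-zero a whole cache line;
//   membar(StoreLoad);              // required after BIS stores
//   zero the remaining tail with ordinary 8-byte stores;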
4214 if (Assembler::is_simm13(block_zero_size)) { // < 4096 4215 cmp(count, block_zero_size); 4216 } else { 4217 set(block_zero_size, temp); 4218 cmp(count, temp); 4219 } 4220 br(Assembler::lessUnsigned, false, Assembler::pt, small_loop); 4221 delayed()->add(to, count, end); 4222 4223 // Note: size is >= three (32 bytes) cache lines. 4224 4225 // Clean the beginning of space up to next cache line. 4226 for (int offs = 0; offs < cache_line_size; offs += 8) { 4227 stx(G0, to, offs); 4228 } 4229 4230 // align to next cache line 4231 add(to, cache_line_size, to); 4232 and3(to, -cache_line_size, to); 4233 4234 // Note: size left >= two (32 bytes) cache lines. 4235 4236 // BIS should not be used to zero tail (64 bytes) 4237 // to avoid zeroing a header of the following object. 4238 sub(end, (cache_line_size*2)-8, end); 4239 4240 Label bis_loop; 4241 bind(bis_loop); 4242 stxa(G0, to, G0, Assembler::ASI_ST_BLKINIT_PRIMARY); 4243 add(to, cache_line_size, to); 4244 cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, bis_loop); 4245 4246 // BIS needs membar. 4247 membar(Assembler::StoreLoad); 4248 4249 add(end, (cache_line_size*2)-8, end); // restore end 4250 cmp_and_brx_short(to, end, Assembler::greaterEqualUnsigned, Assembler::pn, Ldone); 4251 4252 // Clean the tail. 4253 bind(small_loop); 4254 stx(G0, to, 0); 4255 add(to, 8, to); 4256 cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, small_loop); 4257 nop(); // Separate short branches 4258 } 4259 4260 /** 4261 * Update CRC-32[C] with a byte value according to constants in table 4262 * 4263 * @param [in,out]crc Register containing the crc. 4264 * @param [in]val Register containing the byte to fold into the CRC. 4265 * @param [in]table Register containing the table of crc constants. 
4266 * 4267 * uint32_t crc; 4268 * val = crc_table[(val ^ crc) & 0xFF]; 4269 * crc = val ^ (crc >> 8); 4270 */ 4271 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 4272 xor3(val, crc, val); 4273 and3(val, 0xFF, val); 4274 sllx(val, 2, val); 4275 lduw(table, val, val); 4276 srlx(crc, 8, crc); 4277 xor3(val, crc, crc); 4278 } 4279 4280 // Reverse byte order of lower 32 bits, assuming upper 32 bits all zeros 4281 void MacroAssembler::reverse_bytes_32(Register src, Register dst, Register tmp) { 4282 srlx(src, 24, dst); 4283 4284 sllx(src, 32+8, tmp); 4285 srlx(tmp, 32+24, tmp); 4286 sllx(tmp, 8, tmp); 4287 or3(dst, tmp, dst); 4288 4289 sllx(src, 32+16, tmp); 4290 srlx(tmp, 32+24, tmp); 4291 sllx(tmp, 16, tmp); 4292 or3(dst, tmp, dst); 4293 4294 sllx(src, 32+24, tmp); 4295 srlx(tmp, 32, tmp); 4296 or3(dst, tmp, dst); 4297 } 4298 4299 void MacroAssembler::movitof_revbytes(Register src, FloatRegister dst, Register tmp1, Register tmp2) { 4300 reverse_bytes_32(src, tmp1, tmp2); 4301 movxtod(tmp1, dst); 4302 } 4303 4304 void MacroAssembler::movftoi_revbytes(FloatRegister src, Register dst, Register tmp1, Register tmp2) { 4305 movdtox(src, tmp1); 4306 reverse_bytes_32(tmp1, dst, tmp2); 4307 } 4308 4309 void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register buf, int offset) { 4310 xmulx(xcrc_hi, xK_hi, xtmp_lo); 4311 xmulxhi(xcrc_hi, xK_hi, xtmp_hi); 4312 xmulxhi(xcrc_lo, xK_lo, xcrc_hi); 4313 xmulx(xcrc_lo, xK_lo, xcrc_lo); 4314 xor3(xcrc_lo, xtmp_lo, xcrc_lo); 4315 xor3(xcrc_hi, xtmp_hi, xcrc_hi); 4316 ldxl(buf, G0, xtmp_lo); 4317 inc(buf, 8); 4318 ldxl(buf, G0, xtmp_hi); 4319 inc(buf, 8); 4320 xor3(xcrc_lo, xtmp_lo, xcrc_lo); 4321 xor3(xcrc_hi, xtmp_hi, xcrc_hi); 4322 } 4323 4324 void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register xbuf_hi, Register xbuf_lo) { 4325 mov(xcrc_lo, xtmp_lo); 4326 mov(xcrc_hi, xtmp_hi); 4327 xmulx(xtmp_hi, xK_hi, xtmp_lo); 4328 xmulxhi(xtmp_hi, xK_hi, xtmp_hi); 4329 xmulxhi(xcrc_lo, xK_lo, xcrc_hi); 4330 xmulx(xcrc_lo, xK_lo, xcrc_lo); 4331 xor3(xcrc_lo, xbuf_lo, xcrc_lo); 4332 xor3(xcrc_hi, xbuf_hi, xcrc_hi); 4333 xor3(xcrc_lo, xtmp_lo, xcrc_lo); 4334 xor3(xcrc_hi, xtmp_hi, xcrc_hi); 4335 } 4336 4337 void MacroAssembler::fold_8bit_crc32(Register xcrc, Register table, Register xtmp, Register tmp) { 4338 and3(xcrc, 0xFF, tmp); 4339 sllx(tmp, 2, tmp); 4340 lduw(table, tmp, xtmp); 4341 srlx(xcrc, 8, xcrc); 4342 xor3(xtmp, xcrc, xcrc); 4343 } 4344 4345 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 4346 and3(crc, 0xFF, tmp); 4347 srlx(crc, 8, crc); 4348 sllx(tmp, 2, tmp); 4349 lduw(table, tmp, tmp); 4350 xor3(tmp, crc, crc); 4351 } 4352 4353 #define CRC32_TMP_REG_NUM 18 4354 4355 #define CRC32_CONST_64 0x163cd6124 4356 #define CRC32_CONST_96 0x0ccaa009e 4357 #define CRC32_CONST_160 0x1751997d0 4358 #define CRC32_CONST_480 0x1c6e41596 4359 #define CRC32_CONST_544 0x154442bd4 4360 4361 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table) { 4362 4363 Label L_cleanup_loop, L_cleanup_check, L_align_loop, L_align_check; 4364 Label L_main_loop_prologue; 4365 Label L_fold_512b, L_fold_512b_loop, L_fold_128b; 4366 Label L_fold_tail, L_fold_tail_loop; 4367 Label L_8byte_fold_loop, L_8byte_fold_check; 4368 4369 const Register tmp[CRC32_TMP_REG_NUM] = {L0, L1, L2, L3, L4, L5, L6, G1, 
I0, I1, I2, I3, I4, I5, I7, O4, O5, G3};
4370
4371 Register const_64 = tmp[CRC32_TMP_REG_NUM-1];
4372 Register const_96 = tmp[CRC32_TMP_REG_NUM-1];
4373 Register const_160 = tmp[CRC32_TMP_REG_NUM-2];
4374 Register const_480 = tmp[CRC32_TMP_REG_NUM-1];
4375 Register const_544 = tmp[CRC32_TMP_REG_NUM-2];
4376
4377 set(ExternalAddress(StubRoutines::crc_table_addr()), table);
4378
4379 not1(crc); // ~c
4380 clruwu(crc); // clear upper 32 bits of crc
4381
4382 // Check if below cutoff, proceed directly to cleanup code
4383 mov(31, G4);
4384 cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_check);
4385
4386 // Align buffer to 8-byte boundary
4387 mov(8, O5);
4388 and3(buf, 0x7, O4);
4389 sub(O5, O4, O5);
4390 and3(O5, 0x7, O5);
4391 sub(len, O5, len);
4392 ba(L_align_check);
4393 delayed()->nop();
4394
4395 // Alignment loop, table look up method for up to 7 bytes
4396 bind(L_align_loop);
4397 ldub(buf, 0, O4);
4398 inc(buf);
4399 dec(O5);
4400 xor3(O4, crc, O4);
4401 and3(O4, 0xFF, O4);
4402 sllx(O4, 2, O4);
4403 lduw(table, O4, O4);
4404 srlx(crc, 8, crc);
4405 xor3(O4, crc, crc);
4406 bind(L_align_check);
4407 nop();
4408 cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_align_loop);
4409
4410 // Aligned on 64-bit (8-byte) boundary at this point
4411 // Check if still above cutoff (31 bytes)
4412 mov(31, G4);
4413 cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_check);
4414 // At least 32 bytes left to process
4415
4416 // Free up registers by storing them to FP registers
4417 for (int i = 0; i < CRC32_TMP_REG_NUM; i++) {
4418 movxtod(tmp[i], as_FloatRegister(2*i));
4419 }
4420
4421 // Determine which loop to enter
4422 // Shared prologue
4423 ldxl(buf, G0, tmp[0]);
4424 inc(buf, 8);
4425 ldxl(buf, G0, tmp[1]);
4426 inc(buf, 8);
4427 xor3(tmp[0], crc, tmp[0]); // Fold CRC into first few bytes
4428 and3(crc, 0, crc); // Clear out the crc register
4429 // Main loop needs at least 128 bytes
4430 mov(128, G4);
4431 mov(64, tmp[2]);
4432 cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_main_loop_prologue);
4433 // Less than 64 bytes
4434 nop();
4435 cmp_and_br_short(len, tmp[2], Assembler::lessUnsigned, Assembler::pt, L_fold_tail);
4436 // Between 64 and 127 bytes
4437 set64(CRC32_CONST_96, const_96, tmp[8]);
4438 set64(CRC32_CONST_160, const_160, tmp[9]);
4439 fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[2], tmp[3], buf, 0);
4440 fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[4], tmp[5], buf, 16);
4441 fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[6], tmp[7], buf, 32);
4442 dec(len, 48);
4443 ba(L_fold_tail);
4444 delayed()->nop();
4445
4446 bind(L_main_loop_prologue);
4447 for (int i = 2; i < 8; i++) {
4448 ldxl(buf, G0, tmp[i]);
4449 inc(buf, 8);
4450 }
4451
4452 // Fold total 512 bits of polynomial on each iteration,
4453 // 128 bits per each of 4 parallel streams
4454 set64(CRC32_CONST_480, const_480, tmp[8]);
4455 set64(CRC32_CONST_544, const_544, tmp[9]);
4456
4457 mov(128, G4);
4458 bind(L_fold_512b_loop);
4459 fold_128bit_crc32(tmp[1], tmp[0], const_480, const_544, tmp[9], tmp[8], buf, 0);
4460 fold_128bit_crc32(tmp[3], tmp[2], const_480, const_544, tmp[11], tmp[10], buf, 16);
4461 fold_128bit_crc32(tmp[5], tmp[4], const_480, const_544, tmp[13], tmp[12], buf, 32);
4462 fold_128bit_crc32(tmp[7], tmp[6], const_480, const_544, tmp[15], tmp[14], buf, 48);
4463 dec(len, 64);
4464 cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt,
L_fold_512b_loop); 4465 4466 // Fold 512 bits to 128 bits 4467 bind(L_fold_512b); 4468 set64(CRC32_CONST_96, const_96, tmp[8]); 4469 set64(CRC32_CONST_160, const_160, tmp[9]); 4470 4471 fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[3], tmp[2]); 4472 fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[5], tmp[4]); 4473 fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[7], tmp[6]); 4474 dec(len, 48); 4475 4476 // Fold the rest of 128 bits data chunks 4477 bind(L_fold_tail); 4478 mov(32, G4); 4479 cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_fold_128b); 4480 4481 set64(CRC32_CONST_96, const_96, tmp[8]); 4482 set64(CRC32_CONST_160, const_160, tmp[9]); 4483 4484 bind(L_fold_tail_loop); 4485 fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[2], tmp[3], buf, 0); 4486 sub(len, 16, len); 4487 cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_fold_tail_loop); 4488 4489 // Fold the 128 bits in tmps 0 - 1 into tmp 1 4490 bind(L_fold_128b); 4491 4492 set64(CRC32_CONST_64, const_64, tmp[4]); 4493 4494 xmulx(const_64, tmp[0], tmp[2]); 4495 xmulxhi(const_64, tmp[0], tmp[3]); 4496 4497 srl(tmp[2], G0, tmp[4]); 4498 xmulx(const_64, tmp[4], tmp[4]); 4499 4500 srlx(tmp[2], 32, tmp[2]); 4501 sllx(tmp[3], 32, tmp[3]); 4502 or3(tmp[2], tmp[3], tmp[2]); 4503 4504 xor3(tmp[4], tmp[1], tmp[4]); 4505 xor3(tmp[4], tmp[2], tmp[1]); 4506 dec(len, 8); 4507 4508 // Use table lookup for the 8 bytes left in tmp[1] 4509 dec(len, 8); 4510 4511 // 8 8-bit folds to compute 32-bit CRC. 4512 for (int j = 0; j < 4; j++) { 4513 fold_8bit_crc32(tmp[1], table, tmp[2], tmp[3]); 4514 } 4515 srl(tmp[1], G0, crc); // move 32 bits to general register 4516 for (int j = 0; j < 4; j++) { 4517 fold_8bit_crc32(crc, table, tmp[3]); 4518 } 4519 4520 bind(L_8byte_fold_check); 4521 4522 // Restore int registers saved in FP registers 4523 for (int i = 0; i < CRC32_TMP_REG_NUM; i++) { 4524 movdtox(as_FloatRegister(2*i), tmp[i]); 4525 } 4526 4527 ba(L_cleanup_check); 4528 delayed()->nop(); 4529 4530 // Table look-up method for the remaining few bytes 4531 bind(L_cleanup_loop); 4532 ldub(buf, 0, O4); 4533 inc(buf); 4534 dec(len); 4535 xor3(O4, crc, O4); 4536 and3(O4, 0xFF, O4); 4537 sllx(O4, 2, O4); 4538 lduw(table, O4, O4); 4539 srlx(crc, 8, crc); 4540 xor3(O4, crc, crc); 4541 bind(L_cleanup_check); 4542 nop(); 4543 cmp_and_br_short(len, 0, Assembler::greaterUnsigned, Assembler::pt, L_cleanup_loop); 4544 4545 not1(crc); 4546 } 4547 4548 #define CHUNK_LEN 128 /* 128 x 8B = 1KB */ 4549 #define CHUNK_K1 0x1307a0206 /* reverseBits(pow(x, CHUNK_LEN*8*8*3 - 32) mod P(x)) << 1 */ 4550 #define CHUNK_K2 0x1a0f717c4 /* reverseBits(pow(x, CHUNK_LEN*8*8*2 - 32) mod P(x)) << 1 */ 4551 #define CHUNK_K3 0x0170076fa /* reverseBits(pow(x, CHUNK_LEN*8*8*1 - 32) mod P(x)) << 1 */ 4552 4553 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, Register table) { 4554 4555 Label L_crc32c_head, L_crc32c_aligned; 4556 Label L_crc32c_parallel, L_crc32c_parallel_loop; 4557 Label L_crc32c_serial, L_crc32c_x32_loop, L_crc32c_x8, L_crc32c_x8_loop; 4558 Label L_crc32c_done, L_crc32c_tail, L_crc32c_return; 4559 4560 set(ExternalAddress(StubRoutines::crc32c_table_addr()), table); 4561 4562 cmp_and_br_short(len, 0, Assembler::lessEqual, Assembler::pn, L_crc32c_return); 4563 4564 // clear upper 32 bits of crc 4565 clruwu(crc); 4566 4567 and3(buf, 7, G4); 4568 cmp_and_brx_short(G4, 0, Assembler::equal, Assembler::pt, 
L_crc32c_aligned); 4569 4570 mov(8, G1); 4571 sub(G1, G4, G4); 4572 4573 // ------ process the misaligned head (7 bytes or less) ------ 4574 bind(L_crc32c_head); 4575 4576 // crc = (crc >>> 8) ^ byteTable[(crc ^ b) & 0xFF]; 4577 ldub(buf, 0, G1); 4578 update_byte_crc32(crc, G1, table); 4579 4580 inc(buf); 4581 dec(len); 4582 cmp_and_br_short(len, 0, Assembler::equal, Assembler::pn, L_crc32c_return); 4583 dec(G4); 4584 cmp_and_br_short(G4, 0, Assembler::greater, Assembler::pt, L_crc32c_head); 4585 4586 // ------ process the 8-byte-aligned body ------ 4587 bind(L_crc32c_aligned); 4588 nop(); 4589 cmp_and_br_short(len, 8, Assembler::less, Assembler::pn, L_crc32c_tail); 4590 4591 // reverse the byte order of lower 32 bits to big endian, and move to FP side 4592 movitof_revbytes(crc, F0, G1, G3); 4593 4594 set(CHUNK_LEN*8*4, G4); 4595 cmp_and_br_short(len, G4, Assembler::less, Assembler::pt, L_crc32c_serial); 4596 4597 // ------ process four 1KB chunks in parallel ------ 4598 bind(L_crc32c_parallel); 4599 4600 fzero(FloatRegisterImpl::D, F2); 4601 fzero(FloatRegisterImpl::D, F4); 4602 fzero(FloatRegisterImpl::D, F6); 4603 4604 mov(CHUNK_LEN - 1, G4); 4605 bind(L_crc32c_parallel_loop); 4606 // schedule ldf's ahead of crc32c's to hide the load-use latency 4607 ldf(FloatRegisterImpl::D, buf, 0, F8); 4608 ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*8, F10); 4609 ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*16, F12); 4610 ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*24, F14); 4611 crc32c(F0, F8, F0); 4612 crc32c(F2, F10, F2); 4613 crc32c(F4, F12, F4); 4614 crc32c(F6, F14, F6); 4615 inc(buf, 8); 4616 dec(G4); 4617 cmp_and_br_short(G4, 0, Assembler::greater, Assembler::pt, L_crc32c_parallel_loop); 4618 4619 ldf(FloatRegisterImpl::D, buf, 0, F8); 4620 ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*8, F10); 4621 ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*16, F12); 4622 crc32c(F0, F8, F0); 4623 crc32c(F2, F10, F2); 4624 crc32c(F4, F12, F4); 4625 4626 inc(buf, CHUNK_LEN*24); 4627 ldfl(FloatRegisterImpl::D, buf, G0, F14); // load in little endian 4628 inc(buf, 8); 4629 4630 prefetch(buf, 0, Assembler::severalReads); 4631 prefetch(buf, CHUNK_LEN*8, Assembler::severalReads); 4632 prefetch(buf, CHUNK_LEN*16, Assembler::severalReads); 4633 prefetch(buf, CHUNK_LEN*24, Assembler::severalReads); 4634 4635 // move to INT side, and reverse the byte order of lower 32 bits to little endian 4636 movftoi_revbytes(F0, O4, G1, G4); 4637 movftoi_revbytes(F2, O5, G1, G4); 4638 movftoi_revbytes(F4, G5, G1, G4); 4639 4640 // combine the results of 4 chunks 4641 set64(CHUNK_K1, G3, G1); 4642 xmulx(O4, G3, O4); 4643 set64(CHUNK_K2, G3, G1); 4644 xmulx(O5, G3, O5); 4645 set64(CHUNK_K3, G3, G1); 4646 xmulx(G5, G3, G5); 4647 4648 movdtox(F14, G4); 4649 xor3(O4, O5, O5); 4650 xor3(G5, O5, O5); 4651 xor3(G4, O5, O5); 4652 4653 // reverse the byte order to big endian, via stack, and move to FP side 4654 // TODO: use new revb instruction 4655 add(SP, -8, G1); 4656 srlx(G1, 3, G1); 4657 sllx(G1, 3, G1); 4658 stx(O5, G1, G0); 4659 ldfl(FloatRegisterImpl::D, G1, G0, F2); // load in little endian 4660 4661 crc32c(F6, F2, F0); 4662 4663 set(CHUNK_LEN*8*4, G4); 4664 sub(len, G4, len); 4665 cmp_and_br_short(len, G4, Assembler::greaterEqual, Assembler::pt, L_crc32c_parallel); 4666 nop(); 4667 cmp_and_br_short(len, 0, Assembler::equal, Assembler::pt, L_crc32c_done); 4668 4669 bind(L_crc32c_serial); 4670 4671 mov(32, G4); 4672 cmp_and_br_short(len, G4, Assembler::less, Assembler::pn, L_crc32c_x8); 4673 4674 // ------ process 32B chunks ------ 4675 
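// Per iteration, in pseudo-C (illustrative only):
//   crc = crc32c(crc, buf[0..7]);   crc = crc32c(crc, buf[8..15]);
//   crc = crc32c(crc, buf[16..23]); crc = crc32c(crc, buf[24..31]);
//   buf += 32; len -= 32;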
bind(L_crc32c_x32_loop); 4676 ldf(FloatRegisterImpl::D, buf, 0, F2); 4677 crc32c(F0, F2, F0); 4678 ldf(FloatRegisterImpl::D, buf, 8, F2); 4679 crc32c(F0, F2, F0); 4680 ldf(FloatRegisterImpl::D, buf, 16, F2); 4681 crc32c(F0, F2, F0); 4682 ldf(FloatRegisterImpl::D, buf, 24, F2); 4683 inc(buf, 32); 4684 crc32c(F0, F2, F0); 4685 dec(len, 32); 4686 cmp_and_br_short(len, G4, Assembler::greaterEqual, Assembler::pt, L_crc32c_x32_loop); 4687 4688 bind(L_crc32c_x8); 4689 nop(); 4690 cmp_and_br_short(len, 8, Assembler::less, Assembler::pt, L_crc32c_done); 4691 4692 // ------ process 8B chunks ------ 4693 bind(L_crc32c_x8_loop); 4694 ldf(FloatRegisterImpl::D, buf, 0, F2); 4695 inc(buf, 8); 4696 crc32c(F0, F2, F0); 4697 dec(len, 8); 4698 cmp_and_br_short(len, 8, Assembler::greaterEqual, Assembler::pt, L_crc32c_x8_loop); 4699 4700 bind(L_crc32c_done); 4701 4702 // move to INT side, and reverse the byte order of lower 32 bits to little endian 4703 movftoi_revbytes(F0, crc, G1, G3); 4704 4705 cmp_and_br_short(len, 0, Assembler::equal, Assembler::pt, L_crc32c_return); 4706 4707 // ------ process the misaligned tail (7 bytes or less) ------ 4708 bind(L_crc32c_tail); 4709 4710 // crc = (crc >>> 8) ^ byteTable[(crc ^ b) & 0xFF]; 4711 ldub(buf, 0, G1); 4712 update_byte_crc32(crc, G1, table); 4713 4714 inc(buf); 4715 dec(len); 4716 cmp_and_br_short(len, 0, Assembler::greater, Assembler::pt, L_crc32c_tail); 4717 4718 bind(L_crc32c_return); 4719 nop(); 4720 }
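
// Note on the four-way parallel phase of kernel_crc32c above (illustrative):
// the per-chunk CRCs are merged with carry-less multiplications by the
// precomputed constants CHUNK_K1..CHUNK_K3, each representing
// x^(8*distance - 32) mod P(x) for the number of bytes 'distance' that the
// partial CRC still has to be carried across. In pseudo-C:
//
//   combined = clmul(crc0, CHUNK_K1) ^ clmul(crc1, CHUNK_K2)
//            ^ clmul(crc2, CHUNK_K3) ^ last_word_of_chunk3;
//   crc = crc32c(crc3, combined);   // final fold via the crc32c instruction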