/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetCodeGen.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER2
#include "opto/intrinsicnode.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

// Convert the raw encoding form into the form expected by the
// constructor for Address.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  assert(scale == 0, "not supported");
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }

  Register rindex = as_Register(index);
  if (rindex != G0) {
    Address madr(as_Register(base), rindex);
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), disp);
    madr._rspec = rspec;
    return madr;
  }
}

Address Argument::address_in_frame() const {
  // Warning: In LP64 mode disp will occupy more than 10 bits, but
  //          opcodes such as ld or ldx only access disp() to get
  //          their simm13 argument.
  int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
  if (is_in())
    return Address(FP, disp);  // In argument.
  else
    return Address(SP, disp);  // Out argument.
}

static const char* argumentNames[][2] = {
  {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
  {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
  {"A(n>9)","P(n>9)"}
};

const char* Argument::name() const {
  int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
  int num = number();
  if (num >= nofArgs)  num = nofArgs - 1;
  return argumentNames[num][is_in() ? 1 : 0];
}

#ifdef ASSERT
// On RISC, there's no benefit to verifying instruction boundaries.
bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

// Patch instruction inst at offset inst_pos to refer to dest_pos
// and return the resulting instruction.
// We should have pcs, not offsets, but since all is relative, it will work out
// OK.
int MacroAssembler::patched_branch(int dest_pos, int inst, int inst_pos) {
  int m; // mask for displacement field
  int v; // new value for displacement field
  const int word_aligned_ones = -4;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:    m = wdisp(word_aligned_ones, 0, 30);  v = wdisp(dest_pos, inst_pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:  m = wdisp(word_aligned_ones, 0, 19);  v = wdisp(dest_pos, inst_pos, 19); break;
      case bp_op2:   m = wdisp(word_aligned_ones, 0, 19);  v = wdisp(dest_pos, inst_pos, 19); break;
      case fb_op2:   m = wdisp(word_aligned_ones, 0, 22);  v = wdisp(dest_pos, inst_pos, 22); break;
      case br_op2:   m = wdisp(word_aligned_ones, 0, 22);  v = wdisp(dest_pos, inst_pos, 22); break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          m = wdisp10(word_aligned_ones, 0);
          v = wdisp10(dest_pos, inst_pos);
        } else {
          m = wdisp16(word_aligned_ones, 0);
          v = wdisp16(dest_pos, inst_pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return (inst & ~m) | v;
}

// Return the offset of the branch destination of instruction inst
// at offset pos.
// Should have pcs, but since all is relative, it works out.
int MacroAssembler::branch_destination(int inst, int pos) {
  int r;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:        r = inv_wdisp(inst, pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:    r = inv_wdisp(inst, pos, 19); break;
      case bp_op2:     r = inv_wdisp(inst, pos, 19); break;
      case fb_op2:     r = inv_wdisp(inst, pos, 22); break;
      case br_op2:     r = inv_wdisp(inst, pos, 22); break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          r = inv_wdisp10(inst, pos);
        } else {
          r = inv_wdisp16(inst, pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return r;
}

void MacroAssembler::resolve_jobject(Register value,
                                     Register tmp) {
  BarrierSetCodeGen* code_gen = Universe::heap()->barrier_set()->code_gen();
  Label done, not_weak;
  br_null(value, true, Assembler::pn, done); // Use NULL as-is.
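  // A jweak handle is tagged in its low bit (JNIHandles::weak_tag_mask); the
  // andcc in the delay slot below tests that bit so we can choose between the
  // phantom (weak) and strong load barriers.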
  delayed()->andcc(value, JNIHandles::weak_tag_mask, G0); // Test for jweak
  br(Assembler::zero, false, Assembler::pt, not_weak);
  delayed()->nop();
  code_gen->load_at(this, ACCESS_ON_ROOT | GC_ACCESS_ON_PHANTOM, T_OBJECT,
                    value, noreg, 0, value, tmp);
  verify_oop(value);
  br(Assembler::always, false, Assembler::pt, done);
  delayed()->nop();
  bind(not_weak);
  code_gen->load_at(this, ACCESS_ON_ROOT | GC_ACCESS_ON_STRONG, T_OBJECT,
                    value, noreg, -JNIHandles::weak_tag_value, value, tmp);
  verify_oop(value);
  bind(done);
}

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check((intptr_t)offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any registers
    ld_ptr(reg, 0, G0);
  }
  else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}

// Ring buffer jumps


void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line) {
  assert_not_delayed();
  jmpl(r1, r2, G0);
}

void MacroAssembler::jmp(Register r1, int offset, const char* file, int line) {
  assert_not_delayed();
  jmp(r1, offset);
}

// This code sequence is relocatable to any address, even on LP64.
void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  Address a(temp, addrlit.low10() + offset);  // Add the offset to the displacement.
  jmpl(a.base(), a.disp(), d);
}

void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
  jumpl(addrlit, temp, G0, offset, file, line);
}


// Conditional breakpoint (for assertion checks in assembly code)
void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
  trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
}

// We want to use ST_BREAKPOINT here, but the debugger is confused by it.
void MacroAssembler::breakpoint_trap() {
  trap(ST_RESERVED_FOR_USER_0);
}

// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread-specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
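// A sketch of the computation below: tmp2 = (thread >> page_shift) masked with
// (page_size - sizeof(int)) yields an int-aligned, thread-specific offset, so
// different threads tend to store to different cache lines of the shared page.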
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  srl(thread, os::get_serialize_page_shift_count(), tmp2);
  if (Assembler::is_simm13(os::vm_page_size())) {
    and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
  }
  else {
    set((os::vm_page_size() - sizeof(int)), tmp1);
    and3(tmp2, tmp1, tmp2);
  }
  set(os::get_memory_serialize_page(), tmp1);
  st(G0, tmp1, tmp2);
}



void MacroAssembler::enter() {
  Unimplemented();
}

void MacroAssembler::leave() {
  Unimplemented();
}

// Calls to C land

#ifdef ASSERT
// a hook for debugging
static Thread* reinitialize_thread() {
  return Thread::current();
}
#else
#define reinitialize_thread Thread::current
#endif

#ifdef ASSERT
address last_get_thread = NULL;
#endif

// call this when G2_thread is not known to be valid
void MacroAssembler::get_thread() {
  save_frame(0);                // to avoid clobbering O0
  mov(G1, L0);                  // avoid clobbering G1
  mov(G5_method, L1);           // avoid clobbering G5
  mov(G3, L2);                  // avoid clobbering G3 also
  mov(G4, L5);                  // avoid clobbering G4
#ifdef ASSERT
  AddressLiteral last_get_thread_addrlit(&last_get_thread);
  set(last_get_thread_addrlit, L3);
  rdpc(L4);
  inc(L4, 3 * BytesPerInstWord); // skip rdpc + inc + st_ptr to point L4 at call
  st_ptr(L4, L3, 0);
#endif
  call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
  delayed()->nop();
  mov(L0, G1);
  mov(L1, G5_method);
  mov(L2, G3);
  mov(L5, G4);
  restore(O0, 0, G2_thread);
}

static Thread* verify_thread_subroutine(Thread* gthread_value) {
  Thread* correct_value = Thread::current();
  guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
  return correct_value;
}

void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    // NOTE: this chops off the heads of the 64-bit O registers.
    // make sure G2_thread contains the right value
    save_frame_and_mov(0, Lmethod, Lmethod);   // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
    mov(G1, L1);                // avoid clobbering G1
    // G2 saved below
    mov(G3, L3);                // avoid clobbering G3
    mov(G4, L4);                // avoid clobbering G4
    mov(G5_method, L5);         // avoid clobbering G5_method
    call(CAST_FROM_FN_PTR(address, verify_thread_subroutine), relocInfo::runtime_call_type);
    delayed()->mov(G2_thread, O0);

    mov(L1, G1);                // Restore G1
    // G2 restored below
    mov(L3, G3);                // restore G3
    mov(L4, G4);                // restore G4
    mov(L5, G5_method);         // restore G5_method
    restore(O0, 0, G2_thread);
  }
}


void MacroAssembler::save_thread(const Register thread_cache) {
  verify_thread();
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(G2_thread, thread_cache);
  }
  if (VerifyThread) {
    // smash G2_thread, as if the VM were about to anyway
    set(0x67676767, G2_thread);
  }
}


void MacroAssembler::restore_thread(const Register thread_cache) {
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(thread_cache, G2_thread);
    verify_thread();
  } else {
    // do it the slow way
    get_thread();
  }
}


// %%% maybe get rid of [re]set_last_Java_frame
void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
  assert_not_delayed();
  Address flags(G2_thread, JavaThread::frame_anchor_offset() +
                           JavaFrameAnchor::flags_offset());
  Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset());

  // Always set last_Java_pc and flags first because once last_Java_sp is visible
  // has_last_Java_frame is true and users will look at the rest of the fields.
  // (Note: flags should always be zero before we get here so doesn't need to be set.)

#ifdef ASSERT
  // Verify that last_Java_pc was zeroed on return to Java
  Label PcOk;
  save_frame(0);                // to avoid clobbering O0
  ld_ptr(pc_addr, L0);
  br_null_short(L0, Assembler::pt, PcOk);
  STOP("last_Java_pc not zeroed before leaving Java");
  bind(PcOk);

  // Verify that flags was zeroed on return to Java
  Label FlagsOk;
  ld(flags, L0);
  tst(L0);
  br(Assembler::zero, false, Assembler::pt, FlagsOk);
  delayed()->restore();
  STOP("flags not zeroed before leaving Java");
  bind(FlagsOk);
#endif /* ASSERT */
  //
  // When returning from calling out from Java mode the frame anchor's last_Java_pc
  // will always be set to NULL. It is set here so that if we are doing a call to
  // native (not VM) that we capture the known pc and don't have to rely on the
  // native call having a standard frame linkage where we can find the pc.

  if (last_Java_pc->is_valid()) {
    st_ptr(last_Java_pc, pc_addr);
  }

#ifdef ASSERT
  // Make sure that we have an odd stack
  Label StackOk;
  andcc(last_java_sp, 0x01, G0);
  br(Assembler::notZero, false, Assembler::pt, StackOk);
  delayed()->nop();
  STOP("Stack Not Biased in set_last_Java_frame");
  bind(StackOk);
#endif // ASSERT
  assert(last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
  add(last_java_sp, STACK_BIAS, G4_scratch);
  st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
}

void MacroAssembler::reset_last_Java_frame(void) {
  assert_not_delayed();

  Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset());
  Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
  Address flags  (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

#ifdef ASSERT
  // check that it WAS previously set
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod to helper frame for -Xprof
  ld_ptr(sp_addr, L0);
  tst(L0);
  breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
  restore();
#endif // ASSERT

  st_ptr(G0, sp_addr);
  // Always return last_Java_pc to zero
  st_ptr(G0, pc_addr);
  // Always null flags after return to Java
  st(G0, flags);
}


void MacroAssembler::call_VM_base(
  Register        oop_result,
  Register        thread_cache,
  Register        last_java_sp,
  address         entry_point,
  int             number_of_arguments,
  bool            check_exceptions)
{
  assert_not_delayed();

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");

  // 64-bit last_java_sp is biased!
  set_last_Java_frame(last_java_sp, noreg);
  if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread)
    delayed()->mov(G2_thread, O0);  // pass thread as first argument
  else
    delayed()->nop();               // (thread already passed)
  restore_thread(thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions. use Gtemp as scratch register.
  if (check_exceptions) {
    check_and_forward_exception(Gtemp);
  }

#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }
}

void MacroAssembler::check_and_forward_exception(Register scratch_reg)
{
  Label L;

  check_and_handle_popframe(scratch_reg);
  check_and_handle_earlyret(scratch_reg);

  Address exception_addr(G2_thread, Thread::pending_exception_offset());
  ld_ptr(exception_addr, scratch_reg);
  br_null_short(scratch_reg, pt, L);
  // we use O7 linkage so that forward_exception_entry has the issuing PC
  call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  delayed()->nop();
  bind(L);
}


void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
}


void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, entry_point, 3, check_exceptions);
}



// Note: The following call_VM overloadings are useful when a "save"
// has already been performed by a stub, and the last Java frame is
// the previous one.  In that case, last_java_sp must be passed as FP
// instead of SP.
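// For example (illustrative only; SomeRuntime::entry is a placeholder), a stub
// that has already performed its own 'save' would pass FP explicitly:
//   call_VM(I0, FP, CAST_FROM_FN_PTR(address, SomeRuntime::entry), O1);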


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}



void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
  assert_not_delayed();
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  delayed()->nop();
  restore_thread(thread_cache);
#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
  call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
  mov(arg_1, O0);
  call_VM_leaf(thread_cache, entry_point, 1);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 2);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 3);
}


void MacroAssembler::get_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  ld_ptr(vm_result_addr, oop_result);
  st_ptr(G0, vm_result_addr);
  verify_oop(oop_result);
}


void MacroAssembler::get_vm_result_2(Register metadata_result) {
  verify_thread();
  Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
  ld_ptr(vm_result_addr_2, metadata_result);
  st_ptr(G0, vm_result_addr_2);
}


// We require that C code which does not return a value in vm_result will
// leave it undisturbed.
void MacroAssembler::set_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  verify_oop(oop_result);

# ifdef ASSERT
  // Check that we are not overwriting any other oop.
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod for -Xprof
  ld_ptr(vm_result_addr, L0);
  tst(L0);
  restore();
  breakpoint_trap(notZero, Assembler::ptr_cc);
  // }
# endif

  st_ptr(oop_result, vm_result_addr);
}


void MacroAssembler::ic_call(address entry, bool emit_delay, jint method_index) {
  RelocationHolder rspec = virtual_call_Relocation::spec(pc(), method_index);
  patchable_set((intptr_t)Universe::non_oop_word(), G5_inline_cache_reg);
  relocate(rspec);
  call(entry, relocInfo::none);
  if (emit_delay) {
    delayed()->nop();
  }
}

void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  address save_pc;
  int shiftcnt;
# ifdef CHECK_DELAY
  assert_not_delayed((char*) "cannot put two instructions in delay slot");
# endif
  v9_dep();
  save_pc = pc();

  int msb32 = (int) (addrlit.value() >> 32);
  int lsb32 = (int) (addrlit.value());

  if (msb32 == 0 && lsb32 >= 0) {
    Assembler::sethi(lsb32, d, addrlit.rspec());
  }
  else if (msb32 == -1) {
    Assembler::sethi(~lsb32, d, addrlit.rspec());
    xor3(d, ~low10(~0), d);
  }
  else {
    Assembler::sethi(msb32, d, addrlit.rspec());  // msb 22-bits
    if (msb32 & 0x3ff)                            // Any bits?
      or3(d, msb32 & 0x3ff, d);                   // msb 32-bits are now in lsb 32
    if (lsb32 & 0xFFFFFC00) {                     // done?
      if ((lsb32 >> 20) & 0xfff) {                // Any bits set?
        sllx(d, 12, d);                           // Make room for next 12 bits
        or3(d, (lsb32 >> 20) & 0xfff, d);         // Or in next 12
        shiftcnt = 0;                             // We already shifted
      }
      else
        shiftcnt = 12;
      if ((lsb32 >> 10) & 0x3ff) {
        sllx(d, shiftcnt + 10, d);                // Make room for last 10 bits
        or3(d, (lsb32 >> 10) & 0x3ff, d);         // Or in next 10
        shiftcnt = 0;
      }
      else
        shiftcnt = 10;
      sllx(d, shiftcnt + 10, d);                  // Shift leaving disp field 0'd
    }
    else
      sllx(d, 32, d);
  }
  // Pad out the instruction sequence so it can be patched later.
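  // The longest sequence emitted above is 7 instructions (see
  // insts_for_sethi()), so pad with nops to that length whenever the
  // sequence must remain patchable.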
  if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
                           addrlit.rtype() != relocInfo::runtime_call_type)) {
    while (pc() < (save_pc + (7 * BytesPerInstWord)))
      nop();
  }
}


void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, false);
}


void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, true);
}


int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
  if (worst_case)  return 7;
  intptr_t iaddr = (intptr_t) a;
  int msb32 = (int) (iaddr >> 32);
  int lsb32 = (int) (iaddr);
  int count;
  if (msb32 == 0 && lsb32 >= 0)
    count = 1;
  else if (msb32 == -1)
    count = 2;
  else {
    count = 2;
    if (msb32 & 0x3ff)
      count++;
    if (lsb32 & 0xFFFFFC00) {
      if ((lsb32 >> 20) & 0xfff)  count += 2;
      if ((lsb32 >> 10) & 0x3ff)  count += 2;
    }
  }
  return count;
}

int MacroAssembler::worst_case_insts_for_set() {
  return insts_for_sethi(NULL, true) + 1;
}


// Keep in sync with MacroAssembler::insts_for_internal_set
void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  intptr_t value = addrlit.value();

  if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) {
    // can optimize
    if (-4096 <= value && value <= 4095) {
      or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
      return;
    }
    if (inv_hi22(hi22(value)) == value) {
      sethi(addrlit, d);
      return;
    }
  }
  assert_not_delayed((char*) "cannot put two instructions in delay slot");
  internal_sethi(addrlit, d, ForceRelocatable);
  if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
    add(d, addrlit.low10(), d, addrlit.rspec());
  }
}

// Keep in sync with MacroAssembler::internal_set
int MacroAssembler::insts_for_internal_set(intptr_t value) {
  // can optimize
  if (-4096 <= value && value <= 4095) {
    return 1;
  }
  if (inv_hi22(hi22(value)) == value) {
    return insts_for_sethi((address) value);
  }
  int count = insts_for_sethi((address) value);
  AddressLiteral al(value);
  if (al.low10() != 0) {
    count++;
  }
  return count;
}

void MacroAssembler::set(const AddressLiteral& al, Register d) {
  internal_set(al, d, false);
}

void MacroAssembler::set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, false);
}

void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) {
  AddressLiteral al(addr, rspec);
  internal_set(al, d, false);
}

void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) {
  internal_set(al, d, true);
}

void MacroAssembler::patchable_set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, true);
}


void MacroAssembler::set64(jlong value, Register d, Register tmp) {
  assert_not_delayed();
  v9_dep();

  int hi = (int)(value >> 32);
  int lo = (int)(value & ~0);
  int bits_33to2 = (int)((value >> 2) & ~0);
  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
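  // Case analysis below, cheapest first: a 13-bit immediate; a zero-extended
  // 32-bit value; a 34-bit value built with a 2-bit shift; a sign-extended
  // value (hi == -1); a value whose low word is zero; and finally the general
  // 64-bit construction, which needs the extra tmp register.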
  if (Assembler::is_simm13(lo) && value == lo) {
    or3(G0, lo, d);
  } else if (hi == 0) {
    Assembler::sethi(lo, d);   // hardware version zero-extends to upper 32
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
  }
  else if ((hi >> 2) == 0) {
    Assembler::sethi(bits_33to2, d);  // hardware version zero-extends to upper 32
    sllx(d, 2, d);
    if (low12(lo) != 0)
      or3(d, low12(lo), d);
  }
  else if (hi == -1) {
    Assembler::sethi(~lo, d);  // hardware version zero-extends to upper 32
    xor3(d, low10(lo) ^ ~low10(~0), d);
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      or3(G0, hi, d);
    } else {
      Assembler::sethi(hi, d);   // hardware version zero-extends to upper 32
      if (low10(hi) != 0)
        or3(d, low10(hi), d);
    }
    sllx(d, 32, d);
  }
  else {
    Assembler::sethi(hi, tmp);
    Assembler::sethi(lo, d);     // macro assembler version sign-extends
    if (low10(hi) != 0)
      or3(tmp, low10(hi), tmp);
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
    sllx(tmp, 32, tmp);
    or3(d, tmp, d);
  }
}

int MacroAssembler::insts_for_set64(jlong value) {
  v9_dep();

  int hi = (int) (value >> 32);
  int lo = (int) (value & ~0);
  int count = 0;

  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    count++;
  } else if (hi == 0) {
    count++;
    if (low10(lo) != 0)
      count++;
  }
  else if (hi == -1) {
    count += 2;
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      count++;
    } else {
      count++;
      if (low10(hi) != 0)
        count++;
    }
    count++;
  }
  else {
    count += 2;
    if (low10(hi) != 0)
      count++;
    if (low10(lo) != 0)
      count++;
    count += 2;
  }
  return count;
}

// compute size in bytes of sparc frame, given
// number of extraWords
int MacroAssembler::total_frame_size_in_bytes(int extraWords) {

  int nWords = frame::memory_parameter_word_sp_offset;

  nWords += extraWords;

  if (nWords & 1) ++nWords; // round up to double-word

  return nWords * BytesPerWord;
}


// save_frame: given number of "extra" words in frame,
// issue approp. save instruction (p 200, v8 manual)

void MacroAssembler::save_frame(int extraWords) {
  int delta = -total_frame_size_in_bytes(extraWords);
  if (is_simm13(delta)) {
    save(SP, delta, SP);
  } else {
    set(delta, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_c1(int size_in_bytes) {
  if (is_simm13(-size_in_bytes)) {
    save(SP, -size_in_bytes, SP);
  } else {
    set(-size_in_bytes, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_and_mov(int extraWords,
                                        Register s1, Register d1,
                                        Register s2, Register d2) {
  assert_not_delayed();

  // The trick here is to use precisely the same memory word
  // that trap handlers also use to save the register.
  // This word cannot be used for any other purpose, but
  // it works fine to save the register's value, whether or not
  // an interrupt flushes register windows at any given moment!
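  // Concretely: s1/s2 are first spilled to their architected save slots in the
  // current window, save_frame() shifts the window, and the values are then
  // recovered into d1/d2 of the new window, by reload or by after_save() renaming.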
  Address s1_addr;
  if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
    s1_addr = s1->address_in_saved_window();
    st_ptr(s1, s1_addr);
  }

  Address s2_addr;
  if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
    s2_addr = s2->address_in_saved_window();
    st_ptr(s2, s2_addr);
  }

  save_frame(extraWords);

  if (s1_addr.base() == SP) {
    ld_ptr(s1_addr.after_save(), d1);
  } else if (s1->is_valid()) {
    mov(s1->after_save(), d1);
  }

  if (s2_addr.base() == SP) {
    ld_ptr(s2_addr.after_save(), d2);
  } else if (s2->is_valid()) {
    mov(s2->after_save(), d2);
  }
}


AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}


AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(obj, oop_Relocation::spec(oop_index));
}

void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(0x3fffff, d);
  emit_int32(op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff));
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, 0x3ff, d);

}

void MacroAssembler::set_narrow_klass(Klass* k, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  narrowOop encoded_k = Klass::encode_klass(k);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(encoded_k, d);
  emit_int32(op(branch_op) | rd(d) | op2(sethi_op2) | hi22(encoded_k));
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, low10(encoded_k), d);

}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) nop();
}

void RegistersForDebugging::print(outputStream* s) {
  FlagSetting fs(Debugging, true);
  int j;
  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("i%d = ", j); os::print_location(s, i[j]); }
    else        { s->print("fp = "   ); os::print_location(s, i[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("l%d = ", j); os::print_location(s, l[j]);
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("o%d = ", j); os::print_location(s, o[j]); }
    else        { s->print("sp = "   ); os::print_location(s, o[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("g%d = ", j); os::print_location(s, g[j]);
  }
  s->cr();

  // print out floats with compression
  for (j = 0; j < 32; ) {
    jfloat val = f[j];
    int last = j;
    for ( ; last + 1 < 32; ++last) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", f[last+1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("f%d", j);
    if (j != last)  s->print(" - f%d", last);
    s->print(" = %f", val);
    s->fill_to(25);
    s->print_cr(" (0x%x)", *(int*)&val);
    j = last + 1;
  }
  s->cr();

  // and doubles (evens only)
  for (j = 0; j < 32; ) {
    jdouble val = d[j];
    int last = j;
    for ( ; last + 1 < 32; ++last) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", d[last+1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("d%d", 2 * j);
    if (j != last)  s->print(" - d%d", last);
    s->print(" = %f", val);
    s->fill_to(30);
    s->print("(0x%x)", *(int*)&val);
    s->fill_to(42);
    s->print_cr("(0x%x)", *(1 + (int*)&val));
    j = last + 1;
  }
  s->cr();
}

void RegistersForDebugging::save_registers(MacroAssembler* a) {
  a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
  a->flushw();
  int i;
  for (i = 0; i < 8; ++i) {
    a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr(L1, O0, i_offset(i));
    a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr(L1, O0, l_offset(i));
    a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
    a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
  }
  for (i = 0; i < 32; ++i) {
    a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
  }
  for (i = 0; i < 64; i += 2) {
    a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
  }
}

void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
  for (int i = 1; i < 8; ++i) {
    a->ld_ptr(r, g_offset(i), as_gRegister(i));
  }
  for (int j = 0; j < 32; ++j) {
    a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
  }
  for (int k = 0; k < 64; k += 2) {
    a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
  }
}


// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
void MacroAssembler::push_fTOS() {
  // %%%%%% need to implement this
}

// pops double TOS element from CPU stack and pushes on FPU stack
void MacroAssembler::pop_fTOS() {
  // %%%%%% need to implement this
}

void
MacroAssembler::empty_FPU_stack() {
  // %%%%%% need to implement this
}

void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  if (reg == G0)  return;       // always NULL, which is always an oop

  BLOCK_COMMENT("verify_oop {");
  char buffer[64];
#ifdef COMPILER1
  if (CommentedAssembly) {
    snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
    block_comment(buffer);
  }
#endif

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at offset %d (%s:%d)", msg, offset(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP, -8*8, SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0, SP, frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1, SP, frame::register_save_words*wordSize+STACK_BIAS+1*8);
  mov(reg, O0); // Move arg into O0; arg might be in O7 which is about to be crushed
  stx(O7, SP, frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8, SP);
  BLOCK_COMMENT("} verify_oop");
}

void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char * file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP, -8*8, SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0, SP, frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1, SP, frame::register_save_words*wordSize+STACK_BIAS+1*8);
  ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
  stx(O7, SP, frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8, SP);
}

// side-door communication with signalHandler in os_solaris.cpp
address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };

// This macro is expanded just once; it creates shared code.  Contract:
// receives an oop in O0.  Must restore O0 & O7 from TLS.
// Must not smash ANY registers, including flags.  May not use a register
// 'save', as this blows the high bits of the O-regs if they contain Long
// values.  Acts as a 'leaf' call.
void MacroAssembler::verify_oop_subroutine() {
  // Leaf call; no frame.
  Label succeed, fail, null_or_fail;

  // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home).
  // O0 is now the oop to be checked.  O7 is the return address.
  Register O0_obj = O0;

  // Save some more registers for temps.
  stx(O2, SP, frame::register_save_words*wordSize+STACK_BIAS+2*8);
  stx(O3, SP, frame::register_save_words*wordSize+STACK_BIAS+3*8);
  stx(O4, SP, frame::register_save_words*wordSize+STACK_BIAS+4*8);
  stx(O5, SP, frame::register_save_words*wordSize+STACK_BIAS+5*8);

  // Save flags
  Register O5_save_flags = O5;
  rdccr(O5_save_flags);

  { // count number of verifies
    Register O2_adr   = O2;
    Register O3_accum = O3;
    inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum);
  }

  Register O2_mask = O2;
  Register O3_bits = O3;
  Register O4_temp = O4;

  // mark lower end of faulting range
  assert(_verify_oop_implicit_branch[0] == NULL, "set once");
  _verify_oop_implicit_branch[0] = pc();

  // We can't check the mark oop because it could be in the process of
  // locking or unlocking while this is running.
  set(Universe::verify_oop_mask(), O2_mask);
  set(Universe::verify_oop_bits(), O3_bits);

  // assert((obj & oop_mask) == oop_bits);
  and3(O0_obj, O2_mask, O4_temp);
  cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, null_or_fail);

  if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) {
    // the null_or_fail case is useless; must test for null separately
    br_null_short(O0_obj, pn, succeed);
  }

  // Check the Klass* of this object for being in the right area of memory.
  // Cannot do the load in the delay slot above in case O0 is null
  load_klass(O0_obj, O0_obj);
  // assert((klass != NULL)
  br_null_short(O0_obj, pn, fail);

  wrccr(O5_save_flags); // Restore CCR's

  // mark upper end of faulting range
  _verify_oop_implicit_branch[1] = pc();

  //-----------------------
  // all tests pass
  bind(succeed);

  // Restore prior 64-bit registers
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+0*8, O0);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+1*8, O1);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+2*8, O2);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+3*8, O3);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+4*8, O4);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+5*8, O5);

  retl();                       // Leaf return; restore prior O7 in delay slot
  delayed()->ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+7*8, O7);

  //-----------------------
  bind(null_or_fail);           // nulls are less common but OK
  br_null(O0_obj, false, pt, succeed);
  delayed()->wrccr(O5_save_flags); // Restore CCR's

  //-----------------------
  // report failure:
  bind(fail);
  _verify_oop_implicit_branch[2] = pc();

  wrccr(O5_save_flags); // Restore CCR's

  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  mov(I1, O1);

  // Restore prior 64-bit registers
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+0*8, I0);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+1*8, I1);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+2*8, I2);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+3*8, I3);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+4*8, I4);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+5*8, I5);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(al, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();
}


void MacroAssembler::stop(const char* msg) {
  // save frame first to get O7 for return address
  // add one word to size in case struct is odd number of words long
  // It must be doubleword-aligned for storing doubles into it.

  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O1);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(a, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();

  breakpoint_trap();   // make stop actually stop rather than writing
                       // unnoticeable results in the output files.

  // restore();        done in callee to save space!
}


void MacroAssembler::warn(const char* msg) {
  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
  RegistersForDebugging::save_registers(this);
  mov(O0, L0);
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O0);
  call(CAST_FROM_FN_PTR(address, warning));
  delayed()->nop();
//  ret();
//  delayed()->restore();
  RegistersForDebugging::restore_registers(this, L0);
  restore();
}


void MacroAssembler::untested(const char* what) {
  // We must be able to turn interactive prompting off
  // in order to run automated test scripts on the VM.
  // Use the flag ShowMessageBoxOnError.

  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("untested: %s", what);
    b = code_string(ss.as_string());
  }
  if (ShowMessageBoxOnError) { STOP(b); }
  else                       { warn(b); }
}


void MacroAssembler::stop_subroutine() {
  RegistersForDebugging::save_registers(this);

  // for the sake of the debugger, stick a PC on the current frame
  // (this assumes that the caller has performed an extra "save")
  mov(I7, L7);
  add(O7, -7 * BytesPerInt, I7);

  save_frame();     // one more save to free up another O7 register
  mov(I0, O1);      // addr of reg save area

  // We expect pointer to message in I1.
  // Caller must set it up in O1
  mov(I1, O0);      // get msg
  call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
  delayed()->nop();

  restore();

  RegistersForDebugging::restore_registers(this, O0);

  save_frame(0);
  call(CAST_FROM_FN_PTR(address, breakpoint));
  delayed()->nop();
  restore();

  mov(L7, I7);
  retl();
  delayed()->restore();   // see stop above
}


void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
    {
      // In order to get locks to work, we need to fake an in_VM state.
      ttyLocker ttyl;
      ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
      if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
        BytecodeCounter::print();
      }
      if (os::message_box(msg, "Execution stopped, print registers?"))
        regs->print(::tty);
    }
    BREAKPOINT;
    ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
  }
  else {
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
  }
  assert(false, "DEBUG MESSAGE: %s", msg);
}


void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) {
  subcc(Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
  Label no_extras;
  br(negative, true, pt, no_extras);  // if neg, clear reg
  delayed()->set(0, Rresult);         // annulled, so only if taken
  bind(no_extras);
}


void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
  add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
  bclr(1, Rresult);
  sll(Rresult, LogBytesPerWord, Rresult);  // Rresult has total frame bytes
}


void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) {
  calc_frame_size(Rextra_words, Rresult);
  neg(Rresult);
  save(SP, Rresult, SP);
}


// ---------------------------------------------------------
Assembler::RCondition cond2rcond(Assembler::Condition c) {
  switch (c) {
    /*case zero: */
    case Assembler::equal:        return Assembler::rc_z;
    case Assembler::lessEqual:    return Assembler::rc_lez;
    case Assembler::less:         return Assembler::rc_lz;
    /*case notZero:*/
    case Assembler::notEqual:     return Assembler::rc_nz;
    case Assembler::greater:      return Assembler::rc_gz;
    case Assembler::greaterEqual: return Assembler::rc_gez;
  }
  ShouldNotReachHere();
  return Assembler::rc_z;
}

// compares (32 bit) register with zero and branches.  NOT FOR USE WITH 64-bit POINTERS
void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a, Predict p) {
  tst(s1);
  br(c, a, p, L);
}

// Compares a pointer register with zero and branches on null.
// Does a test & branch on 32-bit systems and a register-branch on 64-bit.
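// (On SPARC V9 the register branch is the bpr instruction, so br_null and
// br_notnull consume no integer condition codes.)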
void MacroAssembler::br_null(Register s1, bool a, Predict p, Label& L) {
  assert_not_delayed();
  bpr(rc_z, a, p, s1, L);
}

void MacroAssembler::br_notnull(Register s1, bool a, Predict p, Label& L) {
  assert_not_delayed();
  bpr(rc_nz, a, p, s1, L);
}

// Compare registers and branch with nop in delay slot or cbcond without delay slot.

// Compare integer (32 bit) values (icc only).
void MacroAssembler::cmp_and_br_short(Register s1, Register s2, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, s2, L);
  } else {
    cmp(s1, s2);
    br(c, false, p, L);
    delayed()->nop();
  }
}

// Compare integer (32 bit) values (icc only).
void MacroAssembler::cmp_and_br_short(Register s1, int simm13a, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (is_simm(simm13a, 5) && use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, simm13a, L);
  } else {
    cmp(s1, simm13a);
    br(c, false, p, L);
    delayed()->nop();
  }
}

// Branch that tests xcc in LP64 and icc in !LP64
void MacroAssembler::cmp_and_brx_short(Register s1, Register s2, Condition c,
                                       Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, ptr_cc, s1, s2, L);
  } else {
    cmp(s1, s2);
    brx(c, false, p, L);
    delayed()->nop();
  }
}

// Branch that tests xcc in LP64 and icc in !LP64
void MacroAssembler::cmp_and_brx_short(Register s1, int simm13a, Condition c,
                                       Predict p, Label& L) {
  assert_not_delayed();
  if (is_simm(simm13a, 5) && use_cbcond(L)) {
    Assembler::cbcond(c, ptr_cc, s1, simm13a, L);
  } else {
    cmp(s1, simm13a);
    brx(c, false, p, L);
    delayed()->nop();
  }
}

// Short branch versions for comparing a pointer with zero.

void MacroAssembler::br_null_short(Register s1, Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(zero, ptr_cc, s1, 0, L);
    return;
  }
  br_null(s1, false, p, L);
  delayed()->nop();
}

void MacroAssembler::br_notnull_short(Register s1, Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(notZero, ptr_cc, s1, 0, L);
    return;
  }
  br_notnull(s1, false, p, L);
  delayed()->nop();
}

// Unconditional short branch
void MacroAssembler::ba_short(Label& L) {
  if (use_cbcond(L)) {
    Assembler::cbcond(equal, icc, G0, G0, L);
    return;
  }
  br(always, false, pt, L);
  delayed()->nop();
}

// instruction sequences factored across compiler & interpreter


void MacroAssembler::lcmp(Register Ra_hi, Register Ra_low,
                          Register Rb_hi, Register Rb_low,
                          Register Rresult) {

  Label check_low_parts, done;

  cmp(Ra_hi, Rb_hi);                   // compare hi parts
  br(equal, true, pt, check_low_parts);
  delayed()->cmp(Ra_low, Rb_low);      // test low parts

  // And, with an unsigned comparison, it does not matter if the numbers
  // are negative or not.
  // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff.
  // The second one is bigger (unsignedly).

  // Other notes: The first move in each triplet can be unconditional
  // (and therefore probably prefetchable).
  // And the equals case for the high part does not need testing,
  // since that triplet is reached only after finding the high halves differ.

  mov(-1, Rresult);
  ba(done);
  delayed()->movcc(greater, false, icc, 1, Rresult);

  bind(check_low_parts);

  mov(-1, Rresult);
  movcc(equal,           false, icc, 0, Rresult);
  movcc(greaterUnsigned, false, icc, 1, Rresult);

  bind(done);
}

void MacroAssembler::lneg(Register Rhi, Register Rlow) {
  subcc(G0, Rlow, Rlow);
  subc (G0, Rhi,  Rhi);
}

void MacroAssembler::lshl(Register Rin_high, Register Rin_low,
                          Register Rcount,
                          Register Rout_high, Register Rout_low,
                          Register Rtemp) {


  Register Ralt_count = Rtemp;
  Register Rxfer_bits = Rtemp;

  assert(Ralt_count != Rin_high
      && Ralt_count != Rin_low
      && Ralt_count != Rcount
      && Rxfer_bits != Rin_low
      && Rxfer_bits != Rin_high
      && Rxfer_bits != Rcount
      && Rxfer_bits != Rout_low
      && Rout_low   != Rin_high,
         "register alias checks");

  Label big_shift, done;

  // This code can be optimized to use the 64 bit shifts in V9.
  // Here we use the 32 bit shifts.

  and3( Rcount, 0x3f, Rcount);     // take least significant 6 bits
  subcc(Rcount,   31, Ralt_count);
  br(greater, true, pn, big_shift);
  delayed()->dec(Ralt_count);

  // shift < 32 bits, Ralt_count = Rcount-31

  // We get the transfer bits by shifting right by 32-count the low
  // register. This is done by shifting right by 31-count and then by one
  // more to take care of the special (rare) case where count is zero
  // (shifting by 32 would not work).

  neg(Ralt_count);

  // The order of the next two instructions is critical in the case where
  // Rin and Rout are the same and should not be reversed.

  srl(Rin_low, Ralt_count, Rxfer_bits); // shift right by 31-count
  if (Rcount != Rout_low) {
    sll(Rin_low, Rcount, Rout_low);     // low half
  }
  sll(Rin_high, Rcount, Rout_high);
  if (Rcount == Rout_low) {
    sll(Rin_low, Rcount, Rout_low);     // low half
  }
  srl(Rxfer_bits, 1, Rxfer_bits);       // shift right by one more
  ba(done);
  delayed()->or3(Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low

  // shift >= 32 bits, Ralt_count = Rcount-32
  bind(big_shift);
  sll(Rin_low, Ralt_count, Rout_high);
  clr(Rout_low);

  bind(done);
}


void MacroAssembler::lshr(Register Rin_high, Register Rin_low,
                          Register Rcount,
                          Register Rout_high, Register Rout_low,
                          Register Rtemp) {

  Register Ralt_count = Rtemp;
  Register Rxfer_bits = Rtemp;

  assert(Ralt_count != Rin_high
      && Ralt_count != Rin_low
      && Ralt_count != Rcount
      && Rxfer_bits != Rin_low
      && Rxfer_bits != Rin_high
      && Rxfer_bits != Rcount
      && Rxfer_bits != Rout_high
      && Rout_high  != Rin_low,
         "register alias checks");

  Label big_shift, done;

  // This code can be optimized to use the 64 bit shifts in V9.
  // Here we use the 32 bit shifts.
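  // A sketch of the small-shift path that follows, for 0 <= count < 32:
  //   Rout_high = Rin_high >> count                        (arithmetic)
  //   Rout_low  = (Rin_low >>> count) | (Rin_high << (32 - count))
  // where the 32-count shift is performed as (31-count) and then 1 more,
  // so that count == 0 is handled correctly.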
1727 1728 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits 1729 subcc(Rcount, 31, Ralt_count); 1730 br(greater, true, pn, big_shift); 1731 delayed()->dec(Ralt_count); 1732 1733 // shift < 32 bits, Ralt_count = Rcount-31 1734 1735 // We get the transfer bits by shifting left by 32-count the high 1736 // register. This is done by shifting left by 31-count and then by one 1737 // more to take care of the special (rare) case where count is zero 1738 // (shifting by 32 would not work). 1739 1740 neg(Ralt_count); 1741 if (Rcount != Rout_low) { 1742 srl(Rin_low, Rcount, Rout_low); 1743 } 1744 1745 // The order of the next two instructions is critical in the case where 1746 // Rin and Rout are the same and should not be reversed. 1747 1748 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count 1749 sra(Rin_high, Rcount, Rout_high ); // high half 1750 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more 1751 if (Rcount == Rout_low) { 1752 srl(Rin_low, Rcount, Rout_low); 1753 } 1754 ba(done); 1755 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high 1756 1757 // shift >= 32 bits, Ralt_count = Rcount-32 1758 bind(big_shift); 1759 1760 sra(Rin_high, Ralt_count, Rout_low); 1761 sra(Rin_high, 31, Rout_high); // sign into hi 1762 1763 bind( done ); 1764 } 1765 1766 1767 1768 void MacroAssembler::lushr( Register Rin_high, Register Rin_low, 1769 Register Rcount, 1770 Register Rout_high, Register Rout_low, 1771 Register Rtemp ) { 1772 1773 Register Ralt_count = Rtemp; 1774 Register Rxfer_bits = Rtemp; 1775 1776 assert( Ralt_count != Rin_high 1777 && Ralt_count != Rin_low 1778 && Ralt_count != Rcount 1779 && Rxfer_bits != Rin_low 1780 && Rxfer_bits != Rin_high 1781 && Rxfer_bits != Rcount 1782 && Rxfer_bits != Rout_high 1783 && Rout_high != Rin_low, 1784 "register alias checks"); 1785 1786 Label big_shift, done; 1787 1788 // This code can be optimized to use the 64 bit shifts in V9. 1789 // Here we use the 32 bit shifts. 1790 1791 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits 1792 subcc(Rcount, 31, Ralt_count); 1793 br(greater, true, pn, big_shift); 1794 delayed()->dec(Ralt_count); 1795 1796 // shift < 32 bits, Ralt_count = Rcount-31 1797 1798 // We get the transfer bits by shifting left by 32-count the high 1799 // register. This is done by shifting left by 31-count and then by one 1800 // more to take care of the special (rare) case where count is zero 1801 // (shifting by 32 would not work). 1802 1803 neg(Ralt_count); 1804 if (Rcount != Rout_low) { 1805 srl(Rin_low, Rcount, Rout_low); 1806 } 1807 1808 // The order of the next two instructions is critical in the case where 1809 // Rin and Rout are the same and should not be reversed. 
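// (The asserts above do not forbid Rout_high aliasing Rin_high, so the
// sll below must read Rin_high into Rxfer_bits before the srl can
// overwrite Rout_high.)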
1810 1811 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count 1812 srl(Rin_high, Rcount, Rout_high ); // high half 1813 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more 1814 if (Rcount == Rout_low) { 1815 srl(Rin_low, Rcount, Rout_low); 1816 } 1817 ba(done); 1818 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high 1819 1820 // shift >= 32 bits, Ralt_count = Rcount-32 1821 bind(big_shift); 1822 1823 srl(Rin_high, Ralt_count, Rout_low); 1824 clr(Rout_high); 1825 1826 bind( done ); 1827 } 1828 1829 void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) { 1830 cmp(Ra, Rb); 1831 mov(-1, Rresult); 1832 movcc(equal, false, xcc, 0, Rresult); 1833 movcc(greater, false, xcc, 1, Rresult); 1834 } 1835 1836 1837 void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) { 1838 switch (size_in_bytes) { 1839 case 8: ld_long(src, dst); break; 1840 case 4: ld( src, dst); break; 1841 case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break; 1842 case 1: is_signed ? ldsb(src, dst) : ldub(src, dst); break; 1843 default: ShouldNotReachHere(); 1844 } 1845 } 1846 1847 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) { 1848 switch (size_in_bytes) { 1849 case 8: st_long(src, dst); break; 1850 case 4: st( src, dst); break; 1851 case 2: sth( src, dst); break; 1852 case 1: stb( src, dst); break; 1853 default: ShouldNotReachHere(); 1854 } 1855 } 1856 1857 1858 void MacroAssembler::float_cmp( bool is_float, int unordered_result, 1859 FloatRegister Fa, FloatRegister Fb, 1860 Register Rresult) { 1861 if (is_float) { 1862 fcmp(FloatRegisterImpl::S, fcc0, Fa, Fb); 1863 } else { 1864 fcmp(FloatRegisterImpl::D, fcc0, Fa, Fb); 1865 } 1866 1867 if (unordered_result == 1) { 1868 mov( -1, Rresult); 1869 movcc(f_equal, true, fcc0, 0, Rresult); 1870 movcc(f_unorderedOrGreater, true, fcc0, 1, Rresult); 1871 } else { 1872 mov( -1, Rresult); 1873 movcc(f_equal, true, fcc0, 0, Rresult); 1874 movcc(f_greater, true, fcc0, 1, Rresult); 1875 } 1876 } 1877 1878 1879 void MacroAssembler::save_all_globals_into_locals() { 1880 mov(G1,L1); 1881 mov(G2,L2); 1882 mov(G3,L3); 1883 mov(G4,L4); 1884 mov(G5,L5); 1885 mov(G6,L6); 1886 mov(G7,L7); 1887 } 1888 1889 void MacroAssembler::restore_globals_from_locals() { 1890 mov(L1,G1); 1891 mov(L2,G2); 1892 mov(L3,G3); 1893 mov(L4,G4); 1894 mov(L5,G5); 1895 mov(L6,G6); 1896 mov(L7,G7); 1897 } 1898 1899 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, 1900 Register tmp, 1901 int offset) { 1902 intptr_t value = *delayed_value_addr; 1903 if (value != 0) 1904 return RegisterOrConstant(value + offset); 1905 1906 // load indirectly to solve generation ordering problem 1907 AddressLiteral a(delayed_value_addr); 1908 load_ptr_contents(a, tmp); 1909 1910 #ifdef ASSERT 1911 tst(tmp); 1912 breakpoint_trap(zero, xcc); 1913 #endif 1914 1915 if (offset != 0) 1916 add(tmp, offset, tmp); 1917 1918 return RegisterOrConstant(tmp); 1919 } 1920 1921 1922 RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 1923 assert(d.register_or_noreg() != G0, "lost side effect"); 1924 if ((s2.is_constant() && s2.as_constant() == 0) || 1925 (s2.is_register() && s2.as_register() == G0)) { 1926 // Do nothing, just move value. 
1927 if (s1.is_register()) { 1928 if (d.is_constant()) d = temp; 1929 mov(s1.as_register(), d.as_register()); 1930 return d; 1931 } else { 1932 return s1; 1933 } 1934 } 1935 1936 if (s1.is_register()) { 1937 assert_different_registers(s1.as_register(), temp); 1938 if (d.is_constant()) d = temp; 1939 andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 1940 return d; 1941 } else { 1942 if (s2.is_register()) { 1943 assert_different_registers(s2.as_register(), temp); 1944 if (d.is_constant()) d = temp; 1945 set(s1.as_constant(), temp); 1946 andn(temp, s2.as_register(), d.as_register()); 1947 return d; 1948 } else { 1949 intptr_t res = s1.as_constant() & ~s2.as_constant(); 1950 return res; 1951 } 1952 } 1953 } 1954 1955 RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 1956 assert(d.register_or_noreg() != G0, "lost side effect"); 1957 if ((s2.is_constant() && s2.as_constant() == 0) || 1958 (s2.is_register() && s2.as_register() == G0)) { 1959 // Do nothing, just move value. 1960 if (s1.is_register()) { 1961 if (d.is_constant()) d = temp; 1962 mov(s1.as_register(), d.as_register()); 1963 return d; 1964 } else { 1965 return s1; 1966 } 1967 } 1968 1969 if (s1.is_register()) { 1970 assert_different_registers(s1.as_register(), temp); 1971 if (d.is_constant()) d = temp; 1972 add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 1973 return d; 1974 } else { 1975 if (s2.is_register()) { 1976 assert_different_registers(s2.as_register(), temp); 1977 if (d.is_constant()) d = temp; 1978 add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register()); 1979 return d; 1980 } else { 1981 intptr_t res = s1.as_constant() + s2.as_constant(); 1982 return res; 1983 } 1984 } 1985 } 1986 1987 RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 1988 assert(d.register_or_noreg() != G0, "lost side effect"); 1989 if (!is_simm13(s2.constant_or_zero())) 1990 s2 = (s2.as_constant() & 0xFF); 1991 if ((s2.is_constant() && s2.as_constant() == 0) || 1992 (s2.is_register() && s2.as_register() == G0)) { 1993 // Do nothing, just move value. 1994 if (s1.is_register()) { 1995 if (d.is_constant()) d = temp; 1996 mov(s1.as_register(), d.as_register()); 1997 return d; 1998 } else { 1999 return s1; 2000 } 2001 } 2002 2003 if (s1.is_register()) { 2004 assert_different_registers(s1.as_register(), temp); 2005 if (d.is_constant()) d = temp; 2006 sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2007 return d; 2008 } else { 2009 if (s2.is_register()) { 2010 assert_different_registers(s2.as_register(), temp); 2011 if (d.is_constant()) d = temp; 2012 set(s1.as_constant(), temp); 2013 sll_ptr(temp, s2.as_register(), d.as_register()); 2014 return d; 2015 } else { 2016 intptr_t res = s1.as_constant() << s2.as_constant(); 2017 return res; 2018 } 2019 } 2020 } 2021 2022 2023 // Look up the method for a megamorphic invokeinterface call. 2024 // The target method is determined by <intf_klass, itable_index>. 2025 // The receiver klass is in recv_klass. 2026 // On success, the result will be in method_result, and execution falls through. 2027 // On failure, execution transfers to the given label. 
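// Itable layout, as the scan below assumes (a sketch): following the vtable
// come itableOffsetEntry records, each an (interface klass, offset) pair,
// terminated by a NULL interface. The offset field locates that interface's
// block of itableMethodEntry records within the same klass; itable_index
// selects the method slot within that block.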
2028 void MacroAssembler::lookup_interface_method(Register recv_klass, 2029 Register intf_klass, 2030 RegisterOrConstant itable_index, 2031 Register method_result, 2032 Register scan_temp, 2033 Register sethi_temp, 2034 Label& L_no_such_interface) { 2035 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp); 2036 assert(itable_index.is_constant() || itable_index.as_register() == method_result, 2037 "caller must use same register for non-constant itable index as for method"); 2038 2039 Label L_no_such_interface_restore; 2040 bool did_save = false; 2041 if (scan_temp == noreg || sethi_temp == noreg) { 2042 Register recv_2 = recv_klass->is_global() ? recv_klass : L0; 2043 Register intf_2 = intf_klass->is_global() ? intf_klass : L1; 2044 assert(method_result->is_global(), "must be able to return value"); 2045 scan_temp = L2; 2046 sethi_temp = L3; 2047 save_frame_and_mov(0, recv_klass, recv_2, intf_klass, intf_2); 2048 recv_klass = recv_2; 2049 intf_klass = intf_2; 2050 did_save = true; 2051 } 2052 2053 // Compute start of first itableOffsetEntry (which is at the end of the vtable) 2054 int vtable_base = in_bytes(Klass::vtable_start_offset()); 2055 int scan_step = itableOffsetEntry::size() * wordSize; 2056 int vte_size = vtableEntry::size_in_bytes(); 2057 2058 lduw(recv_klass, in_bytes(Klass::vtable_length_offset()), scan_temp); 2059 // %%% We should store the aligned, prescaled offset in the klassoop. 2060 // Then the next several instructions would fold away. 2061 2062 int itb_offset = vtable_base; 2063 int itb_scale = exact_log2(vtableEntry::size_in_bytes()); 2064 sll(scan_temp, itb_scale, scan_temp); 2065 add(scan_temp, itb_offset, scan_temp); 2066 add(recv_klass, scan_temp, scan_temp); 2067 2068 // Adjust recv_klass by scaled itable_index, so we can free itable_index. 2069 RegisterOrConstant itable_offset = itable_index; 2070 itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset); 2071 itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset); 2072 add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass); 2073 2074 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) { 2075 // if (scan->interface() == intf) { 2076 // result = (klass + scan->offset() + itable_index); 2077 // } 2078 // } 2079 Label L_search, L_found_method; 2080 2081 for (int peel = 1; peel >= 0; peel--) { 2082 // %%%% Could load both offset and interface in one ldx, if they were 2083 // in the opposite order. This would save a load. 2084 ld_ptr(scan_temp, itableOffsetEntry::interface_offset_in_bytes(), method_result); 2085 2086 // Check that this entry is non-null. A null entry means that 2087 // the receiver class doesn't implement the interface, and wasn't the 2088 // same as when the caller was compiled. 2089 bpr(Assembler::rc_z, false, Assembler::pn, method_result, did_save ? L_no_such_interface_restore : L_no_such_interface); 2090 delayed()->cmp(method_result, intf_klass); 2091 2092 if (peel) { 2093 brx(Assembler::equal, false, Assembler::pt, L_found_method); 2094 } else { 2095 brx(Assembler::notEqual, false, Assembler::pn, L_search); 2096 // (invert the test to fall through to found_method...) 2097 } 2098 delayed()->add(scan_temp, scan_step, scan_temp); 2099 2100 if (!peel) break; 2101 2102 bind(L_search); 2103 } 2104 2105 bind(L_found_method); 2106 2107 // Got a hit. 
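// The offset field of the matching itableOffsetEntry is the distance from
// recv_klass to the interface's method block. recv_klass was pre-biased by
// the scaled itable_index above, so a single lduw plus ld_ptr finishes the
// lookup.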
2108 int ito_offset = itableOffsetEntry::offset_offset_in_bytes(); 2109 // scan_temp[-scan_step] points to the vtable offset we need 2110 ito_offset -= scan_step; 2111 lduw(scan_temp, ito_offset, scan_temp); 2112 ld_ptr(recv_klass, scan_temp, method_result); 2113 2114 if (did_save) { 2115 Label L_done; 2116 ba(L_done); 2117 delayed()->restore(); 2118 2119 bind(L_no_such_interface_restore); 2120 ba(L_no_such_interface); 2121 delayed()->restore(); 2122 2123 bind(L_done); 2124 } 2125 } 2126 2127 2128 // virtual method calling 2129 void MacroAssembler::lookup_virtual_method(Register recv_klass, 2130 RegisterOrConstant vtable_index, 2131 Register method_result) { 2132 assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg()); 2133 Register sethi_temp = method_result; 2134 const int base = in_bytes(Klass::vtable_start_offset()) + 2135 // method pointer offset within the vtable entry: 2136 vtableEntry::method_offset_in_bytes(); 2137 RegisterOrConstant vtable_offset = vtable_index; 2138 // Each of the following three lines potentially generates an instruction. 2139 // But the total number of address formation instructions will always be 2140 // at most two, and will often be zero. In any case, it will be optimal. 2141 // If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x). 2142 // If vtable_index is a constant, we will have at most (set B+X<<N,t; ld_ptr k,t). 2143 vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size_in_bytes()), vtable_offset); 2144 vtable_offset = regcon_inc_ptr(vtable_offset, base, vtable_offset, sethi_temp); 2145 Address vtable_entry_addr(recv_klass, ensure_simm13_or_reg(vtable_offset, sethi_temp)); 2146 ld_ptr(vtable_entry_addr, method_result); 2147 } 2148 2149 2150 void MacroAssembler::check_klass_subtype(Register sub_klass, 2151 Register super_klass, 2152 Register temp_reg, 2153 Register temp2_reg, 2154 Label& L_success) { 2155 Register sub_2 = sub_klass; 2156 Register sup_2 = super_klass; 2157 if (!sub_2->is_global()) sub_2 = L0; 2158 if (!sup_2->is_global()) sup_2 = L1; 2159 bool did_save = false; 2160 if (temp_reg == noreg || temp2_reg == noreg) { 2161 temp_reg = L2; 2162 temp2_reg = L3; 2163 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 2164 sub_klass = sub_2; 2165 super_klass = sup_2; 2166 did_save = true; 2167 } 2168 Label L_failure, L_pop_to_failure, L_pop_to_success; 2169 check_klass_subtype_fast_path(sub_klass, super_klass, 2170 temp_reg, temp2_reg, 2171 (did_save ? &L_pop_to_success : &L_success), 2172 (did_save ? 
&L_pop_to_failure : &L_failure), NULL); 2173 2174 if (!did_save) 2175 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 2176 check_klass_subtype_slow_path(sub_2, sup_2, 2177 L2, L3, L4, L5, 2178 NULL, &L_pop_to_failure); 2179 2180 // on success: 2181 bind(L_pop_to_success); 2182 restore(); 2183 ba_short(L_success); 2184 2185 // on failure: 2186 bind(L_pop_to_failure); 2187 restore(); 2188 bind(L_failure); 2189 } 2190 2191 2192 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 2193 Register super_klass, 2194 Register temp_reg, 2195 Register temp2_reg, 2196 Label* L_success, 2197 Label* L_failure, 2198 Label* L_slow_path, 2199 RegisterOrConstant super_check_offset) { 2200 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 2201 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2202 2203 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 2204 bool need_slow_path = (must_load_sco || 2205 super_check_offset.constant_or_zero() == sco_offset); 2206 2207 assert_different_registers(sub_klass, super_klass, temp_reg); 2208 if (super_check_offset.is_register()) { 2209 assert_different_registers(sub_klass, super_klass, temp_reg, 2210 super_check_offset.as_register()); 2211 } else if (must_load_sco) { 2212 assert(temp2_reg != noreg, "supply either a temp or a register offset"); 2213 } 2214 2215 Label L_fallthrough; 2216 int label_nulls = 0; 2217 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 2218 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 2219 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } 2220 assert(label_nulls <= 1 || 2221 (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path), 2222 "at most one NULL in the batch, usually"); 2223 2224 // If the pointers are equal, we are done (e.g., String[] elements). 2225 // This self-check enables sharing of secondary supertype arrays among 2226 // non-primary types such as array-of-interface. Otherwise, each such 2227 // type would need its own customized SSA. 2228 // We move this check to the front of the fast path because many 2229 // type checks are in fact trivially successful in this manner, 2230 // so we get a nicely predicted branch right at the start of the check. 2231 cmp(super_klass, sub_klass); 2232 brx(Assembler::equal, false, Assembler::pn, *L_success); 2233 delayed()->nop(); 2234 2235 // Check the supertype display: 2236 if (must_load_sco) { 2237 // The super check offset is always positive... 2238 lduw(super_klass, sco_offset, temp2_reg); 2239 super_check_offset = RegisterOrConstant(temp2_reg); 2240 // super_check_offset is register. 2241 assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register()); 2242 } 2243 ld_ptr(sub_klass, super_check_offset, temp_reg); 2244 cmp(super_klass, temp_reg); 2245 2246 // This check has worked decisively for primary supers. 2247 // Secondary supers are sought in the super_cache ('super_cache_addr'). 2248 // (Secondary supers are interfaces and very deeply nested subtypes.) 2249 // This works in the same check above because of a tricky aliasing 2250 // between the super_cache and the primary super display elements. 2251 // (The 'super_check_addr' can address either, as the case requires.) 2252 // Note that the cache is updated below if it does not help us find 2253 // what we need immediately. 2254 // So if it was a primary super, we can just fail immediately. 
2255 // Otherwise, it's the slow path for us (no success at this point).
2256
2257 // Hacked ba(), which may only be used just before L_fallthrough.
2258 #define FINAL_JUMP(label) \
2259 if (&(label) != &L_fallthrough) { \
2260 ba(label); delayed()->nop(); \
2261 }
2262
2263 if (super_check_offset.is_register()) {
2264 brx(Assembler::equal, false, Assembler::pn, *L_success);
2265 delayed()->cmp(super_check_offset.as_register(), sc_offset);
2266
2267 if (L_failure == &L_fallthrough) {
2268 brx(Assembler::equal, false, Assembler::pt, *L_slow_path);
2269 delayed()->nop();
2270 } else {
2271 brx(Assembler::notEqual, false, Assembler::pn, *L_failure);
2272 delayed()->nop();
2273 FINAL_JUMP(*L_slow_path);
2274 }
2275 } else if (super_check_offset.as_constant() == sc_offset) {
2276 // Need a slow path; fast failure is impossible.
2277 if (L_slow_path == &L_fallthrough) {
2278 brx(Assembler::equal, false, Assembler::pt, *L_success);
2279 delayed()->nop();
2280 } else {
2281 brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path);
2282 delayed()->nop();
2283 FINAL_JUMP(*L_success);
2284 }
2285 } else {
2286 // No slow path; it's a fast decision.
2287 if (L_failure == &L_fallthrough) {
2288 brx(Assembler::equal, false, Assembler::pt, *L_success);
2289 delayed()->nop();
2290 } else {
2291 brx(Assembler::notEqual, false, Assembler::pn, *L_failure);
2292 delayed()->nop();
2293 FINAL_JUMP(*L_success);
2294 }
2295 }
2296
2297 bind(L_fallthrough);
2298
2299 #undef FINAL_JUMP
2300 }
2301
2302
2303 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
2304 Register super_klass,
2305 Register count_temp,
2306 Register scan_temp,
2307 Register scratch_reg,
2308 Register coop_reg,
2309 Label* L_success,
2310 Label* L_failure) {
2311 assert_different_registers(sub_klass, super_klass,
2312 count_temp, scan_temp, scratch_reg, coop_reg);
2313
2314 Label L_fallthrough, L_loop;
2315 int label_nulls = 0;
2316 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
2317 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
2318 assert(label_nulls <= 1, "at most one NULL in the batch");
2319
2320 // a couple of useful fields in sub_klass:
2321 int ss_offset = in_bytes(Klass::secondary_supers_offset());
2322 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
2323
2324 // Do a linear scan of the secondary super-klass chain.
2325 // This code is rarely used, so simplicity is a virtue here.
2326
2327 #ifndef PRODUCT
2328 int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
2329 inc_counter((address) pst_counter, count_temp, scan_temp);
2330 #endif
2331
2332 // We will consult the secondary-super array.
2333 ld_ptr(sub_klass, ss_offset, scan_temp);
2334
2335 Register search_key = super_klass;
2336
2337 // Load the array length. (The unsigned lduw does the right thing on LP64.)
2338 lduw(scan_temp, Array<Klass*>::length_offset_in_bytes(), count_temp);
2339
2340 // Check for empty secondary super list
2341 tst(count_temp);
2342
2343 // In the array of super klasses, elements are pointer-sized.
2344 int element_size = wordSize;
2345
2346 // Top of search loop
2347 bind(L_loop);
2348 br(Assembler::equal, false, Assembler::pn, *L_failure);
2349 delayed()->add(scan_temp, element_size, scan_temp);
2350
2351 // Skip the array header in all array accesses.
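// (The scan pointer is advanced in the delay slot above before each element
// load, so elem_offset below is biased by -element_size on top of skipping
// the array header.)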
2352 int elem_offset = Array<Klass*>::base_offset_in_bytes(); 2353 elem_offset -= element_size; // the scan pointer was pre-incremented also 2354 2355 // Load next super to check 2356 ld_ptr( scan_temp, elem_offset, scratch_reg ); 2357 2358 // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list 2359 cmp(scratch_reg, search_key); 2360 2361 // A miss means we are NOT a subtype and need to keep looping 2362 brx(Assembler::notEqual, false, Assembler::pn, L_loop); 2363 delayed()->deccc(count_temp); // decrement trip counter in delay slot 2364 2365 // Success. Cache the super we found and proceed in triumph. 2366 st_ptr(super_klass, sub_klass, sc_offset); 2367 2368 if (L_success != &L_fallthrough) { 2369 ba(*L_success); 2370 delayed()->nop(); 2371 } 2372 2373 bind(L_fallthrough); 2374 } 2375 2376 2377 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot, 2378 Register temp_reg, 2379 int extra_slot_offset) { 2380 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 2381 int stackElementSize = Interpreter::stackElementSize; 2382 int offset = extra_slot_offset * stackElementSize; 2383 if (arg_slot.is_constant()) { 2384 offset += arg_slot.as_constant() * stackElementSize; 2385 return offset; 2386 } else { 2387 assert(temp_reg != noreg, "must specify"); 2388 sll_ptr(arg_slot.as_register(), exact_log2(stackElementSize), temp_reg); 2389 if (offset != 0) 2390 add(temp_reg, offset, temp_reg); 2391 return temp_reg; 2392 } 2393 } 2394 2395 2396 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 2397 Register temp_reg, 2398 int extra_slot_offset) { 2399 return Address(Gargs, argument_offset(arg_slot, temp_reg, extra_slot_offset)); 2400 } 2401 2402 2403 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, 2404 Register temp_reg, 2405 Label& done, Label* slow_case, 2406 BiasedLockingCounters* counters) { 2407 assert(UseBiasedLocking, "why call this otherwise?"); 2408 2409 if (PrintBiasedLockingStatistics) { 2410 assert_different_registers(obj_reg, mark_reg, temp_reg, O7); 2411 if (counters == NULL) 2412 counters = BiasedLocking::counters(); 2413 } 2414 2415 Label cas_label; 2416 2417 // Biased locking 2418 // See whether the lock is currently biased toward our thread and 2419 // whether the epoch is still valid 2420 // Note that the runtime guarantees sufficient alignment of JavaThread 2421 // pointers to allow age to be placed into low bits 2422 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout"); 2423 and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); 2424 cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label); 2425 2426 load_klass(obj_reg, temp_reg); 2427 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2428 or3(G2_thread, temp_reg, temp_reg); 2429 xor3(mark_reg, temp_reg, temp_reg); 2430 andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg); 2431 if (counters != NULL) { 2432 cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg); 2433 // Reload mark_reg as we may need it later 2434 ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg); 2435 } 2436 brx(Assembler::equal, true, Assembler::pt, done); 2437 delayed()->nop(); 2438 2439 Label try_revoke_bias; 2440 Label try_rebias; 2441 Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes()); 2442 
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
2443
2444 // At this point we know that the header has the bias pattern and
2445 // that we are not the bias owner in the current epoch. We need to
2446 // figure out more details about the state of the header in order to
2447 // know what operations can be legally performed on the object's
2448 // header.
2449
2450 // If the low three bits in the xor result aren't clear, that means
2451 // the prototype header is no longer biased and we have to revoke
2452 // the bias on this object.
2453 btst(markOopDesc::biased_lock_mask_in_place, temp_reg);
2454 brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias);
2455
2456 // Biasing is still enabled for this data type. See whether the
2457 // epoch of the current bias is still valid, meaning that the epoch
2458 // bits of the mark word are equal to the epoch bits of the
2459 // prototype header. (Note that the prototype header's epoch bits
2460 // only change at a safepoint.) If not, attempt to rebias the object
2461 // toward the current thread. Note that we must be absolutely sure
2462 // that the current epoch is invalid in order to do this because
2463 // otherwise the manipulations it performs on the mark word are
2464 // illegal.
2465 delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg);
2466 brx(Assembler::notZero, false, Assembler::pn, try_rebias);
2467
2468 // The epoch of the current bias is still valid but we know nothing
2469 // about the owner; it might be set or it might be clear. Try to
2470 // acquire the bias of the object using an atomic operation. If this
2471 // fails we will go into the runtime to revoke the object's bias.
2472 // Note that we first construct the presumed unbiased header so we
2473 // don't accidentally blow away another thread's valid bias.
2474 delayed()->and3(mark_reg,
2475 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
2476 mark_reg);
2477 or3(G2_thread, mark_reg, temp_reg);
2478 cas_ptr(mark_addr.base(), mark_reg, temp_reg);
2479 // If the biasing toward our thread failed, this means that
2480 // another thread succeeded in biasing it toward itself and we
2481 // need to revoke that bias. The revocation will occur in the
2482 // interpreter runtime in the slow case.
2483 cmp(mark_reg, temp_reg);
2484 if (counters != NULL) {
2485 cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg);
2486 }
2487 if (slow_case != NULL) {
2488 brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
2489 delayed()->nop();
2490 }
2491 ba_short(done);
2492
2493 bind(try_rebias);
2494 // At this point we know the epoch has expired, meaning that the
2495 // current "bias owner", if any, is actually invalid. Under these
2496 // circumstances _only_, we are allowed to use the current header's
2497 // value as the comparison value when doing the cas to acquire the
2498 // bias in the current epoch. In other words, we allow transfer of
2499 // the bias from one thread to another directly in this situation.
2500 //
2501 // FIXME: due to a lack of registers we currently blow away the age
2502 // bits in this situation. Should attempt to preserve them.
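// The rebias attempt below is, roughly (a sketch; 'self' stands for
// G2_thread and old_mark for the value in mark_reg):
//   new_mark = klass->prototype_header | self;
//   if (!CAS(&obj->mark, old_mark, new_mark)) goto slow_case;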
2503 load_klass(obj_reg, temp_reg); 2504 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2505 or3(G2_thread, temp_reg, temp_reg); 2506 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2507 // If the biasing toward our thread failed, this means that 2508 // another thread succeeded in biasing it toward itself and we 2509 // need to revoke that bias. The revocation will occur in the 2510 // interpreter runtime in the slow case. 2511 cmp(mark_reg, temp_reg); 2512 if (counters != NULL) { 2513 cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg); 2514 } 2515 if (slow_case != NULL) { 2516 brx(Assembler::notEqual, true, Assembler::pn, *slow_case); 2517 delayed()->nop(); 2518 } 2519 ba_short(done); 2520 2521 bind(try_revoke_bias); 2522 // The prototype mark in the klass doesn't have the bias bit set any 2523 // more, indicating that objects of this data type are not supposed 2524 // to be biased any more. We are going to try to reset the mark of 2525 // this object to the prototype value and fall through to the 2526 // CAS-based locking scheme. Note that if our CAS fails, it means 2527 // that another thread raced us for the privilege of revoking the 2528 // bias of this particular object, so it's okay to continue in the 2529 // normal locking code. 2530 // 2531 // FIXME: due to a lack of registers we currently blow away the age 2532 // bits in this situation. Should attempt to preserve them. 2533 load_klass(obj_reg, temp_reg); 2534 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2535 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2536 // Fall through to the normal CAS-based lock, because no matter what 2537 // the result of the above CAS, some thread must have succeeded in 2538 // removing the bias bit from the object's header. 2539 if (counters != NULL) { 2540 cmp(mark_reg, temp_reg); 2541 cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg); 2542 } 2543 2544 bind(cas_label); 2545 } 2546 2547 void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done, 2548 bool allow_delay_slot_filling) { 2549 // Check for biased locking unlock case, which is a no-op 2550 // Note: we do not have to check the thread ID for two reasons. 2551 // First, the interpreter checks for IllegalMonitorStateException at 2552 // a higher level. Second, if the bias was revoked while we held the 2553 // lock, the object could not be rebiased toward another thread, so 2554 // the bias bit would be clear. 2555 ld_ptr(mark_addr, temp_reg); 2556 and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); 2557 cmp(temp_reg, markOopDesc::biased_lock_pattern); 2558 brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done); 2559 delayed(); 2560 if (!allow_delay_slot_filling) { 2561 nop(); 2562 } 2563 } 2564 2565 2566 // compiler_lock_object() and compiler_unlock_object() are direct transliterations 2567 // of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments. 2568 // The code could be tightened up considerably. 2569 // 2570 // box->dhw disposition - post-conditions at DONE_LABEL. 2571 // - Successful inflated lock: box->dhw != 0. 2572 // Any non-zero value suffices. 2573 // Consider G2_thread, rsp, boxReg, or markOopDesc::unused_mark() 2574 // - Successful Stack-lock: box->dhw == mark. 2575 // box->dhw must contain the displaced mark word value 2576 // - Failure -- icc.ZFlag == 0 and box->dhw is undefined. 
2577 // The slow-path fast_enter() and slow_enter() operators
2578 // are responsible for setting box->dhw = NonZero (typically markOopDesc::unused_mark()).
2579 // - Biased: box->dhw is undefined
2580 //
2581 // SPARC refworkload performance - specifically jetstream and scimark - is
2582 // extremely sensitive to the size of the code emitted by compiler_lock_object
2583 // and compiler_unlock_object. Critically, the key factor is code size, not path
2584 // length. (Simple experiments to pad CLO with unexecuted NOPs demonstrate the
2585 // effect).
2586
2587
2588 void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
2589 Register Rbox, Register Rscratch,
2590 BiasedLockingCounters* counters,
2591 bool try_bias) {
2592 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
2593
2594 verify_oop(Roop);
2595 Label done ;
2596
2597 if (counters != NULL) {
2598 inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch);
2599 }
2600
2601 if (EmitSync & 1) {
2602 mov(3, Rscratch);
2603 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2604 cmp(SP, G0);
2605 return ;
2606 }
2607
2608 if (EmitSync & 2) {
2609
2610 // Fetch object's markword
2611 ld_ptr(mark_addr, Rmark);
2612
2613 if (try_bias) {
2614 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
2615 }
2616
2617 // Save Rbox in Rscratch to be used for the cas operation
2618 mov(Rbox, Rscratch);
2619
2620 // set Rmark to markOop | markOopDesc::unlocked_value
2621 or3(Rmark, markOopDesc::unlocked_value, Rmark);
2622
2623 // Initialize the box. (Must happen before we update the object mark!)
2624 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
2625
2626 // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
2627 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
2628 cas_ptr(mark_addr.base(), Rmark, Rscratch);
2629
2630 // if compare/exchange succeeded we found an unlocked object and we now have locked it
2631 // hence we are done
2632 cmp(Rmark, Rscratch);
2633 sub(Rscratch, STACK_BIAS, Rscratch);
2634 brx(Assembler::equal, false, Assembler::pt, done);
2635 delayed()->sub(Rscratch, SP, Rscratch); //pull next instruction into delay slot
2636
2637 // we did not find an unlocked object so see if this is a recursive case
2638 // sub(Rscratch, SP, Rscratch);
2639 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
2640 andcc(Rscratch, 0xfffff003, Rscratch);
2641 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2642 bind (done);
2643 return ;
2644 }
2645
2646 Label Egress ;
2647
2648 if (EmitSync & 256) {
2649 Label IsInflated ;
2650
2651 ld_ptr(mark_addr, Rmark); // fetch obj->mark
2652 // Triage: biased, stack-locked, neutral, inflated
2653 if (try_bias) {
2654 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
2655 // Invariant: if control reaches this point in the emitted stream
2656 // then Rmark has not been modified.
2657 }
2658
2659 // Store mark into displaced mark field in the on-stack basic-lock "box"
2660 // Critically, this must happen before the CAS
2661 // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty.
2662 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
2663 andcc(Rmark, 2, G0);
2664 brx(Assembler::notZero, false, Assembler::pn, IsInflated);
2665 delayed()->
2666
2667 // Try stack-lock acquisition.
2668 // Beware: the 1st instruction is in a delay slot 2669 mov(Rbox, Rscratch); 2670 or3(Rmark, markOopDesc::unlocked_value, Rmark); 2671 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2672 cas_ptr(mark_addr.base(), Rmark, Rscratch); 2673 cmp(Rmark, Rscratch); 2674 brx(Assembler::equal, false, Assembler::pt, done); 2675 delayed()->sub(Rscratch, SP, Rscratch); 2676 2677 // Stack-lock attempt failed - check for recursive stack-lock. 2678 // See the comments below about how we might remove this case. 2679 sub(Rscratch, STACK_BIAS, Rscratch); 2680 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); 2681 andcc(Rscratch, 0xfffff003, Rscratch); 2682 br(Assembler::always, false, Assembler::pt, done); 2683 delayed()-> st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2684 2685 bind(IsInflated); 2686 if (EmitSync & 64) { 2687 // If m->owner != null goto IsLocked 2688 // Pessimistic form: Test-and-CAS vs CAS 2689 // The optimistic form avoids RTS->RTO cache line upgrades. 2690 ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rscratch); 2691 andcc(Rscratch, Rscratch, G0); 2692 brx(Assembler::notZero, false, Assembler::pn, done); 2693 delayed()->nop(); 2694 // m->owner == null : it's unlocked. 2695 } 2696 2697 // Try to CAS m->owner from null to Self 2698 // Invariant: if we acquire the lock then _recursions should be 0. 2699 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark); 2700 mov(G2_thread, Rscratch); 2701 cas_ptr(Rmark, G0, Rscratch); 2702 cmp(Rscratch, G0); 2703 // Intentional fall-through into done 2704 } else { 2705 // Aggressively avoid the Store-before-CAS penalty 2706 // Defer the store into box->dhw until after the CAS 2707 Label IsInflated, Recursive ; 2708 2709 // Anticipate CAS -- Avoid RTS->RTO upgrade 2710 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); 2711 2712 ld_ptr(mark_addr, Rmark); // fetch obj->mark 2713 // Triage: biased, stack-locked, neutral, inflated 2714 2715 if (try_bias) { 2716 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters); 2717 // Invariant: if control reaches this point in the emitted stream 2718 // then Rmark has not been modified. 2719 } 2720 andcc(Rmark, 2, G0); 2721 brx(Assembler::notZero, false, Assembler::pn, IsInflated); 2722 delayed()-> // Beware - dangling delay-slot 2723 2724 // Try stack-lock acquisition. 2725 // Transiently install BUSY (0) encoding in the mark word. 2726 // if the CAS of 0 into the mark was successful then we execute: 2727 // ST box->dhw = mark -- save fetched mark in on-stack basiclock box 2728 // ST obj->mark = box -- overwrite transient 0 value 2729 // This presumes TSO, of course. 2730 2731 mov(0, Rscratch); 2732 or3(Rmark, markOopDesc::unlocked_value, Rmark); 2733 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2734 cas_ptr(mark_addr.base(), Rmark, Rscratch); 2735 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); 2736 cmp(Rscratch, Rmark); 2737 brx(Assembler::notZero, false, Assembler::pn, Recursive); 2738 delayed()->st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2739 if (counters != NULL) { 2740 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch); 2741 } 2742 ba(done); 2743 delayed()->st_ptr(Rbox, mark_addr); 2744 2745 bind(Recursive); 2746 // Stack-lock attempt failed - check for recursive stack-lock. 2747 // Tests show that we can remove the recursive case with no impact 2748 // on refworkload 0.83. 
If we need to reduce the size of the code
2749 // emitted by compiler_lock_object() the recursive case is a perfect
2750 // candidate.
2751 //
2752 // A more extreme idea is to always inflate on stack-lock recursion.
2753 // This lets us eliminate the recursive checks in compiler_lock_object
2754 // and compiler_unlock_object and the (box->dhw == 0) encoding.
2755 // A brief experiment - requiring changes to synchronizer.cpp and the
2756 // interpreter - showed a performance *increase*. In the same experiment I eliminated
2757 // the fast-path stack-lock code from the interpreter and always passed
2758 // control to the "slow" operators in synchronizer.cpp.
2759
2760 // Rscratch contains the fetched obj->mark value from the failed CAS.
2761 sub(Rscratch, STACK_BIAS, Rscratch);
2762 sub(Rscratch, SP, Rscratch);
2763 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
2764 andcc(Rscratch, 0xfffff003, Rscratch);
2765 if (counters != NULL) {
2766 // Accounting needs the Rscratch register
2767 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2768 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
2769 ba_short(done);
2770 } else {
2771 ba(done);
2772 delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2773 }
2774
2775 bind (IsInflated);
2776
2777 // Try to CAS m->owner from null to Self
2778 // Invariant: if we acquire the lock then _recursions should be 0.
2779 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
2780 mov(G2_thread, Rscratch);
2781 cas_ptr(Rmark, G0, Rscratch);
2782 andcc(Rscratch, Rscratch, G0); // set ICCs for done: icc.zf iff success
2783 // set icc.zf : 1=success 0=failure
2784 // ST box->displaced_header = NonZero.
2785 // Any non-zero value suffices:
2786 // markOopDesc::unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
2787 st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
2788 // Intentional fall-through into done
2789 }
2790
2791 bind (done);
2792 }
2793
2794 void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
2795 Register Rbox, Register Rscratch,
2796 bool try_bias) {
2797 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
2798
2799 Label done ;
2800
2801 if (EmitSync & 4) {
2802 cmp(SP, G0);
2803 return ;
2804 }
2805
2806 if (EmitSync & 8) {
2807 if (try_bias) {
2808 biased_locking_exit(mark_addr, Rscratch, done);
2809 }
2810
2811 // Test first if it is a fast recursive unlock
2812 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
2813 br_null_short(Rmark, Assembler::pt, done);
2814
2815 // Check if it is still a lightweight lock; this is true if we see
2816 // the stack address of the basicLock in the markOop of the object
2817 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
2818 cas_ptr(mark_addr.base(), Rbox, Rmark);
2819 ba(done);
2820 delayed()->cmp(Rbox, Rmark);
2821 bind(done);
2822 return ;
2823 }
2824
2825 // Beware ... If the aggregate size of the code emitted by CLO and CUO
2826 // is too large, performance rolls abruptly off a cliff.
2827 // This could be related to inlining policies, code cache management, or
2828 // I$ effects.
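// Fast-path unlock triage, roughly (a sketch of the code below):
//   dhw = box->displaced_header;
//   if (dhw == 0) goto done;                 // recursive stack-lock; nothing to undo
//   if ((obj->mark & 2) == 0) goto LStacked; // plain stack-lock: CAS the mark back
//   // otherwise inflated: fall into the 1-0 monitor exit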
2829 Label LStacked ; 2830 2831 if (try_bias) { 2832 // TODO: eliminate redundant LDs of obj->mark 2833 biased_locking_exit(mark_addr, Rscratch, done); 2834 } 2835 2836 ld_ptr(Roop, oopDesc::mark_offset_in_bytes(), Rmark); 2837 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch); 2838 andcc(Rscratch, Rscratch, G0); 2839 brx(Assembler::zero, false, Assembler::pn, done); 2840 delayed()->nop(); // consider: relocate fetch of mark, above, into this DS 2841 andcc(Rmark, 2, G0); 2842 brx(Assembler::zero, false, Assembler::pt, LStacked); 2843 delayed()->nop(); 2844 2845 // It's inflated 2846 // Conceptually we need a #loadstore|#storestore "release" MEMBAR before 2847 // the ST of 0 into _owner which releases the lock. This prevents loads 2848 // and stores within the critical section from reordering (floating) 2849 // past the store that releases the lock. But TSO is a strong memory model 2850 // and that particular flavor of barrier is a noop, so we can safely elide it. 2851 // Note that we use 1-0 locking by default for the inflated case. We 2852 // close the resultant (and rare) race by having contended threads in 2853 // monitorenter periodically poll _owner. 2854 2855 if (EmitSync & 1024) { 2856 // Emit code to check that _owner == Self 2857 // We could fold the _owner test into subsequent code more efficiently 2858 // than using a stand-alone check, but since _owner checking is off by 2859 // default we don't bother. We also might consider predicating the 2860 // _owner==Self check on Xcheck:jni or running on a debug build. 2861 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), Rscratch); 2862 orcc(Rscratch, G0, G0); 2863 brx(Assembler::notZero, false, Assembler::pn, done); 2864 delayed()->nop(); 2865 } 2866 2867 if (EmitSync & 512) { 2868 // classic lock release code absent 1-0 locking 2869 // m->Owner = null; 2870 // membar #storeload 2871 // if (m->cxq|m->EntryList) == null goto Success 2872 // if (m->succ != null) goto Success 2873 // if CAS (&m->Owner,0,Self) != 0 goto Success 2874 // goto SlowPath 2875 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), Rbox); 2876 orcc(Rbox, G0, G0); 2877 brx(Assembler::notZero, false, Assembler::pn, done); 2878 delayed()->nop(); 2879 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 2880 if (os::is_MP()) { membar(StoreLoad); } 2881 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)), Rscratch); 2882 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)), Rbox); 2883 orcc(Rbox, Rscratch, G0); 2884 brx(Assembler::zero, false, Assembler::pt, done); 2885 delayed()-> 2886 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch); 2887 andcc(Rscratch, Rscratch, G0); 2888 brx(Assembler::notZero, false, Assembler::pt, done); 2889 delayed()->andcc(G0, G0, G0); 2890 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark); 2891 mov(G2_thread, Rscratch); 2892 cas_ptr(Rmark, G0, Rscratch); 2893 cmp(Rscratch, G0); 2894 // invert icc.zf and goto done 2895 brx(Assembler::notZero, false, Assembler::pt, done); 2896 delayed()->cmp(G0, G0); 2897 br(Assembler::always, false, Assembler::pt, done); 2898 delayed()->cmp(G0, 1); 2899 } else { 2900 // 1-0 form : avoids CAS and MEMBAR in the common case 2901 // Do not bother to ratify that m->Owner == Self. 
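// The default 1-0 exit below is, in outline (a sketch; 'm' is the monitor
// addressed by Rmark; compare the EmitSync & 16384 variant's comment):
//   if (m->recursions != 0) goto SlowPath
//   m->Owner = null
//   if ((m->cxq | m->EntryList) == null) goto Success
//   membar #storeload
//   if (m->succ != null) goto Success
//   if (CAS(&m->Owner, null, Self) != null) goto Success  // another thread took over
//   goto SlowPath                                         // we re-acquired; must exit for real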
2902 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), Rbox);
2903 orcc(Rbox, G0, G0);
2904 brx(Assembler::notZero, false, Assembler::pn, done);
2905 delayed()->
2906 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)), Rscratch);
2907 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)), Rbox);
2908 orcc(Rbox, Rscratch, G0);
2909 if (EmitSync & 16384) {
2910 // As an optional optimization, if (EntryList|cxq) != null and _succ is null then
2911 // we should transfer control directly to the slow-path.
2912 // This test makes the reacquire operation below very infrequent.
2913 // The logic is equivalent to :
2914 // if (cxq|EntryList) == null : Owner=null; goto Success
2915 // if succ == null : goto SlowPath
2916 // Owner=null; membar #storeload
2917 // if succ != null : goto Success
2918 // if CAS(&Owner,null,Self) != null goto Success
2919 // goto SlowPath
2920 brx(Assembler::zero, true, Assembler::pt, done);
2921 delayed()->
2922 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2923 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch);
2924 andcc(Rscratch, Rscratch, G0) ;
2925 brx(Assembler::zero, false, Assembler::pt, done);
2926 delayed()->orcc(G0, 1, G0);
2927 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2928 } else {
2929 brx(Assembler::zero, false, Assembler::pt, done);
2930 delayed()->
2931 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2932 }
2933 if (os::is_MP()) { membar(StoreLoad); }
2934 // Check that _succ is (or remains) non-zero
2935 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch);
2936 andcc(Rscratch, Rscratch, G0);
2937 brx(Assembler::notZero, false, Assembler::pt, done);
2938 delayed()->andcc(G0, G0, G0);
2939 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
2940 mov(G2_thread, Rscratch);
2941 cas_ptr(Rmark, G0, Rscratch);
2942 cmp(Rscratch, G0);
2943 // invert icc.zf and goto done
2944 // A slightly better v8+/v9 idiom would be the following:
2945 // movrnz Rscratch,1,Rscratch
2946 // ba done
2947 // xorcc Rscratch,1,G0
2948 // In v8+ mode the idiom would be valid IFF Rscratch was a G or O register
2949 brx(Assembler::notZero, false, Assembler::pt, done);
2950 delayed()->cmp(G0, G0);
2951 br(Assembler::always, false, Assembler::pt, done);
2952 delayed()->cmp(G0, 1);
2953 }
2954
2955 bind (LStacked);
2956 // Consider: we could replace the expensive CAS in the exit
2957 // path with a simple ST of the displaced mark value fetched from
2958 // the on-stack basiclock box. That admits a race where a thread T2
2959 // in the slow lock path -- inflating with monitor M -- could race a
2960 // thread T1 in the fast unlock path, resulting in a missed wakeup for T2.
2961 // More precisely T1 in the stack-lock unlock path could "stomp" the
2962 // inflated mark value M installed by T2, resulting in an orphan
2963 // object monitor M and T2 becoming stranded. We can remedy that situation
2964 // by having T2 periodically poll the object's mark word using timed wait
2965 // operations. If T2 discovers that a stomp has occurred it vacates
2966 // the monitor M and wakes any other threads stranded on the now-orphan M.
2967 // In addition the monitor scavenger, which performs deflation,
2968 // would also need to check for orphan monitors and stranded threads.
2969 //
2970 // Finally, inflation is also used when T2 needs to assign a hashCode
2971 // to O and O is stack-locked by T1.
The "stomp" race could cause 2972 // an assigned hashCode value to be lost. We can avoid that condition 2973 // and provide the necessary hashCode stability invariants by ensuring 2974 // that hashCode generation is idempotent between copying GCs. 2975 // For example we could compute the hashCode of an object O as 2976 // O's heap address XOR some high quality RNG value that is refreshed 2977 // at GC-time. The monitor scavenger would install the hashCode 2978 // found in any orphan monitors. Again, the mechanism admits a 2979 // lost-update "stomp" WAW race but detects and recovers as needed. 2980 // 2981 // A prototype implementation showed excellent results, although 2982 // the scavenger and timeout code was rather involved. 2983 2984 cas_ptr(mark_addr.base(), Rbox, Rscratch); 2985 cmp(Rbox, Rscratch); 2986 // Intentional fall through into done ... 2987 2988 bind(done); 2989 } 2990 2991 2992 2993 void MacroAssembler::print_CPU_state() { 2994 // %%%%% need to implement this 2995 } 2996 2997 void MacroAssembler::verify_FPU(int stack_depth, const char* s) { 2998 // %%%%% need to implement this 2999 } 3000 3001 void MacroAssembler::push_IU_state() { 3002 // %%%%% need to implement this 3003 } 3004 3005 3006 void MacroAssembler::pop_IU_state() { 3007 // %%%%% need to implement this 3008 } 3009 3010 3011 void MacroAssembler::push_FPU_state() { 3012 // %%%%% need to implement this 3013 } 3014 3015 3016 void MacroAssembler::pop_FPU_state() { 3017 // %%%%% need to implement this 3018 } 3019 3020 3021 void MacroAssembler::push_CPU_state() { 3022 // %%%%% need to implement this 3023 } 3024 3025 3026 void MacroAssembler::pop_CPU_state() { 3027 // %%%%% need to implement this 3028 } 3029 3030 3031 3032 void MacroAssembler::verify_tlab() { 3033 #ifdef ASSERT 3034 if (UseTLAB && VerifyOops) { 3035 Label next, next2, ok; 3036 Register t1 = L0; 3037 Register t2 = L1; 3038 Register t3 = L2; 3039 3040 save_frame(0); 3041 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 3042 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2); 3043 or3(t1, t2, t3); 3044 cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next); 3045 STOP("assert(top >= start)"); 3046 should_not_reach_here(); 3047 3048 bind(next); 3049 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 3050 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2); 3051 or3(t3, t2, t3); 3052 cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2); 3053 STOP("assert(top <= end)"); 3054 should_not_reach_here(); 3055 3056 bind(next2); 3057 and3(t3, MinObjAlignmentInBytesMask, t3); 3058 cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok); 3059 STOP("assert(aligned)"); 3060 should_not_reach_here(); 3061 3062 bind(ok); 3063 restore(); 3064 } 3065 #endif 3066 } 3067 3068 3069 void MacroAssembler::eden_allocate( 3070 Register obj, // result: pointer to object after successful allocation 3071 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 3072 int con_size_in_bytes, // object size in bytes if known at compile time 3073 Register t1, // temp register 3074 Register t2, // temp register 3075 Label& slow_case // continuation point if fast allocation fails 3076 ){ 3077 // make sure arguments make sense 3078 assert_different_registers(obj, var_size_in_bytes, t1, t2); 3079 assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size"); 3080 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, 
"object size is not multiple of alignment"); 3081 3082 if (!Universe::heap()->supports_inline_contig_alloc()) { 3083 // No allocation in the shared eden. 3084 ba(slow_case); 3085 delayed()->nop(); 3086 } else { 3087 // get eden boundaries 3088 // note: we need both top & top_addr! 3089 const Register top_addr = t1; 3090 const Register end = t2; 3091 3092 CollectedHeap* ch = Universe::heap(); 3093 set((intx)ch->top_addr(), top_addr); 3094 intx delta = (intx)ch->end_addr() - (intx)ch->top_addr(); 3095 ld_ptr(top_addr, delta, end); 3096 ld_ptr(top_addr, 0, obj); 3097 3098 // try to allocate 3099 Label retry; 3100 bind(retry); 3101 #ifdef ASSERT 3102 // make sure eden top is properly aligned 3103 { 3104 Label L; 3105 btst(MinObjAlignmentInBytesMask, obj); 3106 br(Assembler::zero, false, Assembler::pt, L); 3107 delayed()->nop(); 3108 STOP("eden top is not properly aligned"); 3109 bind(L); 3110 } 3111 #endif // ASSERT 3112 const Register free = end; 3113 sub(end, obj, free); // compute amount of free space 3114 if (var_size_in_bytes->is_valid()) { 3115 // size is unknown at compile time 3116 cmp(free, var_size_in_bytes); 3117 brx(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case 3118 delayed()->add(obj, var_size_in_bytes, end); 3119 } else { 3120 // size is known at compile time 3121 cmp(free, con_size_in_bytes); 3122 brx(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case 3123 delayed()->add(obj, con_size_in_bytes, end); 3124 } 3125 // Compare obj with the value at top_addr; if still equal, swap the value of 3126 // end with the value at top_addr. If not equal, read the value at top_addr 3127 // into end. 3128 cas_ptr(top_addr, obj, end); 3129 // if someone beat us on the allocation, try again, otherwise continue 3130 cmp(obj, end); 3131 brx(Assembler::notEqual, false, Assembler::pn, retry); 3132 delayed()->mov(end, obj); // nop if successfull since obj == end 3133 3134 #ifdef ASSERT 3135 // make sure eden top is properly aligned 3136 { 3137 Label L; 3138 const Register top_addr = t1; 3139 3140 set((intx)ch->top_addr(), top_addr); 3141 ld_ptr(top_addr, 0, top_addr); 3142 btst(MinObjAlignmentInBytesMask, top_addr); 3143 br(Assembler::zero, false, Assembler::pt, L); 3144 delayed()->nop(); 3145 STOP("eden top is not properly aligned"); 3146 bind(L); 3147 } 3148 #endif // ASSERT 3149 } 3150 } 3151 3152 3153 void MacroAssembler::tlab_allocate( 3154 Register obj, // result: pointer to object after successful allocation 3155 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 3156 int con_size_in_bytes, // object size in bytes if known at compile time 3157 Register t1, // temp register 3158 Label& slow_case // continuation point if fast allocation fails 3159 ){ 3160 // make sure arguments make sense 3161 assert_different_registers(obj, var_size_in_bytes, t1); 3162 assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size"); 3163 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment"); 3164 3165 const Register free = t1; 3166 3167 verify_tlab(); 3168 3169 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj); 3170 3171 // calculate amount of free space 3172 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free); 3173 sub(free, obj, free); 3174 3175 Label done; 3176 if (var_size_in_bytes == noreg) { 3177 cmp(free, con_size_in_bytes); 3178 } else { 3179 
void MacroAssembler::tlab_allocate(
  Register obj,                          // result: pointer to object after successful allocation
  Register var_size_in_bytes,            // object size in bytes if unknown at compile time; invalid otherwise
  int      con_size_in_bytes,            // object size in bytes if   known at compile time
  Register t1,                           // temp register
  Label&   slow_case                     // continuation point if fast allocation fails
){
  // make sure arguments make sense
  assert_different_registers(obj, var_size_in_bytes, t1);
  assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size");
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");

  const Register free = t1;

  verify_tlab();

  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj);

  // calculate amount of free space
  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free);
  sub(free, obj, free);

  Label done;
  if (var_size_in_bytes == noreg) {
    cmp(free, con_size_in_bytes);
  } else {
    cmp(free, var_size_in_bytes);
  }
  br(Assembler::less, false, Assembler::pn, slow_case);
  // calculate the new top pointer
  if (var_size_in_bytes == noreg) {
    delayed()->add(obj, con_size_in_bytes, free);
  } else {
    delayed()->add(obj, var_size_in_bytes, free);
  }

  bind(done);

#ifdef ASSERT
  // make sure new free pointer is properly aligned
  {
    Label L;
    btst(MinObjAlignmentInBytesMask, free);
    br(Assembler::zero, false, Assembler::pt, L);
    delayed()->nop();
    STOP("updated TLAB free is not properly aligned");
    bind(L);
  }
#endif // ASSERT

  // update the tlab top pointer
  st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
  verify_tlab();
}

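// The TLAB fast path above, as a hedged C++ sketch (the accessor names
// are illustrative). No atomics are needed because the TLAB is
// thread-local:
//
//   HeapWord* tlab_allocate_ref(JavaThread* t, size_t size_in_bytes) {
//     char* obj = (char*)t->tlab_top();
//     if ((size_t)((char*)t->tlab_end() - obj) < size_in_bytes) {
//       return NULL;                    // not enough room: take the slow case
//     }
//     t->set_tlab_top((HeapWord*)(obj + size_in_bytes));
//     return (HeapWord*)obj;
//   }
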
void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) {
  Register top = O0;
  Register t1 = G1;
  Register t2 = G3;
  Register t3 = O1;
  assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */);
  Label do_refill, discard_tlab;

  if (!Universe::heap()->supports_inline_contig_alloc()) {
    // No allocation in the shared eden.
    ba(slow_case);
    delayed()->nop();
  }

  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top);
  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1);
  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2);

  // calculate amount of free space
  sub(t1, top, t1);
  srl_ptr(t1, LogHeapWordSize, t1);

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  cmp(t1, t2);

  brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab);
  // increment waste limit to prevent getting stuck on this slow path
  if (Assembler::is_simm13(ThreadLocalAllocBuffer::refill_waste_limit_increment())) {
    delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2);
  } else {
    delayed()->nop();
    // set64 does not use the temp register if the given constant is 32 bit. So
    // we can just use any register; using G0 results in the upper 32 bits of
    // that value being ignored.
    set64(ThreadLocalAllocBuffer::refill_waste_limit_increment(), t3, G0);
    add(t2, t3, t2);
  }

  st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
  if (TLABStats) {
    // increment number of slow_allocations
    ld(G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()), t2);
    add(t2, 1, t2);
    stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()));
  }
  ba(try_eden);
  delayed()->nop();

  bind(discard_tlab);
  if (TLABStats) {
    // increment number of refills
    ld(G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()), t2);
    add(t2, 1, t2);
    stw(t2, G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()));
    // accumulate wastage
    ld(G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()), t2);
    add(t2, t1, t2);
    stw(t2, G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()));
  }

  // if tlab is currently allocated (top or end != null) then
  // fill [top, end + alignment_reserve) with array object
  br_null_short(top, Assembler::pn, do_refill);

  set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2);
  st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word
  // set klass to intArrayKlass
  sub(t1, typeArrayOopDesc::header_size(T_INT), t1);
  add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1);
  sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1);
  st(t1, top, arrayOopDesc::length_offset_in_bytes());
  set((intptr_t)Universe::intArrayKlassObj_addr(), t2);
  ld_ptr(t2, 0, t2);
  // store klass last. concurrent GCs assume the klass length is valid if
  // the klass field is not null.
  store_klass(t2, top);
  verify_oop(top);

  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t1);
  sub(top, t1, t1); // size of tlab's allocated portion
  incr_allocated_bytes(t1, t2, t3);

  // refill the tlab with an eden allocation
  bind(do_refill);
  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1);
  sll_ptr(t1, LogHeapWordSize, t1);
  // allocate new tlab, address returned in top
  eden_allocate(top, t1, 0, t2, t3, slow_case);

  st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset()));
  st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
#ifdef ASSERT
  // check that tlab_size (t1) is still valid
  {
    Label ok;
    ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2);
    sll_ptr(t2, LogHeapWordSize, t2);
    cmp_and_br_short(t1, t2, Assembler::equal, Assembler::pt, ok);
    STOP("assert(t1 == tlab_size)");
    should_not_reach_here();

    bind(ok);
  }
#endif // ASSERT
  add(top, t1, top); // t1 is tlab_size
  sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top);
  st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset()));

  if (ZeroTLAB) {
    // This is a fast TLAB refill, therefore the GC is not notified of it.
    // So compiled code must fill the new TLAB with zeroes.
    ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2);
    zero_memory(t2, t1);
  }
  verify_tlab();
  ba(retry);
  delayed()->nop();
}

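// The keep-or-discard decision in tlab_refill() follows the usual TLAB
// waste heuristic; hedged pseudo-C++ sketch (names illustrative):
//
//   if (tlab_free_words > refill_waste_limit) {
//     // Discarding would waste too much: keep the TLAB, allocate this one
//     // object directly in eden, and raise the limit so we do not get
//     // stuck on this slow path.
//     refill_waste_limit += refill_waste_limit_increment;
//     allocate_in_shared_eden();
//   } else {
//     // Cheap to discard: plug the leftover space with a filler int[]
//     // (so the heap stays walkable), then refill from eden.
//     discard_and_refill();
//   }
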
void MacroAssembler::zero_memory(Register base, Register index) {
  assert_different_registers(base, index);
  Label loop;
  bind(loop);
  subcc(index, HeapWordSize, index);
  brx(Assembler::greaterEqual, true, Assembler::pt, loop);
  delayed()->st_ptr(G0, base, index);
}

void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes,
                                          Register t1, Register t2) {
  // Bump total bytes allocated by this thread
  assert(t1->is_global(), "must be global reg"); // so all 64 bits are saved on a context switch
  assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2);
  // v8 support has gone the way of the dodo
  ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1);
  add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1);
  stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset()));
}

Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
  switch (cond) {
    // Note some conditions are synonyms for others
    case Assembler::never:                return Assembler::always;
    case Assembler::zero:                 return Assembler::notZero;
    case Assembler::lessEqual:            return Assembler::greater;
    case Assembler::less:                 return Assembler::greaterEqual;
    case Assembler::lessEqualUnsigned:    return Assembler::greaterUnsigned;
    case Assembler::lessUnsigned:         return Assembler::greaterEqualUnsigned;
    case Assembler::negative:             return Assembler::positive;
    case Assembler::overflowSet:          return Assembler::overflowClear;
    case Assembler::always:               return Assembler::never;
    case Assembler::notZero:              return Assembler::zero;
    case Assembler::greater:              return Assembler::lessEqual;
    case Assembler::greaterEqual:         return Assembler::less;
    case Assembler::greaterUnsigned:      return Assembler::lessEqualUnsigned;
    case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned;
    case Assembler::positive:             return Assembler::negative;
    case Assembler::overflowClear:        return Assembler::overflowSet;
  }

  ShouldNotReachHere(); return Assembler::overflowClear;
}

void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr,
                              Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) {
  Condition negated_cond = negate_condition(cond);
  Label L;
  brx(negated_cond, false, Assembler::pt, L);
  delayed()->nop();
  inc_counter(counter_ptr, Rtmp1, Rtmp2);
  bind(L);
}

void MacroAssembler::inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2) {
  AddressLiteral addrlit(counter_addr);
  sethi(addrlit, Rtmp1);                 // Move hi22 bits into temporary register.
  Address addr(Rtmp1, addrlit.low10());  // Build an address with low10 bits.
  ld(addr, Rtmp2);
  inc(Rtmp2);
  st(Rtmp2, addr);
}

void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) {
  inc_counter((address) counter_addr, Rtmp1, Rtmp2);
}

SkipIfEqual::SkipIfEqual(
    MacroAssembler* masm, Register temp, const bool* flag_addr,
    Assembler::Condition condition) {
  _masm = masm;
  AddressLiteral flag(flag_addr);
  _masm->sethi(flag, temp);
  _masm->ldub(temp, flag.low10(), temp);
  _masm->tst(temp);
  _masm->br(condition, false, Assembler::pt, _label);
  _masm->delayed()->nop();
}

SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}


// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages. This clobbers tsp and scratch.
void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
                                     Register Rscratch) {
  // Copy the stack pointer into the temp stack pointer.
  mov(SP, Rtsp);

  // Bang stack for total size given plus stack shadow page size.
  // Bang one page at a time because a large size can overflow yellow and
  // red zones (the bang will fail but stack overflow handling can't tell that
  // it was a stack overflow bang vs a regular segv).
  int offset = os::vm_page_size();
  Register Roffset = Rscratch;

  Label loop;
  bind(loop);
  set((-offset)+STACK_BIAS, Rscratch);
  st(G0, Rtsp, Rscratch);
  set(offset, Roffset);
  sub(Rsize, Roffset, Rsize);
  cmp(Rsize, G0);
  br(Assembler::greater, false, Assembler::pn, loop);
  delayed()->sub(Rtsp, Roffset, Rtsp);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again. (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.) Skip this address by starting at i=1, and
  // touch a few more pages below. N.B. It is important to touch all
  // the way down to and including i=StackShadowPages.
  for (int i = 1; i < JavaThread::stack_shadow_zone_size() / os::vm_page_size(); i++) {
    set((-i*offset)+STACK_BIAS, Rscratch);
    st(G0, Rtsp, Rscratch);
  }
}

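// Conceptually, bang_stack_size() performs the following (hedged C++
// sketch; STACK_BIAS is omitted and the names are illustrative):
//
//   void bang_stack_ref(volatile char* tsp, intptr_t size,
//                       int page, int shadow_pages) {
//     do {
//       tsp[-page] = 0;              // touch one page at a time so a huge
//       tsp -= page;                 // size cannot jump over the guard zones
//       size -= page;
//     } while (size > 0);
//     for (int i = 1; i < shadow_pages; i++) {
//       tsp[-i * page] = 0;          // bang the shadow pages below as well
//     }
//   }
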
void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;

  ld_ptr(G2_thread, JavaThread::reserved_stack_activation_offset(), G4_scratch);
  cmp_and_brx_short(SP, G4_scratch, Assembler::lessUnsigned, Assembler::pt, no_reserved_zone_enabling);

  call_VM_leaf(L0, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), G2_thread);

  AddressLiteral stub(StubRoutines::throw_delayed_StackOverflowError_entry());
  jump_to(stub, G4_scratch);
  delayed()->restore();

  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

void MacroAssembler::load_mirror(Register mirror, Register method) {
  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  ld_ptr(method, in_bytes(Method::const_offset()), mirror);
  ld_ptr(mirror, in_bytes(ConstMethod::constants_offset()), mirror);
  ld_ptr(mirror, ConstantPool::pool_holder_offset_in_bytes(), mirror);
  ld_ptr(mirror, mirror_offset, mirror);
}

void MacroAssembler::load_klass(Register src_oop, Register klass) {
  // The number of bytes in this code is used by
  // MachCallDynamicJavaNode::ret_addr_offset()
  // if this changes, change that.
  if (UseCompressedClassPointers) {
    lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
    decode_klass_not_null(klass);
  } else {
    ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass);
  }
}

void MacroAssembler::store_klass(Register klass, Register dst_oop) {
  if (UseCompressedClassPointers) {
    assert(dst_oop != klass, "not enough registers");
    encode_klass_not_null(klass);
    st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
  } else {
    st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes());
  }
}

void MacroAssembler::store_klass_gap(Register s, Register d) {
  if (UseCompressedClassPointers) {
    assert(s != d, "not enough registers");
    st(s, d, oopDesc::klass_gap_offset_in_bytes());
  }
}

void MacroAssembler::load_heap_oop(const Address& s, Register d) {
  if (UseCompressedOops) {
    lduw(s, d);
    decode_heap_oop(d);
  } else {
    ld_ptr(s, d);
  }
}

void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) {
  if (UseCompressedOops) {
    lduw(s1, s2, d);
    decode_heap_oop(d, d);
  } else {
    ld_ptr(s1, s2, d);
  }
}

void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) {
  if (UseCompressedOops) {
    lduw(s1, simm13a, d);
    decode_heap_oop(d, d);
  } else {
    ld_ptr(s1, simm13a, d);
  }
}

void MacroAssembler::load_heap_oop(Register s1, RegisterOrConstant s2, Register d) {
  if (s2.is_constant()) load_heap_oop(s1, s2.as_constant(), d);
  else                  load_heap_oop(s1, s2.as_register(), d);
}

void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) {
  if (UseCompressedOops) {
    assert(s1 != d && s2 != d, "not enough registers");
    encode_heap_oop(d);
    st(d, s1, s2);
  } else {
    st_ptr(d, s1, s2);
  }
}

void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) {
  if (UseCompressedOops) {
    assert(s1 != d, "not enough registers");
    encode_heap_oop(d);
    st(d, s1, simm13a);
  } else {
    st_ptr(d, s1, simm13a);
  }
}

void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) {
  if (UseCompressedOops) {
    assert(a.base() != d, "not enough registers");
    encode_heap_oop(d);
    st(d, a, offset);
  } else {
    st_ptr(d, a, offset);
  }
}


void MacroAssembler::encode_heap_oop(Register src, Register dst) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  verify_oop(src);
  if (Universe::narrow_oop_base() == NULL) {
    srlx(src, LogMinObjAlignmentInBytes, dst);
    return;
  }
  Label done;
  if (src == dst) {
    // optimize for frequent case src == dst
    bpr(rc_nz, true, Assembler::pt, src, done);
    delayed()->sub(src, G6_heapbase, dst); // annulled if not taken
    bind(done);
    srlx(src, LogMinObjAlignmentInBytes, dst);
  } else {
    bpr(rc_z, false, Assembler::pn, src, done);
    delayed()->mov(G0, dst);
    // could be moved before the branch, annulling the delay slot,
    // but that may add some unneeded work decoding null
    sub(src, G6_heapbase, dst);
    srlx(dst, LogMinObjAlignmentInBytes, dst);
    bind(done);
  }
}

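// Hedged C++ sketch of the compressed-oop encoding implemented above
// (illustrative, not a HotSpot API): NULL must stay 0; everything else
// is made heap-base-relative and shifted by the object-alignment log.
//
//   uint32_t encode_heap_oop_ref(uintptr_t oop, uintptr_t base, int shift) {
//     if (oop == 0) return 0;                  // NULL encodes to 0
//     return (uint32_t)((oop - base) >> shift);
//   }
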
void MacroAssembler::encode_heap_oop_not_null(Register r) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  verify_oop(r);
  if (Universe::narrow_oop_base() != NULL)
    sub(r, G6_heapbase, r);
  srlx(r, LogMinObjAlignmentInBytes, r);
}

void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  verify_oop(src);
  if (Universe::narrow_oop_base() == NULL) {
    srlx(src, LogMinObjAlignmentInBytes, dst);
  } else {
    sub(src, G6_heapbase, dst);
    srlx(dst, LogMinObjAlignmentInBytes, dst);
  }
}

// Same algorithm as oops.inline.hpp decode_heap_oop.
void MacroAssembler::decode_heap_oop(Register src, Register dst) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  sllx(src, LogMinObjAlignmentInBytes, dst);
  if (Universe::narrow_oop_base() != NULL) {
    Label done;
    bpr(rc_nz, true, Assembler::pt, dst, done);
    delayed()->add(dst, G6_heapbase, dst); // annulled if not taken
    bind(done);
  }
  verify_oop(dst);
}

void MacroAssembler::decode_heap_oop_not_null(Register r) {
  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
  // pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  sllx(r, LogMinObjAlignmentInBytes, r);
  if (Universe::narrow_oop_base() != NULL)
    add(r, G6_heapbase, r);
}

void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
  // pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  assert (UseCompressedOops, "must be compressed");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  sllx(src, LogMinObjAlignmentInBytes, dst);
  if (Universe::narrow_oop_base() != NULL)
    add(dst, G6_heapbase, dst);
}

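// And the matching decode direction (hedged illustrative sketch): shift
// the narrow value back up and add the heap base unless the base is NULL
// (zero-based compressed oops); narrow 0 must decode to NULL again.
//
//   uintptr_t decode_heap_oop_ref(uint32_t narrow, uintptr_t base, int shift) {
//     if (narrow == 0) return 0;               // 0 decodes to NULL
//     return ((uintptr_t)narrow << shift) + base;
//   }
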
void MacroAssembler::encode_klass_not_null(Register r) {
  assert (UseCompressedClassPointers, "must be compressed");
  if (Universe::narrow_klass_base() != NULL) {
    assert(r != G6_heapbase, "bad register choice");
    set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
    sub(r, G6_heapbase, r);
    if (Universe::narrow_klass_shift() != 0) {
      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
      srlx(r, LogKlassAlignmentInBytes, r);
    }
    reinit_heapbase();
  } else {
    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
    srlx(r, Universe::narrow_klass_shift(), r);
  }
}

void MacroAssembler::encode_klass_not_null(Register src, Register dst) {
  if (src == dst) {
    encode_klass_not_null(src);
  } else {
    assert (UseCompressedClassPointers, "must be compressed");
    if (Universe::narrow_klass_base() != NULL) {
      set((intptr_t)Universe::narrow_klass_base(), dst);
      sub(src, dst, dst);
      if (Universe::narrow_klass_shift() != 0) {
        srlx(dst, LogKlassAlignmentInBytes, dst);
      }
    } else {
      // shift src into dst
      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
      srlx(src, Universe::narrow_klass_shift(), dst);
    }
  }
}

// Function instr_size_for_decode_klass_not_null() counts the instructions
// generated by decode_klass_not_null() and reinit_heapbase(). Hence, if
// the instructions they generate change, then this method needs to be updated.
int MacroAssembler::instr_size_for_decode_klass_not_null() {
  assert (UseCompressedClassPointers, "only for compressed klass ptrs");
  int num_instrs = 1;  // shift src,dst or add
  if (Universe::narrow_klass_base() != NULL) {
    // set + add + set
    num_instrs += insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) +
                  insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base());
    if (Universe::narrow_klass_shift() != 0) {
      num_instrs += 1;  // sllx
    }
  }
  return num_instrs * BytesPerInstWord;
}

// !!! If the instructions that get generated here change then function
// instr_size_for_decode_klass_not_null() needs to get updated.
void MacroAssembler::decode_klass_not_null(Register r) {
  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
  // pd_code_size_limit.
  assert (UseCompressedClassPointers, "must be compressed");
  if (Universe::narrow_klass_base() != NULL) {
    assert(r != G6_heapbase, "bad register choice");
    set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
    if (Universe::narrow_klass_shift() != 0)
      sllx(r, LogKlassAlignmentInBytes, r);
    add(r, G6_heapbase, r);
    reinit_heapbase();
  } else {
    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
    sllx(r, Universe::narrow_klass_shift(), r);
  }
}

void MacroAssembler::decode_klass_not_null(Register src, Register dst) {
  if (src == dst) {
    decode_klass_not_null(src);
  } else {
    // Do not add assert code to this unless you change vtableStubs_sparc.cpp
    // pd_code_size_limit.
    assert (UseCompressedClassPointers, "must be compressed");
    if (Universe::narrow_klass_base() != NULL) {
      if (Universe::narrow_klass_shift() != 0) {
        assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice");
        set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
        sllx(src, LogKlassAlignmentInBytes, dst);
        add(dst, G6_heapbase, dst);
        reinit_heapbase();
      } else {
        set((intptr_t)Universe::narrow_klass_base(), dst);
        add(src, dst, dst);
      }
    } else {
      // shift/mov src into dst.
      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
      sllx(src, Universe::narrow_klass_shift(), dst);
    }
  }
}

void MacroAssembler::reinit_heapbase() {
  if (UseCompressedOops || UseCompressedClassPointers) {
    if (Universe::heap() != NULL) {
      set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase);
    } else {
      AddressLiteral base(Universe::narrow_ptrs_base_addr());
      load_ptr_contents(base, G6_heapbase);
    }
  }
}

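// Compressed class pointers follow the same shape as compressed oops,
// except a klass pointer is never NULL, so no zero check is needed.
// Hedged illustrative sketch:
//
//   uint32_t  encode_klass_ref(uintptr_t k, uintptr_t base, int shift) {
//     return (uint32_t)((k - base) >> shift);
//   }
//   uintptr_t decode_klass_ref(uint32_t nk, uintptr_t base, int shift) {
//     return ((uintptr_t)nk << shift) + base;
//   }
//
// Since G6_heapbase normally holds the compressed-oop base, the klass
// routines above load the klass base into it temporarily and then call
// reinit_heapbase() to restore it.
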
#ifdef COMPILER2

// Compress char[] to byte[] by compressing 16 bytes at once. Return 0 on failure.
void MacroAssembler::string_compress_16(Register src, Register dst, Register cnt, Register result,
                                        Register tmp1, Register tmp2, Register tmp3, Register tmp4,
                                        FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, Label& Ldone) {
  Label Lloop, Lslow;
  assert(UseVIS >= 3, "VIS3 is required");
  assert_different_registers(src, dst, cnt, tmp1, tmp2, tmp3, tmp4, result);
  assert_different_registers(ftmp1, ftmp2, ftmp3);

  // Check if cnt >= 8 (= 16 bytes)
  cmp(cnt, 8);
  br(Assembler::less, false, Assembler::pn, Lslow);
  delayed()->mov(cnt, result); // copy count

  // Check for 8-byte alignment of src and dst
  or3(src, dst, tmp1);
  andcc(tmp1, 7, G0);
  br(Assembler::notZero, false, Assembler::pn, Lslow);
  delayed()->nop();

  // Set mask for bshuffle instruction
  Register mask = tmp4;
  set(0x13579bdf, mask);
  bmask(mask, G0, G0);

  // Set mask to 0xff00 ff00 ff00 ff00 to check for non-latin1 characters
  Assembler::sethi(0xff00fc00, mask); // mask = 0x0000 0000 ff00 fc00
  add(mask, 0x300, mask);             // mask = 0x0000 0000 ff00 ff00
  sllx(mask, 32, tmp1);               // tmp1 = 0xff00 ff00 0000 0000
  or3(mask, tmp1, mask);              // mask = 0xff00 ff00 ff00 ff00

  // Load first 8 bytes
  ldx(src, 0, tmp1);

  bind(Lloop);
  // Load next 8 bytes
  ldx(src, 8, tmp2);

  // Check for non-latin1 character by testing if the most significant byte of a char is set.
  // Although we have to move the data between integer and floating point registers, this is
  // still faster than the corresponding VIS instructions (ford/fand/fcmpd).
  or3(tmp1, tmp2, tmp3);
  btst(tmp3, mask);
  // annul zeroing if branch is not taken to preserve original count
  brx(Assembler::notZero, true, Assembler::pn, Ldone);
  delayed()->mov(G0, result); // 0 - failed

  // Move bytes into float register
  movxtod(tmp1, ftmp1);
  movxtod(tmp2, ftmp2);

  // Compress by copying one byte per char from ftmp1 and ftmp2 to ftmp3
  bshuffle(ftmp1, ftmp2, ftmp3);
  stf(FloatRegisterImpl::D, ftmp3, dst, 0);

  // Increment addresses and decrement count
  inc(src, 16);
  inc(dst, 8);
  dec(cnt, 8);

  cmp(cnt, 8);
  // annul LDX if branch is not taken to prevent access past end of string
  br(Assembler::greaterEqual, true, Assembler::pt, Lloop);
  delayed()->ldx(src, 0, tmp1);

  // Fallback to slow version
  bind(Lslow);
}

// Compress char[] to byte[]. Return 0 on failure.
void MacroAssembler::string_compress(Register src, Register dst, Register cnt, Register result, Register tmp, Label& Ldone) {
  Label Lloop;
  assert_different_registers(src, dst, cnt, tmp, result);

  lduh(src, 0, tmp);

  bind(Lloop);
  inc(src, sizeof(jchar));
  cmp(tmp, 0xff);
  // annul zeroing if branch is not taken to preserve original count
  br(Assembler::greater, true, Assembler::pn, Ldone); // don't check xcc
  delayed()->mov(G0, result); // 0 - failed
  deccc(cnt);
  stb(tmp, dst, 0);
  inc(dst);
  // annul LDUH if branch is not taken to prevent access past end of string
  br(Assembler::notZero, true, Assembler::pt, Lloop);
  delayed()->lduh(src, 0, tmp); // hoisted
}

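// What both compress variants compute, as a hedged scalar C++ sketch
// (illustrative reference only):
//
//   size_t string_compress_ref(const uint16_t* src, uint8_t* dst, size_t cnt) {
//     for (size_t i = 0; i < cnt; i++) {
//       if (src[i] > 0xff) return 0;   // non-latin1 char: report failure
//       dst[i] = (uint8_t)src[i];
//     }
//     return cnt;                      // success
//   }
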
// Inflate byte[] to char[] by inflating 16 bytes at once.
void MacroAssembler::string_inflate_16(Register src, Register dst, Register cnt, Register tmp,
                                       FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, FloatRegister ftmp4, Label& Ldone) {
  Label Lloop, Lslow;
  assert(UseVIS >= 3, "VIS3 is required");
  assert_different_registers(src, dst, cnt, tmp);
  assert_different_registers(ftmp1, ftmp2, ftmp3, ftmp4);

  // Check if cnt >= 8 (= 16 bytes)
  cmp(cnt, 8);
  br(Assembler::less, false, Assembler::pn, Lslow);
  delayed()->nop();

  // Check for 8-byte alignment of src and dst
  or3(src, dst, tmp);
  andcc(tmp, 7, G0);
  br(Assembler::notZero, false, Assembler::pn, Lslow);
  // Initialize float register to zero
  FloatRegister zerof = ftmp4;
  delayed()->fzero(FloatRegisterImpl::D, zerof);

  // Load first 8 bytes
  ldf(FloatRegisterImpl::D, src, 0, ftmp1);

  bind(Lloop);
  inc(src, 8);
  dec(cnt, 8);

  // Inflate the string by interleaving each byte from the source array
  // with a zero byte and storing the result in the destination array.
  fpmerge(zerof, ftmp1->successor(), ftmp2);
  stf(FloatRegisterImpl::D, ftmp2, dst, 8);
  fpmerge(zerof, ftmp1, ftmp3);
  stf(FloatRegisterImpl::D, ftmp3, dst, 0);

  inc(dst, 16);

  cmp(cnt, 8);
  // annul LDX if branch is not taken to prevent access past end of string
  br(Assembler::greaterEqual, true, Assembler::pt, Lloop);
  delayed()->ldf(FloatRegisterImpl::D, src, 0, ftmp1);

  // Fallback to slow version
  bind(Lslow);
}

// Inflate byte[] to char[].
void MacroAssembler::string_inflate(Register src, Register dst, Register cnt, Register tmp, Label& Ldone) {
  Label Loop;
  assert_different_registers(src, dst, cnt, tmp);

  ldub(src, 0, tmp);
  bind(Loop);
  inc(src);
  deccc(cnt);
  sth(tmp, dst, 0);
  inc(dst, sizeof(jchar));
  // annul LDUB if branch is not taken to prevent access past end of string
  br(Assembler::notZero, true, Assembler::pt, Loop);
  delayed()->ldub(src, 0, tmp); // hoisted
}

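// Inflation is the inverse of compression: each latin1 byte is
// zero-extended to a 16-bit char, and it cannot fail. Hedged C++ sketch:
//
//   void string_inflate_ref(const uint8_t* src, uint16_t* dst, size_t cnt) {
//     for (size_t i = 0; i < cnt; i++) {
//       dst[i] = src[i];               // implicit zero-extension
//     }
//   }
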
void MacroAssembler::string_compare(Register str1, Register str2,
                                    Register cnt1, Register cnt2,
                                    Register tmp1, Register tmp2,
                                    Register result, int ae) {
  Label Ldone, Lloop;
  assert_different_registers(str1, str2, cnt1, cnt2, tmp1, result);
  int stride1, stride2;

  // Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a)
  // we interchange str1 and str2 in the UL case and negate the result.
  // This way, str1 is always latin1 encoded, except for the UU case.

  if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
    srl(cnt2, 1, cnt2);
  }

  // See if the lengths are different, and calculate min in cnt1.
  // Save diff in case we need it for a tie-breaker.
  Label Lskip;
  Register diff = tmp1;
  subcc(cnt1, cnt2, diff);
  br(Assembler::greater, true, Assembler::pt, Lskip);
  // cnt2 is shorter, so use its count:
  delayed()->mov(cnt2, cnt1);
  bind(Lskip);

  // Rename registers
  Register limit1 = cnt1;
  Register limit2 = limit1;
  Register chr1   = result;
  Register chr2   = cnt2;
  if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
    // We need an additional register to keep track of two limits
    assert_different_registers(str1, str2, cnt1, cnt2, tmp1, tmp2, result);
    limit2 = tmp2;
  }

  // Is the minimum length zero?
  cmp(limit1, (int)0); // use cast to resolve overloading ambiguity
  br(Assembler::equal, true, Assembler::pn, Ldone);
  // result is difference in lengths
  if (ae == StrIntrinsicNode::UU) {
    delayed()->sra(diff, 1, result);  // Divide by 2 to get number of chars
  } else {
    delayed()->mov(diff, result);
  }

  // Load first characters
  if (ae == StrIntrinsicNode::LL) {
    stride1 = stride2 = sizeof(jbyte);
    ldub(str1, 0, chr1);
    ldub(str2, 0, chr2);
  } else if (ae == StrIntrinsicNode::UU) {
    stride1 = stride2 = sizeof(jchar);
    lduh(str1, 0, chr1);
    lduh(str2, 0, chr2);
  } else {
    stride1 = sizeof(jbyte);
    stride2 = sizeof(jchar);
    ldub(str1, 0, chr1);
    lduh(str2, 0, chr2);
  }

  // Compare first characters
  subcc(chr1, chr2, chr1);
  br(Assembler::notZero, false, Assembler::pt, Ldone);
  assert(chr1 == result, "result must be pre-placed");
  delayed()->nop();

  // Check if the strings start at same location
  cmp(str1, str2);
  brx(Assembler::equal, true, Assembler::pn, Ldone);
  delayed()->mov(G0, result);  // result is zero

  // We have no guarantee that on 64 bit the higher half of limit is 0
  signx(limit1);

  // Get limit
  if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
    sll(limit1, 1, limit2);
    subcc(limit2, stride2, chr2);
  }
  subcc(limit1, stride1, chr1);
  br(Assembler::zero, true, Assembler::pn, Ldone);
  // result is difference in lengths
  if (ae == StrIntrinsicNode::UU) {
    delayed()->sra(diff, 1, result);  // Divide by 2 to get number of chars
  } else {
    delayed()->mov(diff, result);
  }

  // Shift str1 and str2 to the end of the arrays, negate limit
  add(str1, limit1, str1);
  add(str2, limit2, str2);
  neg(chr1, limit1);  // limit1 = -(limit1-stride1)
  if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
    neg(chr2, limit2);  // limit2 = -(limit2-stride2)
  }

  // Compare the rest of the characters
  load_sized_value(Address(str1, limit1), chr1, (ae == StrIntrinsicNode::UU) ? 2 : 1, false);

  bind(Lloop);
  load_sized_value(Address(str2, limit2), chr2, (ae == StrIntrinsicNode::LL) ? 1 : 2, false);

  subcc(chr1, chr2, chr1);
  br(Assembler::notZero, false, Assembler::pt, Ldone);
  assert(chr1 == result, "result must be pre-placed");
  delayed()->inccc(limit1, stride1);
  if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
    inccc(limit2, stride2);
  }

  // annul LDUB if branch is not taken to prevent access past end of string
  br(Assembler::notZero, true, Assembler::pt, Lloop);
  delayed()->load_sized_value(Address(str1, limit1), chr1, (ae == StrIntrinsicNode::UU) ? 2 : 1, false);

  // If strings are equal up to min length, return the length difference.
  if (ae == StrIntrinsicNode::UU) {
    // Divide by 2 to get number of chars
    sra(diff, 1, result);
  } else {
    mov(diff, result);
  }

  // Otherwise, return the difference between the first mismatched chars.
  bind(Ldone);
  if (ae == StrIntrinsicNode::UL) {
    // Negate result (see note above)
    neg(result);
  }
}

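// string_compare() uses the classic end-biased indexing idiom: bias each
// pointer to the end of its array and run a negative index up toward zero,
// so a single increment is both the induction step and the loop test.
// Hedged C++ sketch for the LL case (names illustrative):
//
//   int string_compare_ll_ref(const uint8_t* s1, const uint8_t* s2, int len) {
//     s1 += len; s2 += len;                    // bias to the array ends
//     for (int i = -len; i < 0; i++) {         // i rises toward 0
//       int d = s1[i] - s2[i];
//       if (d != 0) return d;                  // first mismatch decides
//     }
//     return 0;                                // caller adds the length diff
//   }
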
void MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2,
                                  Register limit, Register tmp, Register result, bool is_byte) {
  Label Ldone, Lloop, Lremaining;
  assert_different_registers(ary1, ary2, limit, tmp, result);

  int length_offset = arrayOopDesc::length_offset_in_bytes();
  int base_offset   = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR);
  assert(base_offset % 8 == 0, "Base offset must be 8-byte aligned");

  if (is_array_equ) {
    // return true if the same array
    cmp(ary1, ary2);
    brx(Assembler::equal, true, Assembler::pn, Ldone);
    delayed()->mov(1, result);  // equal

    br_null(ary1, true, Assembler::pn, Ldone);
    delayed()->clr(result);     // not equal

    br_null(ary2, true, Assembler::pn, Ldone);
    delayed()->clr(result);     // not equal

    // load the lengths of arrays
    ld(Address(ary1, length_offset), limit);
    ld(Address(ary2, length_offset), tmp);

    // return false if the two arrays are not the same length
    cmp(limit, tmp);
    br(Assembler::notEqual, true, Assembler::pn, Ldone);
    delayed()->clr(result);     // not equal
  }

  cmp_zero_and_br(Assembler::zero, limit, Ldone, true, Assembler::pn);
  delayed()->mov(1, result); // zero-length arrays are equal

  if (is_array_equ) {
    // load array addresses
    add(ary1, base_offset, ary1);
    add(ary2, base_offset, ary2);
    // set byte count
    if (!is_byte) {
      sll(limit, exact_log2(sizeof(jchar)), limit);
    }
  } else {
    // We have no guarantee that on 64 bit the higher half of limit is 0
    signx(limit);
  }

#ifdef ASSERT
  // Sanity check for doubleword (8-byte) alignment of ary1 and ary2.
  // Guaranteed on 64-bit systems (see arrayOopDesc::header_size_in_bytes()).
  Label Laligned;
  or3(ary1, ary2, tmp);
  andcc(tmp, 7, tmp);
  br_null_short(tmp, Assembler::pn, Laligned);
  STOP("First array element is not 8-byte aligned.");
  should_not_reach_here();
  bind(Laligned);
#endif

  // Shift ary1 and ary2 to the end of the arrays, negate limit
  add(ary1, limit, ary1);
  add(ary2, limit, ary2);
  neg(limit, limit);

  // MAIN LOOP
  // Load and compare array elements of size 'byte_width' until the elements are not
  // equal or we reached the end of the arrays. If the size of the arrays is not a
  // multiple of 'byte_width', we simply read over the end of the array, bail out and
  // compare the remaining bytes below by skipping the garbage bytes.
  ldx(ary1, limit, result);
  bind(Lloop);
  ldx(ary2, limit, tmp);
  inccc(limit, 8);
  // Bail out if we reached the end (but still do the comparison)
  br(Assembler::positive, false, Assembler::pn, Lremaining);
  delayed()->cmp(result, tmp);
  // Check equality of elements
  brx(Assembler::equal, false, Assembler::pt, target(Lloop));
  delayed()->ldx(ary1, limit, result);

  ba(Ldone);
  delayed()->clr(result); // not equal

  // TAIL COMPARISON
  // We got here because we reached the end of the arrays. 'limit' is the number of
  // garbage bytes we may have compared by reading over the end of the arrays. Shift
  // out the garbage and compare the remaining elements.
  bind(Lremaining);
  // Optimistic shortcut: elements potentially including garbage are equal
  brx(Assembler::equal, true, Assembler::pt, target(Ldone));
  delayed()->mov(1, result); // equal
  // Shift 'limit' bytes to the right and compare
  sll(limit, 3, limit); // bytes to bits
  srlx(result, limit, result);
  srlx(tmp, limit, tmp);
  cmp(result, tmp);
  clr(result);
  movcc(Assembler::equal, false, xcc, 1, result);

  bind(Ldone);
}

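// The tail trick above in hedged C++ form: after 8-byte chunks that may
// overrun the arrays, both words are shifted right by 8 * garbage_bytes
// so only the valid bytes are compared (on big-endian SPARC the bytes
// read past the end sit in the low-order bits). Illustrative sketch:
//
//   bool tail_equal_ref(uint64_t a, uint64_t b, int garbage_bytes) {
//     int sh = garbage_bytes * 8;              // bytes to bits
//     return (a >> sh) == (b >> sh);           // garbage shifted out
//   }
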
void MacroAssembler::has_negatives(Register inp, Register size, Register result, Register t2, Register t3, Register t4, Register t5) {

  // test for negative bytes in input string of a given size
  // result 1 if found, 0 otherwise.

  Label Lcore, Ltail, Lreturn, Lcore_rpt;

  assert_different_registers(inp, size, t2, t3, t4, t5, result);

  Register i     = result;  // result used as integer index i until very end
  Register lmask = t2;      // t2 is aliased to lmask

  // INITIALIZATION
  // ===========================================================
  // initialize highbits mask -> lmask = 0x8080808080808080  (8B/64b)
  // compute unaligned offset -> i
  // compute core end index   -> t5
  Assembler::sethi(0x80808000, t2);   //! sethi macro fails to emit optimal
  add(t2, 0x80, t2);
  sllx(t2, 32, t3);
  or3(t3, t2, lmask);                 // 0x8080808080808080 -> lmask
  sra(size, 0, size);
  andcc(inp, 0x7, i);                 // unaligned offset -> i
  br(Assembler::zero, true, Assembler::pn, Lcore); // starts 8B aligned?
  delayed()->add(size, -8, t5);       // (annulled) core end index -> t5

  // ===========================================================

  // UNALIGNED HEAD
  // ===========================================================
  // * unaligned head handling: grab aligned 8B containing unaligned inp(ut)
  // * obliterate (ignore) bytes outside string by shifting off reg ends
  // * compare with bitmask, short circuit return true if one or more high
  //   bits set.
  cmp(size, 0);
  br(Assembler::zero, true, Assembler::pn, Lreturn); // short-circuit?
  delayed()->mov(0, result);          // annulled so i not clobbered for following
  neg(i, t4);
  add(i, size, t5);
  ldx(inp, t4, t3);                   // raw aligned 8B containing unaligned head -> t3
  mov(8, t4);
  sub(t4, t5, t4);
  sra(t4, 31, t5);
  andn(t4, t5, t5);
  add(i, t5, t4);
  sll(t5, 3, t5);
  sll(t4, 3, t4);                     // # bits to shift right, left -> t5,t4
  srlx(t3, t5, t3);
  sllx(t3, t4, t3);                   // bytes outside string in 8B header obliterated -> t3
  andcc(lmask, t3, G0);
  brx(Assembler::notZero, true, Assembler::pn, Lreturn); // short circuit?
  delayed()->mov(1, result);          // annulled so i not clobbered for following
  add(size, -8, t5);                  // core end index -> t5
  mov(8, t4);
  sub(t4, i, i);                      // # bytes examined in unaligned head (<8) -> i
  // ===========================================================

  // ALIGNED CORE
  // ===========================================================
  // * iterate index i over aligned 8B sections of core, comparing with
  //   bitmask, short circuit return true if one or more high bits set
  // t5 contains core end index/loop limit which is the index
  //    of the MSB of last (unaligned) 8B fully contained in the string.
  // inp   contains address of first byte in string/array
  // lmask contains 8B high bit mask for comparison
  // i     contains next index to be processed (adr. inp+i is on 8B boundary)
  bind(Lcore);
  cmp_and_br_short(i, t5, Assembler::greater, Assembler::pn, Ltail);
  bind(Lcore_rpt);
  ldx(inp, i, t3);
  andcc(t3, lmask, G0);
  brx(Assembler::notZero, true, Assembler::pn, Lreturn);
  delayed()->mov(1, result);          // annulled so i not clobbered for following
  add(i, 8, i);
  cmp_and_br_short(i, t5, Assembler::lessEqual, Assembler::pn, Lcore_rpt);
  // ===========================================================

  // ALIGNED TAIL (<8B)
  // ===========================================================
  // handle aligned tail of 7B or less as complete 8B, obliterating end of
  // string bytes by shifting them off end, compare what's left with bitmask
  // inp   contains address of first byte in string/array
  // lmask contains 8B high bit mask for comparison
  // i     contains next index to be processed (adr. inp+i is on 8B boundary)
  bind(Ltail);
  subcc(size, i, t4);                 // # of remaining bytes in string -> t4
  // return 0 if no more remaining bytes
  br(Assembler::lessEqual, true, Assembler::pn, Lreturn);
  delayed()->mov(0, result);          // annulled so i not clobbered for following
  ldx(inp, i, t3);                    // load final 8B (aligned) containing tail -> t3
  mov(8, t5);
  sub(t5, t4, t4);
  mov(0, result);                     // ** i clobbered at this point
  sll(t4, 3, t4);                     // bits beyond end of string -> t4
  srlx(t3, t4, t3);                   // bytes beyond end now obliterated -> t3
  andcc(lmask, t3, G0);
  movcc(Assembler::notZero, false, xcc, 1, result);
  bind(Lreturn);
}

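// The core test is a standard SWAR check: a byte is negative (>= 0x80)
// iff its high bit is set, so eight bytes are tested at once against
// 0x8080808080808080. Hedged C++ sketch of the idea (assumes <cstring>):
//
//   bool has_negatives_ref(const uint8_t* p, size_t n) {
//     const uint64_t lmask = 0x8080808080808080ULL;
//     size_t i = 0;
//     for (; i + 8 <= n; i += 8) {
//       uint64_t w;
//       memcpy(&w, p + i, 8);                  // one 8-byte chunk
//       if (w & lmask) return true;            // some high bit is set
//     }
//     for (; i < n; i++) {
//       if (p[i] & 0x80) return true;          // leftover tail bytes
//     }
//     return false;
//   }
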
#endif


// Use BIS for zeroing (count is in bytes).
void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) {
  assert(UseBlockZeroing && VM_Version::has_block_zeroing(), "only works with BIS zeroing");
  Register end = count;
  int cache_line_size = VM_Version::prefetch_data_size();
  assert(cache_line_size > 0, "cache line size should be known for this code");
  // Minimum count when BIS zeroing can be used since
  // it needs membar which is expensive.
  int block_zero_size = MAX2(cache_line_size*3, (int)BlockZeroingLowLimit);

  Label small_loop;
  // Check if count is negative (dead code) or zero.
  // Note, count uses 64bit in 64 bit VM.
  cmp_and_brx_short(count, 0, Assembler::lessEqual, Assembler::pn, Ldone);

  // Use BIS zeroing only for big arrays since it requires membar.
  if (Assembler::is_simm13(block_zero_size)) { // < 4096
    cmp(count, block_zero_size);
  } else {
    set(block_zero_size, temp);
    cmp(count, temp);
  }
  br(Assembler::lessUnsigned, false, Assembler::pt, small_loop);
  delayed()->add(to, count, end);

  // Note: size is >= three (32 bytes) cache lines.

  // Clean the beginning of space up to next cache line.
  for (int offs = 0; offs < cache_line_size; offs += 8) {
    stx(G0, to, offs);
  }

  // align to next cache line
  add(to, cache_line_size, to);
  and3(to, -cache_line_size, to);

  // Note: size left >= two (32 bytes) cache lines.

  // BIS should not be used to zero tail (64 bytes)
  // to avoid zeroing a header of the following object.
  sub(end, (cache_line_size*2)-8, end);

  Label bis_loop;
  bind(bis_loop);
  stxa(G0, to, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
  add(to, cache_line_size, to);
  cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, bis_loop);

  // BIS needs membar.
  membar(Assembler::StoreLoad);

  add(end, (cache_line_size*2)-8, end); // restore end
  cmp_and_brx_short(to, end, Assembler::greaterEqualUnsigned, Assembler::pn, Ldone);

  // Clean the tail.
  bind(small_loop);
  stx(G0, to, 0);
  add(to, 8, to);
  cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, small_loop);
  nop(); // Separate short branches
}

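// Overall shape of the BIS zeroing above, as a hedged pseudo-C++ sketch;
// block_init_store_line() and membar_store_load() are illustrative
// stand-ins for the stxa/membar instructions:
//
//   void bis_zero_ref(char* to, char* end, int line) {
//     while ((uintptr_t)to % line) {           // head: plain stores up to
//       *(uint64_t*)to = 0; to += 8;           // the next cache line
//     }
//     while (to < end - 2 * line + 8) {        // body: BIS one line at a
//       block_init_store_line(to);             // time, no read-for-ownership
//       to += line;
//     }
//     membar_store_load();                     // BIS stores need a membar
//     while (to < end) {                       // tail: plain stores again,
//       *(uint64_t*)to = 0; to += 8;           // keeping the next object's
//     }                                        // header out of BIS range
//   }
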
/**
 * Update CRC-32[C] with a byte value according to constants in table
 *
 * @param [in,out] crc   Register containing the crc.
 * @param [in] val       Register containing the byte to fold into the CRC.
 * @param [in] table     Register containing the table of crc constants.
 *
 * uint32_t crc;
 * val = crc_table[(val ^ crc) & 0xFF];
 * crc = val ^ (crc >> 8);
 */
void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
  xor3(val, crc, val);
  and3(val, 0xFF, val);
  sllx(val, 2, val);
  lduw(table, val, val);
  srlx(crc, 8, crc);
  xor3(val, crc, crc);
}

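// For reference, the table-driven byte-at-a-time CRC that this update
// step builds on, including the pre/post inversion performed by
// kernel_crc32() below (hedged C++ sketch; 'crc_table' stands for the
// standard 256-entry table provided by the stub routines):
//
//   uint32_t crc32_ref(uint32_t crc, const uint8_t* buf, size_t len,
//                      const uint32_t* crc_table) {
//     crc = ~crc;                                      // matches not1(crc)
//     for (size_t i = 0; i < len; i++) {
//       crc = crc_table[(crc ^ buf[i]) & 0xFF] ^ (crc >> 8);
//     }
//     return ~crc;                                     // final not1(crc)
//   }
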
// Reverse byte order of lower 32 bits, assuming upper 32 bits all zeros
void MacroAssembler::reverse_bytes_32(Register src, Register dst, Register tmp) {
  srlx(src, 24, dst);

  sllx(src, 32+8, tmp);
  srlx(tmp, 32+24, tmp);
  sllx(tmp, 8, tmp);
  or3(dst, tmp, dst);

  sllx(src, 32+16, tmp);
  srlx(tmp, 32+24, tmp);
  sllx(tmp, 16, tmp);
  or3(dst, tmp, dst);

  sllx(src, 32+24, tmp);
  srlx(tmp, 32, tmp);
  or3(dst, tmp, dst);
}

void MacroAssembler::movitof_revbytes(Register src, FloatRegister dst, Register tmp1, Register tmp2) {
  reverse_bytes_32(src, tmp1, tmp2);
  movxtod(tmp1, dst);
}

void MacroAssembler::movftoi_revbytes(FloatRegister src, Register dst, Register tmp1, Register tmp2) {
  movdtox(src, tmp1);
  reverse_bytes_32(tmp1, dst, tmp2);
}

void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register buf, int offset) {
  xmulx(xcrc_hi, xK_hi, xtmp_lo);
  xmulxhi(xcrc_hi, xK_hi, xtmp_hi);
  xmulxhi(xcrc_lo, xK_lo, xcrc_hi);
  xmulx(xcrc_lo, xK_lo, xcrc_lo);
  xor3(xcrc_lo, xtmp_lo, xcrc_lo);
  xor3(xcrc_hi, xtmp_hi, xcrc_hi);
  ldxl(buf, G0, xtmp_lo);
  inc(buf, 8);
  ldxl(buf, G0, xtmp_hi);
  inc(buf, 8);
  xor3(xcrc_lo, xtmp_lo, xcrc_lo);
  xor3(xcrc_hi, xtmp_hi, xcrc_hi);
}

void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register xbuf_hi, Register xbuf_lo) {
  mov(xcrc_lo, xtmp_lo);
  mov(xcrc_hi, xtmp_hi);
  xmulx(xtmp_hi, xK_hi, xtmp_lo);
  xmulxhi(xtmp_hi, xK_hi, xtmp_hi);
  xmulxhi(xcrc_lo, xK_lo, xcrc_hi);
  xmulx(xcrc_lo, xK_lo, xcrc_lo);
  xor3(xcrc_lo, xbuf_lo, xcrc_lo);
  xor3(xcrc_hi, xbuf_hi, xcrc_hi);
  xor3(xcrc_lo, xtmp_lo, xcrc_lo);
  xor3(xcrc_hi, xtmp_hi, xcrc_hi);
}

void MacroAssembler::fold_8bit_crc32(Register xcrc, Register table, Register xtmp, Register tmp) {
  and3(xcrc, 0xFF, tmp);
  sllx(tmp, 2, tmp);
  lduw(table, tmp, xtmp);
  srlx(xcrc, 8, xcrc);
  xor3(xtmp, xcrc, xcrc);
}

void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
  and3(crc, 0xFF, tmp);
  srlx(crc, 8, crc);
  sllx(tmp, 2, tmp);
  lduw(table, tmp, tmp);
  xor3(tmp, crc, crc);
}

#define CRC32_TMP_REG_NUM 18

#define CRC32_CONST_64  0x163cd6124
#define CRC32_CONST_96  0x0ccaa009e
#define CRC32_CONST_160 0x1751997d0
#define CRC32_CONST_480 0x1c6e41596
#define CRC32_CONST_544 0x154442bd4

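// reverse_bytes_32() above is a plain byte swap of the low 32 bits;
// hedged C++ sketch of the same computation:
//
//   uint32_t bswap32_ref(uint32_t x) {
//     return ( x >> 24)               |
//            ((x >>  8) & 0x0000ff00) |
//            ((x <<  8) & 0x00ff0000) |
//            ( x << 24);
//   }
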
void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table) {

  Label L_cleanup_loop, L_cleanup_check, L_align_loop, L_align_check;
  Label L_main_loop_prologue;
  Label L_fold_512b, L_fold_512b_loop, L_fold_128b;
  Label L_fold_tail, L_fold_tail_loop;
  Label L_8byte_fold_loop, L_8byte_fold_check;

  const Register tmp[CRC32_TMP_REG_NUM] = {L0, L1, L2, L3, L4, L5, L6, G1,
                                           I0, I1, I2, I3, I4, I5, I7, O4, O5, G3};

  Register const_64  = tmp[CRC32_TMP_REG_NUM-1];
  Register const_96  = tmp[CRC32_TMP_REG_NUM-1];
  Register const_160 = tmp[CRC32_TMP_REG_NUM-2];
  Register const_480 = tmp[CRC32_TMP_REG_NUM-1];
  Register const_544 = tmp[CRC32_TMP_REG_NUM-2];

  set(ExternalAddress(StubRoutines::crc_table_addr()), table);

  not1(crc);   // ~c
  clruwu(crc); // clear upper 32 bits of crc

  // Check if below cutoff, proceed directly to cleanup code
  mov(31, G4);
  cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_check);

  // Align buffer to 8-byte boundary
  mov(8, O5);
  and3(buf, 0x7, O4);
  sub(O5, O4, O5);
  and3(O5, 0x7, O5);
  sub(len, O5, len);
  ba(L_align_check);
  delayed()->nop();

  // Alignment loop, table look up method for up to 7 bytes
  bind(L_align_loop);
  ldub(buf, 0, O4);
  inc(buf);
  dec(O5);
  xor3(O4, crc, O4);
  and3(O4, 0xFF, O4);
  sllx(O4, 2, O4);
  lduw(table, O4, O4);
  srlx(crc, 8, crc);
  xor3(O4, crc, crc);
  bind(L_align_check);
  nop();
  cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_align_loop);

  // Aligned on 64-bit (8-byte) boundary at this point
  // Check if still above cutoff (31-bytes)
  mov(31, G4);
  cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_check);
  // At least 32 bytes left to process

  // Free up registers by storing them to FP registers
  for (int i = 0; i < CRC32_TMP_REG_NUM; i++) {
    movxtod(tmp[i], as_FloatRegister(2*i));
  }

  // Determine which loop to enter
  // Shared prologue
  ldxl(buf, G0, tmp[0]);
  inc(buf, 8);
  ldxl(buf, G0, tmp[1]);
  inc(buf, 8);
  xor3(tmp[0], crc, tmp[0]); // Fold CRC into first few bytes
  and3(crc, 0, crc);         // Clear out the crc register
  // Main loop needs 128-bytes at least
  mov(128, G4);
  mov(64, tmp[2]);
  cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_main_loop_prologue);
  // Less than 64 bytes
  nop();
  cmp_and_br_short(len, tmp[2], Assembler::lessUnsigned, Assembler::pt, L_fold_tail);
  // Between 64 and 127 bytes
  set64(CRC32_CONST_96,  const_96,  tmp[8]);
  set64(CRC32_CONST_160, const_160, tmp[9]);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[2], tmp[3], buf,  0);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[4], tmp[5], buf, 16);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[6], tmp[7], buf, 32);
  dec(len, 48);
  ba(L_fold_tail);
  delayed()->nop();

  bind(L_main_loop_prologue);
  for (int i = 2; i < 8; i++) {
    ldxl(buf, G0, tmp[i]);
    inc(buf, 8);
  }

  // Fold total 512 bits of polynomial on each iteration,
  // 128 bits per each of 4 parallel streams
  set64(CRC32_CONST_480, const_480, tmp[8]);
  set64(CRC32_CONST_544, const_544, tmp[9]);

  mov(128, G4);
  bind(L_fold_512b_loop);
  fold_128bit_crc32(tmp[1], tmp[0], const_480, const_544, tmp[9],  tmp[8],  buf,  0);
  fold_128bit_crc32(tmp[3], tmp[2], const_480, const_544, tmp[11], tmp[10], buf, 16);
  fold_128bit_crc32(tmp[5], tmp[4], const_480, const_544, tmp[13], tmp[12], buf, 32);
  fold_128bit_crc32(tmp[7], tmp[6], const_480, const_544, tmp[15], tmp[14], buf, 64);
  dec(len, 64);
  cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_fold_512b_loop);

  // Fold 512 bits to 128 bits
  bind(L_fold_512b);
  set64(CRC32_CONST_96,  const_96,  tmp[8]);
  set64(CRC32_CONST_160, const_160, tmp[9]);

  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[3], tmp[2]);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[5], tmp[4]);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[7], tmp[6]);
  dec(len, 48);

  // Fold the rest of 128 bits data chunks
  bind(L_fold_tail);
  mov(32, G4);
  cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_fold_128b);

  set64(CRC32_CONST_96,  const_96,  tmp[8]);
  set64(CRC32_CONST_160, const_160, tmp[9]);

  bind(L_fold_tail_loop);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[2], tmp[3], buf, 0);
  sub(len, 16, len);
  cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_fold_tail_loop);

  // Fold the 128 bits in tmps 0 - 1 into tmp 1
  bind(L_fold_128b);

  set64(CRC32_CONST_64, const_64, tmp[4]);

  xmulx(const_64, tmp[0], tmp[2]);
  xmulxhi(const_64, tmp[0], tmp[3]);

  srl(tmp[2], G0, tmp[4]);
  xmulx(const_64, tmp[4], tmp[4]);

  srlx(tmp[2], 32, tmp[2]);
  sllx(tmp[3], 32, tmp[3]);
  or3(tmp[2], tmp[3], tmp[2]);

  xor3(tmp[4], tmp[1], tmp[4]);
  xor3(tmp[4], tmp[2], tmp[1]);
  dec(len, 8);

  // Use table lookup for the 8 bytes left in tmp[1]
  dec(len, 8);

  // 8 8-bit folds to compute 32-bit CRC.
  for (int j = 0; j < 4; j++) {
    fold_8bit_crc32(tmp[1], table, tmp[2], tmp[3]);
  }
  srl(tmp[1], G0, crc); // move 32 bits to general register
  for (int j = 0; j < 4; j++) {
    fold_8bit_crc32(crc, table, tmp[3]);
  }

  bind(L_8byte_fold_check);

  // Restore int registers saved in FP registers
  for (int i = 0; i < CRC32_TMP_REG_NUM; i++) {
    movdtox(as_FloatRegister(2*i), tmp[i]);
  }

  ba(L_cleanup_check);
  delayed()->nop();

  // Table look-up method for the remaining few bytes
  bind(L_cleanup_loop);
  ldub(buf, 0, O4);
  inc(buf);
  dec(len);
  xor3(O4, crc, O4);
  and3(O4, 0xFF, O4);
  sllx(O4, 2, O4);
  lduw(table, O4, O4);
  srlx(crc, 8, crc);
  xor3(O4, crc, crc);
  bind(L_cleanup_check);
  nop();
  cmp_and_br_short(len, 0, Assembler::greaterUnsigned, Assembler::pt, L_cleanup_loop);

  not1(crc);
}

#define CHUNK_LEN   128          /* 128 x 8B = 1KB */
#define CHUNK_K1    0x1307a0206  /* reverseBits(pow(x, CHUNK_LEN*8*8*3 - 32) mod P(x)) << 1 */
#define CHUNK_K2    0x1a0f717c4  /* reverseBits(pow(x, CHUNK_LEN*8*8*2 - 32) mod P(x)) << 1 */
#define CHUNK_K3    0x0170076fa  /* reverseBits(pow(x, CHUNK_LEN*8*8*1 - 32) mod P(x)) << 1 */

void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, Register table) {

  Label L_crc32c_head, L_crc32c_aligned;
  Label L_crc32c_parallel, L_crc32c_parallel_loop;
  Label L_crc32c_serial, L_crc32c_x32_loop, L_crc32c_x8, L_crc32c_x8_loop;
  Label L_crc32c_done, L_crc32c_tail, L_crc32c_return;

  set(ExternalAddress(StubRoutines::crc32c_table_addr()), table);

  cmp_and_br_short(len, 0, Assembler::lessEqual, Assembler::pn, L_crc32c_return);

  // clear upper 32 bits of crc
  clruwu(crc);

  and3(buf, 7, G4);
  cmp_and_brx_short(G4, 0, Assembler::equal, Assembler::pt, L_crc32c_aligned);

  mov(8, G1);
  sub(G1, G4, G4);

  // ------ process the misaligned head (7 bytes or less) ------
  bind(L_crc32c_head);

  // crc = (crc >>> 8) ^ byteTable[(crc ^ b) & 0xFF];
  ldub(buf, 0, G1);
  update_byte_crc32(crc, G1, table);

  inc(buf);
  dec(len);
  cmp_and_br_short(len, 0, Assembler::equal, Assembler::pn, L_crc32c_return);
  dec(G4);
  cmp_and_br_short(G4, 0, Assembler::greater, Assembler::pt, L_crc32c_head);

  // ------ process the 8-byte-aligned body ------
  bind(L_crc32c_aligned);
  nop();
  cmp_and_br_short(len, 8, Assembler::less, Assembler::pn, L_crc32c_tail);

  // reverse the byte order of lower 32 bits to big endian, and move to FP side
  movitof_revbytes(crc, F0, G1, G3);

  set(CHUNK_LEN*8*4, G4);
  cmp_and_br_short(len, G4, Assembler::less, Assembler::pt, L_crc32c_serial);

  // ------ process four 1KB chunks in parallel ------
  bind(L_crc32c_parallel);

  fzero(FloatRegisterImpl::D, F2);
  fzero(FloatRegisterImpl::D, F4);
  fzero(FloatRegisterImpl::D, F6);

  mov(CHUNK_LEN - 1, G4);
  bind(L_crc32c_parallel_loop);
  // schedule ldf's ahead of crc32c's to hide the load-use latency
  ldf(FloatRegisterImpl::D, buf, 0,            F8);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*8,  F10);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*16, F12);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*24, F14);
  crc32c(F0, F8,  F0);
  crc32c(F2, F10, F2);
  crc32c(F4, F12, F4);
  crc32c(F6, F14, F6);
  inc(buf, 8);
  dec(G4);
  cmp_and_br_short(G4, 0, Assembler::greater, Assembler::pt, L_crc32c_parallel_loop);

  ldf(FloatRegisterImpl::D, buf, 0,            F8);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*8,  F10);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*16, F12);
  crc32c(F0, F8,  F0);
  crc32c(F2, F10, F2);
  crc32c(F4, F12, F4);

  inc(buf, CHUNK_LEN*24);
  ldfl(FloatRegisterImpl::D, buf, G0, F14); // load in little endian
  inc(buf, 8);

  prefetch(buf, 0,            Assembler::severalReads);
  prefetch(buf, CHUNK_LEN*8,  Assembler::severalReads);
  prefetch(buf, CHUNK_LEN*16, Assembler::severalReads);
  prefetch(buf, CHUNK_LEN*24, Assembler::severalReads);

  // move to INT side, and reverse the byte order of lower 32 bits to little endian
  movftoi_revbytes(F0, O4, G1, G4);
  movftoi_revbytes(F2, O5, G1, G4);
  movftoi_revbytes(F4, G5, G1, G4);

  // combine the results of 4 chunks
  set64(CHUNK_K1, G3, G1);
  xmulx(O4, G3, O4);
  set64(CHUNK_K2, G3, G1);
  xmulx(O5, G3, O5);
  set64(CHUNK_K3, G3, G1);
  xmulx(G5, G3, G5);

  movdtox(F14, G4);
  xor3(O4, O5, O5);
  xor3(G5, O5, O5);
  xor3(G4, O5, O5);

  // reverse the byte order to big endian, via stack, and move to FP side
  // TODO: use new revb instruction
  add(SP, -8, G1);
  srlx(G1, 3, G1);
  sllx(G1, 3, G1);
  stx(O5, G1, G0);
  ldfl(FloatRegisterImpl::D, G1, G0, F2); // load in little endian

  crc32c(F6, F2, F0);

  set(CHUNK_LEN*8*4, G4);
  sub(len, G4, len);
  cmp_and_br_short(len, G4, Assembler::greaterEqual, Assembler::pt, L_crc32c_parallel);
  nop();
  cmp_and_br_short(len, 0, Assembler::equal, Assembler::pt, L_crc32c_done);

  bind(L_crc32c_serial);

  mov(32, G4);
  cmp_and_br_short(len, G4, Assembler::less, Assembler::pn, L_crc32c_x8);

  // ------ process 32B chunks ------
  bind(L_crc32c_x32_loop);
  ldf(FloatRegisterImpl::D, buf, 0, F2);
  crc32c(F0, F2, F0);
  ldf(FloatRegisterImpl::D, buf, 8, F2);
  crc32c(F0, F2, F0);
  ldf(FloatRegisterImpl::D, buf, 16, F2);
  crc32c(F0, F2, F0);
  ldf(FloatRegisterImpl::D, buf, 24, F2);
  inc(buf, 32);
  crc32c(F0, F2, F0);
  dec(len, 32);
  cmp_and_br_short(len, G4, Assembler::greaterEqual, Assembler::pt, L_crc32c_x32_loop);

  bind(L_crc32c_x8);
  nop();
  cmp_and_br_short(len, 8, Assembler::less, Assembler::pt, L_crc32c_done);

  // ------ process 8B chunks ------
  bind(L_crc32c_x8_loop);
  ldf(FloatRegisterImpl::D, buf, 0, F2);
  inc(buf, 8);
  crc32c(F0, F2, F0);
  dec(len, 8);
  cmp_and_br_short(len, 8, Assembler::greaterEqual, Assembler::pt, L_crc32c_x8_loop);

  bind(L_crc32c_done);

  // move to INT side, and reverse the byte order of lower 32 bits to little endian
  movftoi_revbytes(F0, crc, G1, G3);

  cmp_and_br_short(len, 0, Assembler::equal, Assembler::pt, L_crc32c_return);

  // ------ process the misaligned tail (7 bytes or less) ------
  bind(L_crc32c_tail);

  // crc = (crc >>> 8) ^ byteTable[(crc ^ b) & 0xFF];
  ldub(buf, 0, G1);
  update_byte_crc32(crc, G1, table);

  inc(buf);
  dec(len);
  cmp_and_br_short(len, 0, Assembler::greater, Assembler::pt, L_crc32c_tail);

  bind(L_crc32c_return);
  nop();
}