/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/sharedRuntime.hpp"

#define __ _masm->


//------------------------------------------------------------


bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  if (opr->is_constant()) {
    LIR_Const* constant = opr->as_constant_ptr();
    switch (constant->type()) {
      case T_INT: {
        jint value = constant->as_jint();
        return Assembler::is_simm13(value);
      }

      default:
        return false;
    }
  }
  return false;
}


bool LIR_Assembler::is_single_instruction(LIR_Op* op) {
  switch (op->code()) {
    case lir_null_check:
      return true;


    case lir_add:
    case lir_ushr:
    case lir_shr:
    case lir_shl:
      // integer shifts and adds are always one instruction
      return op->result_opr()->is_single_cpu();


    case lir_move: {
      LIR_Op1* op1 = op->as_Op1();
      LIR_Opr src = op1->in_opr();
      LIR_Opr dst = op1->result_opr();

      if (src == dst) {
        NEEDS_CLEANUP;
        // this works around a problem where moves with the same src and dst
        // end up in the delay slot and then the assembler swallows the mov
        // since it has no effect and then it complains because the delay slot
        // is empty.  returning false stops the optimizer from putting this in
        // the delay slot
        return false;
      }

      // don't put moves involving oops into the delay slot since the VerifyOops code
      // will make it much larger than a single instruction.
      if (VerifyOops) {
        return false;
      }

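      // The checks below conservatively reject any move that may expand into
      // more than one instruction: 64-bit operands, patched moves, compressed
      // oop/klass encode and decode sequences, and displacements that do not
      // fit in a simm13.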
      if (src->is_double_cpu() || dst->is_double_cpu() || op1->patch_code() != lir_patch_none ||
          ((src->is_double_fpu() || dst->is_double_fpu()) && op1->move_kind() != lir_move_normal)) {
        return false;
      }

      if (UseCompressedOops) {
        if (dst->is_address() && !dst->is_stack() && is_reference_type(dst->type())) return false;
        if (src->is_address() && !src->is_stack() && is_reference_type(src->type())) return false;
      }

      if (UseCompressedClassPointers) {
        if (src->is_address() && !src->is_stack() && src->type() == T_ADDRESS &&
            src->as_address_ptr()->disp() == oopDesc::klass_offset_in_bytes()) return false;
      }

      if (dst->is_register()) {
        if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
          return !PatchALot;
        } else if (src->is_single_stack()) {
          return true;
        }
      }

      if (src->is_register()) {
        if (dst->is_address() && Assembler::is_simm13(dst->as_address_ptr()->disp())) {
          return !PatchALot;
        } else if (dst->is_single_stack()) {
          return true;
        }
      }

      if (dst->is_register() &&
          ((src->is_register() && src->is_single_word() && src->is_same_type(dst)) ||
           (src->is_constant() && LIR_Assembler::is_small_constant(op->as_Op1()->in_opr())))) {
        return true;
      }

      return false;
    }

    default:
      return false;
  }
  ShouldNotReachHere();
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::O0_oop_opr;
}


LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::I0_opr;
}


int LIR_Assembler::initial_frame_size_in_bytes() const {
  return in_bytes(frame_map()->framesize_in_bytes());
}


// inline cache check: the inline cached class is in G5_inline_cache_reg (G5);
// we fetch the class of the receiver (O0) and compare it with the cached class.
// If they do not match we jump to the slow case.
int LIR_Assembler::check_icache() {
  int offset = __ offset();
  __ inline_cache_check(O0, G5_inline_cache_reg);
  return offset;
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  ShouldNotReachHere(); // not implemented
}

void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp):
  //
  //   1. Create a new compiled activation.
  //   2. Initialize local variables in the compiled activation.  The expression stack must be empty
  //      at the osr_bci; it is not initialized.
  //   3. Jump to the continuation address in compiled code to resume execution.

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[number_of_locks-1..0]
  //
  // locals is a direct copy of the interpreter frame, so the first slot in
  // the locals array of the osr buffer is the last local from the interpreter
  // and the last slot is local[0] (the receiver) from the interpreter.
  //
  // Similarly with locks: the first lock slot in the osr buffer is the nth lock
  // from the interpreter frame and the nth lock slot in the osr buffer is the
  // 0th lock from the interpreter frame (the method lock if this is a
  // synchronized method).

  // Initialize monitors in the compiled activation.
  //   I0: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
        __ cmp_and_br_short(O7, G0, Assembler::notEqual, Assembler::pt, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif // ASSERT
      // Copy the lock field into the compiled activation.
      __ ld_ptr(OSR_buf, slot_offset + 0, O7);
      __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
      __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
      __ st_ptr(O7, frame_map()->address_for_monitor_object(i));
    }
  }
}


// --------------------------------------------------------------------------------------------

void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no) {
  if (!GenerateSynchronizationCode) return;

  Register obj_reg = obj_opr->as_register();
  Register lock_reg = lock_opr->as_register();

  Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  Register reg = mon_addr.base();
  int offset = mon_addr.disp();
  // compute pointer to BasicLock
  if (mon_addr.is_simm13()) {
    __ add(reg, offset, lock_reg);
  } else {
    __ set(offset, lock_reg);
    __ add(reg, lock_reg, lock_reg);
  }
  // unlock object
  MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, UseFastLocking, monitor_no);
  // _slow_case_stubs->append(slow_case);
  // temporary fix: must be created after the exception handler, therefore as a call stub
  _slow_case_stubs->append(slow_case);
  if (UseFastLocking) {
    // try inlined fast unlocking first, revert to slow locking if it fails
    // note: lock_reg points to the displaced header since the displaced header offset is 0!
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
  } else {
    // always do slow unlocking
    // note: the slow unlocking code could be inlined here, however if we use
    // slow unlocking, speed doesn't matter anyway and this solution is
    // simpler and requires less duplicated code - additionally, the
    // slow unlocking code is the same in either case which simplifies
    // debugging
    __ br(Assembler::always, false, Assembler::pt, *slow_case->entry());
    __ delayed()->nop();
  }
  // done
  __ bind(*slow_case->continuation());
}


int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  ciMethod* method = compilation()->method();

  address handler_base = __ start_a_stub(exception_handler_size());

  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
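// The handler fetches the pending exception from thread-local storage,
// releases the method monitor for synchronized methods, notifies dtrace if
// method probes are enabled, and finally dispatches to the unwind_exception
// runtime stub.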
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0);
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(O0);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(O0, I0);  // Preserve the exception
  }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::I1_opr);
    stub = new MonitorExitStub(FrameMap::I1_opr, true, 0);
    __ unlock_object(I3, I2, I1, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ mov(G2_thread, O0);
    __ save_thread(I1); // need to preserve thread in G2 across
                        // runtime call
    metadata2reg(method()->constant_encoding(), O1);
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type);
    __ delayed()->nop();
    __ restore_thread(I1);
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(I0, O0);  // Restore the exception
  }

  // dispatch to the unwind logic
  __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for deopt handler
  ciMethod* method = compilation()->method();
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
  __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
  __ delayed()->nop();
  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ set(NULL_WORD, reg);
  } else {
#ifdef ASSERT
    {
      ThreadInVMfromNative tiv(JavaThread::current());
      assert(Universe::heap()->is_in(JNIHandles::resolve(o)), "should be real oop");
    }
#endif
    int oop_index = __ oop_recorder()->find_index(o);
    RelocationHolder rspec = oop_Relocation::spec(oop_index);
    __ set(NULL_WORD, reg, rspec); // Will be set when the nmethod is created
  }
}


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the object once it's been patched
  int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);

  AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
  assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
  // NULL will be dynamically patched later and the patched value may be large.  We must
  // therefore generate the sethi/add as placeholders.
  __ patchable_set(addrlit, reg);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::metadata2reg(Metadata* o, Register reg) {
  __ set_metadata_constant(o, reg);
}

void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the klass once it's been patched
  int index = __ oop_recorder()->allocate_metadata_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
  AddressLiteral addrlit(NULL, metadata_Relocation::spec(index));
  assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
  // NULL will be dynamically patched later and the patched value may be large.  We must
  // therefore generate the sethi/add as placeholders.
  __ patchable_set(addrlit, reg);

  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:  // Both idiv & irem are handled after the switch (below).
      break;
    case lir_fmaf:
      __ fmadd(FloatRegisterImpl::S,
               op->in_opr1()->as_float_reg(),
               op->in_opr2()->as_float_reg(),
               op->in_opr3()->as_float_reg(),
               op->result_opr()->as_float_reg());
      return;
    case lir_fmad:
      __ fmadd(FloatRegisterImpl::D,
               op->in_opr1()->as_double_reg(),
               op->in_opr2()->as_double_reg(),
               op->in_opr3()->as_double_reg(),
               op->result_opr()->as_double_reg());
      return;
    default:
      ShouldNotReachHere();
      break;
  }

  // Handle idiv & irem:

  Register Rdividend = op->in_opr1()->as_register();
  Register Rdivisor  = noreg;
  Register Rscratch  = op->in_opr3()->as_register();
  Register Rresult   = op->result_opr()->as_register();
  int divisor = -1;

  if (op->in_opr2()->is_register()) {
    Rdivisor = op->in_opr2()->as_register();
  } else {
    divisor = op->in_opr2()->as_constant_ptr()->as_jint();
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
  }

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor  != Rscratch, "");
  assert(op->code() == lir_idiv || op->code() == lir_irem, "Must be irem or idiv");

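  // For a constant power-of-two divisor the signed division is strength-
  // reduced: e.g. for divisor == 8, idiv computes
  //   (dividend + ((dividend >> 31) & 7)) >> 3
  // where the masked sign bits round the quotient towards zero for negative
  // dividends; irem instead clears the low bits of the adjusted value (andn)
  // and subtracts that multiple from the dividend.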
  if (Rdivisor == noreg && is_power_of_2(divisor)) {
    // convert division by a power of two into some shifts and logical operations
    if (op->code() == lir_idiv) {
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ sra(Rscratch, log2_int(divisor), Rresult);
      return;
    } else {
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ andn(Rscratch, divisor - 1, Rscratch);
      __ sub(Rdividend, Rscratch, Rresult);
      return;
    }
  }

  __ sra(Rdividend, 31, Rscratch);
  __ wry(Rscratch);

  add_debug_info_for_div0_here(op->info());

  if (Rdivisor != noreg) {
    __ sdivcc(Rdividend, Rdivisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  } else {
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
    __ sdivcc(Rdividend, divisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  }

  Label skip;
  __ br(Assembler::overflowSet, true, Assembler::pn, skip);
  __ delayed()->Assembler::sethi(0x80000000, (op->code() == lir_idiv ? Rresult : Rscratch));
  __ bind(skip);

  if (op->code() == lir_irem) {
    if (Rdivisor != noreg) {
      __ smul(Rscratch, Rdivisor, Rscratch);
    } else {
      __ smul(Rscratch, divisor, Rscratch);
    }
    __ sub(Rdividend, Rscratch, Rresult);
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif
  assert(op->info() == NULL, "shouldn't have CodeEmitInfo");

  if (op->cond() == lir_cond_always) {
    __ br(Assembler::always, false, Assembler::pt, *(op->label()));
  } else if (op->code() == lir_cond_float_branch) {
    assert(op->ublock() != NULL, "must have unordered successor");
    bool is_unordered = (op->ublock() == op->block());
    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal:         acond = Assembler::f_equal;    break;
      case lir_cond_notEqual:      acond = Assembler::f_notEqual; break;
      case lir_cond_less:          acond = (is_unordered ? Assembler::f_unorderedOrLess          : Assembler::f_less);           break;
      case lir_cond_greater:       acond = (is_unordered ? Assembler::f_unorderedOrGreater       : Assembler::f_greater);        break;
      case lir_cond_lessEqual:     acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual   : Assembler::f_lessOrEqual);    break;
      case lir_cond_greaterEqual:  acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
      default :                    ShouldNotReachHere();
    }
    __ fb( acond, false, Assembler::pn, *(op->label()));
  } else {
    assert (op->code() == lir_branch, "just checking");

    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::equal;                break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
      case lir_cond_less:         acond = Assembler::less;                 break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
      case lir_cond_greater:      acond = Assembler::greater;              break;
      case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
      case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
      default:                    ShouldNotReachHere();
    };

    // sparc has different condition codes for testing 32-bit
    // vs. 64-bit values.  We could always test xcc if we could
    // guarantee that 32-bit loads are always sign extended, but that isn't
    // true, and since sign extension isn't free it would impose a
    // slight cost.
    if (op->type() == T_INT) {
      __ br(acond, false, Assembler::pn, *(op->label()));
    } else {
      __ brx(acond, false, Assembler::pn, *(op->label()));
    }
  }
  // The peephole pass fills the delay slot
}


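// Primitive conversions. f2i must treat NaN specially, since Java defines
// (int)NaN == 0; the remaining conversions map directly onto FPU
// instructions or shift sequences.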
void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  Bytecodes::Code code = op->bytecode();
  LIR_Opr dst = op->result_opr();

  switch(code) {
    case Bytecodes::_i2l: {
      Register rlo  = dst->as_register_lo();
      Register rhi  = dst->as_register_hi();
      Register rval = op->in_opr()->as_register();
      __ sra(rval, 0, rlo);
      break;
    }
    case Bytecodes::_i2d:
    case Bytecodes::_i2f: {
      bool is_double = (code == Bytecodes::_i2d);
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width w = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      if (rsrc != rdst) {
        __ fmov(FloatRegisterImpl::S, rsrc, rdst);
      }
      __ fitof(w, rdst, rdst);
      break;
    }
    case Bytecodes::_f2i: {
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      Address       addr = frame_map()->address_for_slot(dst->single_stack_ix());
      Label L;
      // result must be 0 if value is NaN; test by comparing value to itself
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
      __ fb(Assembler::f_unordered, true, Assembler::pn, L);
      __ delayed()->st(G0, addr); // annulled if contents of rsrc is not NaN
      __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
      // move integer result from float register to int register
      __ stf(FloatRegisterImpl::S, rsrc, addr.base(), addr.disp());
      __ bind (L);
      break;
    }
    case Bytecodes::_l2i: {
      Register rlo  = op->in_opr()->as_register_lo();
      Register rhi  = op->in_opr()->as_register_hi();
      Register rdst = dst->as_register();
      __ sra(rlo, 0, rdst);
      break;
    }
    case Bytecodes::_d2f:
    case Bytecodes::_f2d: {
      bool is_double = (code == Bytecodes::_f2d);
      assert((!is_double && dst->is_single_fpu()) || (is_double && dst->is_double_fpu()), "check");
      LIR_Opr val = op->in_opr();
      FloatRegister rval = (code == Bytecodes::_d2f) ? val->as_double_reg() : val->as_float_reg();
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width vw = is_double ? FloatRegisterImpl::S : FloatRegisterImpl::D;
      FloatRegisterImpl::Width dw = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      __ ftof(vw, dw, rval, rdst);
      break;
    }
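    // i2b and i2s narrow by shifting the value to the top of the register
    // and arithmetically shifting it back down, sign-extending the low 8 or
    // 16 bits; i2c uses a logical right shift instead, zero-extending the
    // low 16 bits.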
    case Bytecodes::_i2s:
    case Bytecodes::_i2b: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
      int shift = (code == Bytecodes::_i2b) ? (BitsPerInt - T_BYTE_aelem_bytes * BitsPerByte) : (BitsPerInt - BitsPerShort);
      __ sll (rval, shift, rdst);
      __ sra (rdst, shift, rdst);
      break;
    }
    case Bytecodes::_i2c: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
      int shift = BitsPerInt - T_CHAR_aelem_bytes * BitsPerByte;
      __ sll (rval, shift, rdst);
      __ srl (rdst, shift, rdst);
      break;
    }

    default: ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code) {
  // do nothing since all instructions are word aligned on sparc
}


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  __ call(op->addr(), rtype);
  // The peephole pass fills the delay slot, add_call_info is done in
  // LIR_Assembler::emit_delay.
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ ic_call(op->addr(), false);
  // The peephole pass fills the delay slot, add_call_info is done in
  // LIR_Assembler::emit_delay.
}


void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  add_debug_info_for_null_check_here(op->info());
  __ load_klass(O0, G3_scratch);
  if (Assembler::is_simm13(op->vtable_offset())) {
    __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
  } else {
    // This will generate 2 instructions
    __ set(op->vtable_offset(), G5_method);
    // ld_ptr, set_hi, set
    __ ld_ptr(G3_scratch, G5_method, G5_method);
  }
  __ ld_ptr(G5_method, Method::from_compiled_offset(), G3_scratch);
  __ callr(G3_scratch, G0);
  // the peephole pass fills the delay slot
}

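// Store from_reg to [base + offset] and return the code offset of the
// instruction that performs the access (used for implicit null checks).
// A wide store keeps an oop uncompressed; unaligned 64-bit stores are
// split into two 32-bit word stores.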
int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
  int store_offset;
  if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
    assert(base != O7, "destroying register");
    assert(!unaligned, "can't handle this");
    // for offsets larger than a simm13 we set up the offset in O7
    __ set(offset, O7);
    store_offset = store(from_reg, base, O7, type, wide);
  } else {
    if (is_reference_type(type)) {
      __ verify_oop(from_reg->as_register());
    }
    store_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ stb(from_reg->as_register(), base, offset); break;
      case T_CHAR  : __ sth(from_reg->as_register(), base, offset); break;
      case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
      case T_INT   : __ stw(from_reg->as_register(), base, offset); break;
      case T_LONG  :
        if (unaligned || PatchALot) {
          // Don't use O7 here because it may be equal to 'base' (see LIR_Assembler::reg2mem)
          assert(G3_scratch != base, "can't handle this");
          assert(G3_scratch != from_reg->as_register_lo(), "can't handle this");
          __ srax(from_reg->as_register_lo(), 32, G3_scratch);
          __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
          __ stw(G3_scratch,                 base, offset + hi_word_offset_in_bytes);
        } else {
          __ stx(from_reg->as_register_lo(), base, offset);
        }
        break;
      case T_ADDRESS:
      case T_METADATA:
        __ st_ptr(from_reg->as_register(), base, offset);
        break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(from_reg->as_register(), G3_scratch);
            store_offset = code_offset();
            __ stw(G3_scratch, base, offset);
          } else {
            __ st_ptr(from_reg->as_register(), base, offset);
          }
          break;
        }

      case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break;
      case T_DOUBLE:
        {
          FloatRegister reg = from_reg->as_double_reg();
          // split unaligned stores
          if (unaligned || PatchALot) {
            assert(Assembler::is_simm13(offset + 4), "must be");
            __ stf(FloatRegisterImpl::S, reg->successor(), base, offset + 4);
            __ stf(FloatRegisterImpl::S, reg,              base, offset);
          } else {
            __ stf(FloatRegisterImpl::D, reg, base, offset);
          }
          break;
        }
      default      : ShouldNotReachHere();
    }
  }
  return store_offset;
}


int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
  if (is_reference_type(type)) {
    __ verify_oop(from_reg->as_register());
  }
  int store_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ stb(from_reg->as_register(), base, disp); break;
    case T_CHAR  : __ sth(from_reg->as_register(), base, disp); break;
    case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
    case T_INT   : __ stw(from_reg->as_register(), base, disp); break;
    case T_LONG  :
      __ stx(from_reg->as_register_lo(), base, disp);
      break;
    case T_ADDRESS:
      __ st_ptr(from_reg->as_register(), base, disp);
      break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ encode_heap_oop(from_reg->as_register(), G3_scratch);
          store_offset = code_offset();
          __ stw(G3_scratch, base, disp);
        } else {
          __ st_ptr(from_reg->as_register(), base, disp);
        }
        break;
      }
    case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(),  base, disp); break;
    case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break;
    default      : ShouldNotReachHere();
  }
  return store_offset;
}


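// Load [base + offset] into to_reg and return the code offset of the
// instruction that performs the access. Unaligned 64-bit loads are
// assembled from two 32-bit word loads.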
int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
  int load_offset;
  if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
    assert(base != O7, "destroying register");
    assert(!unaligned, "can't handle this");
    // for offsets larger than a simm13 we set up the offset in O7
    __ set(offset, O7);
    load_offset = load(base, O7, to_reg, type, wide);
  } else {
    load_offset = code_offset();
    switch(type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ ldsb(base, offset, to_reg->as_register()); break;
      case T_CHAR  : __ lduh(base, offset, to_reg->as_register()); break;
      case T_SHORT : __ ldsh(base, offset, to_reg->as_register()); break;
      case T_INT   : __ ld(base, offset, to_reg->as_register()); break;
      case T_LONG  :
        if (!unaligned && !PatchALot) {
          __ ldx(base, offset, to_reg->as_register_lo());
        } else {
          assert(base != to_reg->as_register_lo(), "can't handle this");
          assert(O7 != to_reg->as_register_lo(), "can't handle this");
          __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
          __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
          __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
          __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
        }
        break;
      case T_METADATA: __ ld_ptr(base, offset, to_reg->as_register()); break;
      case T_ADDRESS:
        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
          __ lduw(base, offset, to_reg->as_register());
          __ decode_klass_not_null(to_reg->as_register());
        } else {
          __ ld_ptr(base, offset, to_reg->as_register());
        }
        break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            __ lduw(base, offset, to_reg->as_register());
            __ decode_heap_oop(to_reg->as_register());
          } else {
            __ ld_ptr(base, offset, to_reg->as_register());
          }
          break;
        }
      case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break;
      case T_DOUBLE:
        {
          FloatRegister reg = to_reg->as_double_reg();
          // split unaligned loads
          if (unaligned || PatchALot) {
            __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
            __ ldf(FloatRegisterImpl::S, base, offset,     reg);
          } else {
            __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
          }
          break;
        }
      default      : ShouldNotReachHere();
    }
    if (is_reference_type(type)) {
      __ verify_oop(to_reg->as_register());
    }
  }
  return load_offset;
}


int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
  int load_offset = code_offset();
  switch(type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ ldsb(base, disp, to_reg->as_register()); break;
    case T_CHAR  : __ lduh(base, disp, to_reg->as_register()); break;
    case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break;
    case T_INT   : __ ld(base, disp, to_reg->as_register()); break;
    case T_ADDRESS: __ ld_ptr(base, disp, to_reg->as_register()); break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ lduw(base, disp, to_reg->as_register());
          __ decode_heap_oop(to_reg->as_register());
        } else {
          __ ld_ptr(base, disp, to_reg->as_register());
        }
        break;
      }
    case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
    case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
    case T_LONG  :
      __ ldx(base, disp, to_reg->as_register_lo());
      break;
    default      : ShouldNotReachHere();
  }
  if (is_reference_type(type)) {
    __ verify_oop(to_reg->as_register());
  }
  return load_offset;
}

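// Materialize a constant into a stack slot. Zero is stored directly from
// G0; any other value is staged through O7 first.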
void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
    case T_INT:
    case T_FLOAT: {
      Register src_reg = O7;
      int value = c->as_jint_bits();
      if (value == 0) {
        src_reg = G0;
      } else {
        __ set(value, O7);
      }
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ stw(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_ADDRESS: {
      Register src_reg = O7;
      int value = c->as_jint_bits();
      if (value == 0) {
        src_reg = G0;
      } else {
        __ set(value, O7);
      }
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ st_ptr(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_OBJECT: {
      Register src_reg = O7;
      jobject2reg(c->as_jobject(), src_reg);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ st_ptr(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());

      Register tmp = O7;
      int value_lo = c->as_jint_lo_bits();
      if (value_lo == 0) {
        tmp = G0;
      } else {
        __ set(value_lo, O7);
      }
      __ stw(tmp, addr.base(), addr.disp() + lo_word_offset_in_bytes);
      int value_hi = c->as_jint_hi_bits();
      if (value_hi == 0) {
        tmp = G0;
      } else {
        __ set(value_hi, O7);
      }
      __ stw(tmp, addr.base(), addr.disp() + hi_word_offset_in_bytes);
      break;
    }
    default:
      Unimplemented();
  }
}


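// Store a constant through a LIR_Address. The code offset of the store is
// recorded so that an implicit null check can be attached when info is
// provided.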
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();
  Register base = addr->base()->as_pointer_register();
  int offset = -1;

  switch (c->type()) {
    case T_FLOAT: type = T_INT; // Float constants are stored by int store instructions.
    case T_INT:
    case T_ADDRESS: {
      LIR_Opr tmp = FrameMap::O7_opr;
      int value = c->as_jint_bits();
      if (value == 0) {
        tmp = FrameMap::G0_opr;
      } else if (Assembler::is_simm13(value)) {
        __ set(value, O7);
      }
      if (addr->index()->is_valid()) {
        assert(addr->disp() == 0, "must be zero");
        offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
      } else {
        assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
        offset = store(tmp, base, addr->disp(), type, wide, false);
      }
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      assert(!addr->index()->is_valid(), "can't handle reg reg address here");
      assert(Assembler::is_simm13(addr->disp()) &&
             Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses");

      LIR_Opr tmp = FrameMap::O7_opr;
      int value_lo = c->as_jint_lo_bits();
      if (value_lo == 0) {
        tmp = FrameMap::G0_opr;
      } else {
        __ set(value_lo, O7);
      }
      offset = store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT, wide, false);
      int value_hi = c->as_jint_hi_bits();
      if (value_hi == 0) {
        tmp = FrameMap::G0_opr;
      } else {
        __ set(value_hi, O7);
      }
      store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT, wide, false);
      break;
    }
    case T_OBJECT: {
      jobject obj = c->as_jobject();
      LIR_Opr tmp;
      if (obj == NULL) {
        tmp = FrameMap::G0_opr;
      } else {
        tmp = FrameMap::O7_opr;
        jobject2reg(c->as_jobject(), O7);
      }
      // handle either reg+reg or reg+disp address
      if (addr->index()->is_valid()) {
        assert(addr->disp() == 0, "must be zero");
        offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
      } else {
        assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
        offset = store(tmp, base, addr->disp(), type, wide, false);
      }

      break;
    }
    default:
      Unimplemented();
  }
  if (info != NULL) {
    assert(offset != -1, "offset should've been set");
    add_debug_info_for_null_check(offset, info);
  }
}


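// Materialize a constant into a register. Object and klass constants that
// are not yet resolved go through the patching stubs; float and double
// constants are loaded from the constant section of the code buffer.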
void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Opr to_reg = dest;

  switch (c->type()) {
    case T_INT:
    case T_ADDRESS:
      {
        jint con = c->as_jint();
        if (to_reg->is_single_cpu()) {
          assert(patch_code == lir_patch_none, "no patching handled here");
          __ set(con, to_reg->as_register());
        } else {
          ShouldNotReachHere();
          assert(to_reg->is_single_fpu(), "wrong register kind");

          __ set(con, O7);
          Address temp_slot(SP, (frame::register_save_words * wordSize) + STACK_BIAS);
          __ st(O7, temp_slot);
          __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg());
        }
      }
      break;

    case T_LONG:
      {
        jlong con = c->as_jlong();

        if (to_reg->is_double_cpu()) {
          __ set(con, to_reg->as_register_lo());
        } else if (to_reg->is_single_cpu()) {
          __ set(con, to_reg->as_register());
        } else {
          ShouldNotReachHere();
          assert(to_reg->is_double_fpu(), "wrong register kind");
          Address temp_slot_lo(SP, ((frame::register_save_words) * wordSize) + STACK_BIAS);
          Address temp_slot_hi(SP, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS);
          __ set(low(con),  O7);
          __ st(O7, temp_slot_lo);
          __ set(high(con), O7);
          __ st(O7, temp_slot_hi);
          __ ldf(FloatRegisterImpl::D, temp_slot_lo, to_reg->as_double_reg());
        }
      }
      break;

    case T_OBJECT:
      {
        if (patch_code == lir_patch_none) {
          jobject2reg(c->as_jobject(), to_reg->as_register());
        } else {
          jobject2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;

    case T_METADATA:
      {
        if (patch_code == lir_patch_none) {
          metadata2reg(c->as_metadata(), to_reg->as_register());
        } else {
          klass2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;

    case T_FLOAT:
      {
        address const_addr = __ float_constant(c->as_jfloat());
        if (const_addr == NULL) {
          bailout("const section overflow");
          break;
        }
        RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
        AddressLiteral const_addrlit(const_addr, rspec);
        if (to_reg->is_single_fpu()) {
          __ patchable_sethi(const_addrlit, O7);
          __ relocate(rspec);
          __ ldf(FloatRegisterImpl::S, O7, const_addrlit.low10(), to_reg->as_float_reg());

        } else {
          assert(to_reg->is_single_cpu(), "Must be a cpu register.");

          __ set(const_addrlit, O7);
          __ ld(O7, 0, to_reg->as_register());
        }
      }
      break;

    case T_DOUBLE:
      {
        address const_addr = __ double_constant(c->as_jdouble());
        if (const_addr == NULL) {
          bailout("const section overflow");
          break;
        }
        RelocationHolder rspec = internal_word_Relocation::spec(const_addr);

        if (to_reg->is_double_fpu()) {
          AddressLiteral const_addrlit(const_addr, rspec);
          __ patchable_sethi(const_addrlit, O7);
          __ relocate(rspec);
          __ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
        } else {
          assert(to_reg->is_double_cpu(), "Must be a long register.");
          __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
        }

      }
      break;

    default:
      ShouldNotReachHere();
  }
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Register reg = addr->base()->as_pointer_register();
  LIR_Opr index = addr->index();
  if (index->is_illegal()) {
    return Address(reg, addr->disp());
  } else {
    assert (addr->disp() == 0, "unsupported address mode");
    return Address(reg, index->as_pointer_register());
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  switch (type) {
    case T_INT:
    case T_FLOAT: {
      Register tmp = O7;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ lduw(from.base(), from.disp(), tmp);
      __ stw(tmp, to.base(), to.disp());
      break;
    }
    case T_ADDRESS:
    case T_OBJECT: {
      Register tmp = O7;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ ld_ptr(from.base(), from.disp(), tmp);
      __ st_ptr(tmp, to.base(), to.disp());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Register tmp = O7;
      Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
      Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ lduw(from.base(), from.disp(), tmp);
      __ stw(tmp, to.base(), to.disp());
      __ lduw(from.base(), from.disp() + 4, tmp);
      __ stw(tmp, to.base(), to.disp() + 4);
      break;
    }

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base.base(), base.disp() + hi_word_offset_in_bytes);
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base.base(), base.disp() + lo_word_offset_in_bytes);
}


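// Load from a LIR_Address into a register. When the field offset is not yet
// resolved a PatchingStub is emitted and the displacement is patched later.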
void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {

  assert(type != T_METADATA, "load of metadata ptr not supported");
  LIR_Address* addr = src_opr->as_address_ptr();
  LIR_Opr to_reg = dest;

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!to_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
      if (needs_patching) {
        __ patchable_set(0, O7);
      } else {
        __ set(disp_value, O7);
      }
      disp_reg = O7;
    }
  } else if (unaligned || PatchALot) {
    __ add(src, addr->index()->as_pointer_register(), O7);
    src = O7;
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // remember the offset of the load.  The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset = code_offset();

  assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
  if (disp_reg == noreg) {
    offset = load(src, disp_value, to_reg, type, wide, unaligned);
  } else {
    assert(!unaligned, "can't handle this");
    offset = load(src, disp_reg, to_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }
  if (info != NULL) add_debug_info_for_null_check(offset, info);
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  Address addr;
  if (src->is_single_word()) {
    addr = frame_map()->address_for_slot(src->single_stack_ix());
  } else if (src->is_double_word())  {
    addr = frame_map()->address_for_double_slot(src->double_stack_ix());
  }

  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  Address addr;
  if (dest->is_single_word()) {
    addr = frame_map()->address_for_slot(dest->single_stack_ix());
  } else if (dest->is_double_word())  {
    addr = frame_map()->address_for_slot(dest->double_stack_ix());
  }
  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
  if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
    if (from_reg->is_double_fpu()) {
      // double to double moves
      assert(to_reg->is_double_fpu(), "should match");
      __ fmov(FloatRegisterImpl::D, from_reg->as_double_reg(), to_reg->as_double_reg());
    } else {
      // float to float moves
      assert(to_reg->is_single_fpu(), "should match");
      __ fmov(FloatRegisterImpl::S, from_reg->as_float_reg(), to_reg->as_float_reg());
    }
  } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
    if (from_reg->is_double_cpu()) {
      __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
    } else if (to_reg->is_double_cpu()) {
      // int to int moves
      __ mov(from_reg->as_register(), to_reg->as_register_lo());
    } else {
      // int to int moves
      __ mov(from_reg->as_register(), to_reg->as_register());
    }
  } else {
    ShouldNotReachHere();
  }
  if (is_reference_type(to_reg->type())) {
    __ verify_oop(to_reg->as_register());
  }
}

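// Store a register through a LIR_Address, mirroring mem2reg: displacements
// that do not fit in a simm13 are staged in O7, and patched stores go
// through a PatchingStub.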
void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
                            bool wide, bool unaligned) {
  assert(type != T_METADATA, "store of metadata ptr not supported");
  LIR_Address* addr = dest->as_address_ptr();

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->is_oop_register()) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!from_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
      if (needs_patching) {
        __ patchable_set(0, O7);
      } else {
        __ set(disp_value, O7);
      }
      disp_reg = O7;
    }
  } else if (unaligned || PatchALot) {
    __ add(src, addr->index()->as_pointer_register(), O7);
    src = O7;
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // remember the offset of the store.  The patching_epilog must be done
  // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;

  assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
  if (disp_reg == noreg) {
    offset = store(from_reg, src, disp_value, type, wide, unaligned);
  } else {
    assert(!unaligned, "can't handle this");
    offset = store(from_reg, src, disp_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }

  if (info != NULL) add_debug_info_for_null_check(offset, info);
}


void LIR_Assembler::return_op(LIR_Opr result) {
  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }
  if (SafepointMechanism::uses_thread_local_poll()) {
    __ ld_ptr(Address(G2_thread, Thread::polling_page_offset()), L0);
  } else {
    __ set((intptr_t)os::get_polling_page(), L0);
  }
  __ relocate(relocInfo::poll_return_type);
  __ ld_ptr(L0, 0, G0);
  __ ret();
  __ delayed()->restore();
}


int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  if (SafepointMechanism::uses_thread_local_poll()) {
    __ ld_ptr(Address(G2_thread, Thread::polling_page_offset()), tmp->as_register());
  } else {
    __ set((intptr_t)os::get_polling_page(), tmp->as_register());
  }
  if (info != NULL) {
    add_debug_info_for_branch(info);
  }
  int offset = __ offset();

  __ relocate(relocInfo::poll_type);
  __ ld_ptr(tmp->as_register(), 0, G0);
  return offset;
}


void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();
  __ relocate(static_stub_Relocation::spec(call_pc));

  __ set_metadata(NULL, G5);
  // must be set to -1 at code generation time
  AddressLiteral addrlit(-1);
  __ jump_to(addrlit, G3);
  __ delayed()->nop();

  assert(__ offset() - start <= call_stub_size(), "stub too big");
  __ end_a_stub();
}


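// Emit the compare for a LIR_Op2: integer compares set icc/xcc, float and
// double compares set fcc0. The conditional branch or cmove that consumes
// the condition codes is emitted separately.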
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_single_fpu()) {
    __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, opr1->as_float_reg(), opr2->as_float_reg());
  } else if (opr1->is_double_fpu()) {
    __ fcmp(FloatRegisterImpl::D, Assembler::fcc0, opr1->as_double_reg(), opr2->as_double_reg());
  } else if (opr1->is_single_cpu()) {
    if (opr2->is_constant()) {
      switch (opr2->as_constant_ptr()->type()) {
        case T_INT:
          { jint con = opr2->as_constant_ptr()->as_jint();
            if (Assembler::is_simm13(con)) {
              __ cmp(opr1->as_register(), con);
            } else {
              __ set(con, O7);
              __ cmp(opr1->as_register(), O7);
            }
          }
          break;

        case T_OBJECT:
          // there are only equal/notequal comparisons on objects
          { jobject con = opr2->as_constant_ptr()->as_jobject();
            if (con == NULL) {
              __ cmp(opr1->as_register(), 0);
            } else {
              jobject2reg(con, O7);
              __ cmp(opr1->as_register(), O7);
            }
          }
          break;

        case T_METADATA:
          // We only need, for now, comparison with NULL for metadata.
          { assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
            Metadata* m = opr2->as_constant_ptr()->as_metadata();
            if (m == NULL) {
              __ cmp(opr1->as_register(), 0);
            } else {
              ShouldNotReachHere();
            }
          }
          break;

        default:
          ShouldNotReachHere();
          break;
      }
    } else {
      if (opr2->is_address()) {
        LIR_Address* addr = opr2->as_address_ptr();
        BasicType type = addr->type();
        if (type == T_OBJECT) __ ld_ptr(as_Address(addr), O7);
        else                  __ ld(as_Address(addr), O7);
        __ cmp(opr1->as_register(), O7);
      } else {
        __ cmp(opr1->as_register(), opr2->as_register());
      }
    }
  } else if (opr1->is_double_cpu()) {
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_constant() && opr2->as_jlong() == 0) {
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases");
      __ orcc(xhi, G0, G0);
    } else if (opr2->is_register()) {
      Register ylo = opr2->as_register_lo();
      Register yhi = opr2->as_register_hi();
      __ cmp(xlo, ylo);
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_address()) {
    LIR_Address* addr = opr1->as_address_ptr();
    BasicType type = addr->type();
    assert (opr2->is_constant(), "Checking");
    if (type == T_OBJECT) __ ld_ptr(as_Address(addr), O7);
    else                  __ ld(as_Address(addr), O7);
    __ cmp(O7, opr2->as_constant_ptr()->as_jint());
  } else {
    ShouldNotReachHere();
  }
}


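// Materialize the three-way compare result (-1, 0 or 1) for fcmpl/fcmpg
// and lcmp; is_unordered_less selects whether an unordered float compare
// produces -1 (fcmpl) or 1 (fcmpg).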
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
    } else if (left->is_double_fpu()) {
      __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
    } else {
      ShouldNotReachHere();
    }
  } else if (code == lir_cmp_l2i) {
    __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
  } else {
    ShouldNotReachHere();
  }
}


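// Conditional move: the "true" operand is loaded into the result before the
// branch (an int constant finishes loading in the delay slot) and is
// overwritten with the "false" operand when the branch falls through.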
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
  Assembler::Condition acond;
  switch (condition) {
    case lir_cond_equal:        acond = Assembler::equal;                break;
    case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
    case lir_cond_less:         acond = Assembler::less;                 break;
    case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
    case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
    case lir_cond_greater:      acond = Assembler::greater;              break;
    case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
    case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
    default:                    ShouldNotReachHere();
  };

  if (opr1->is_constant() && opr1->type() == T_INT) {
    Register dest = result->as_register();
    // load up first part of constant before branch
    // and do the rest in the delay slot.
    if (!Assembler::is_simm13(opr1->as_jint())) {
      __ sethi(opr1->as_jint(), dest);
    }
  } else if (opr1->is_constant()) {
    const2reg(opr1, result, lir_patch_none, NULL);
  } else if (opr1->is_register()) {
    reg2reg(opr1, result);
  } else if (opr1->is_stack()) {
    stack2reg(opr1, result, result->type());
  } else {
    ShouldNotReachHere();
  }
  Label skip;
  if (type == T_INT) {
    __ br(acond, false, Assembler::pt, skip);
  } else {
    __ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit
  }
  if (opr1->is_constant() && opr1->type() == T_INT) {
    Register dest = result->as_register();
    if (Assembler::is_simm13(opr1->as_jint())) {
      __ delayed()->or3(G0, opr1->as_jint(), dest);
    } else {
      // the sethi has been done above, so just put in the low 10 bits
      __ delayed()->or3(dest, opr1->as_jint() & 0x3ff, dest);
    }
  } else {
    // can't do anything useful in the delay slot
    __ delayed()->nop();
  }
  if (opr2->is_constant()) {
    const2reg(opr2, result, lir_patch_none, NULL);
  } else if (opr2->is_register()) {
    reg2reg(opr2, result);
  } else if (opr2->is_stack()) {
    stack2reg(opr2, result, result->type());
  } else {
    ShouldNotReachHere();
  }
  __ bind(skip);
}


void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "unused on this code path");
  assert(left->is_register(), "wrong items state");
  assert(dest->is_register(), "wrong items state");

  if (right->is_register()) {
    if (dest->is_float_kind()) {

      FloatRegister lreg, rreg, res;
      FloatRegisterImpl::Width w;
      if (right->is_single_fpu()) {
        w = FloatRegisterImpl::S;
        lreg = left->as_float_reg();
        rreg = right->as_float_reg();
        res  = dest->as_float_reg();
      } else {
        w = FloatRegisterImpl::D;
        lreg = left->as_double_reg();
        rreg = right->as_double_reg();
        res  = dest->as_double_reg();
      }

      switch (code) {
        case lir_add: __ fadd(w, lreg, rreg, res); break;
        case lir_sub: __ fsub(w, lreg, rreg, res); break;
        case lir_mul: // fall through
        case lir_mul_strictfp: __ fmul(w, lreg, rreg, res); break;
        case lir_div: // fall through
        case lir_div_strictfp: __ fdiv(w, lreg, rreg, res); break;
        default: ShouldNotReachHere();
      }

    } else if (dest->is_double_cpu()) {
      Register dst_lo = dest->as_register_lo();
      Register op1_lo = left->as_pointer_register();
      Register op2_lo = right->as_pointer_register();

      switch (code) {
        case lir_add:
          __ add(op1_lo, op2_lo, dst_lo);
          break;

        case lir_sub:
          __ sub(op1_lo, op2_lo, dst_lo);
          break;

        default: ShouldNotReachHere();
      }
    } else {
      assert (right->is_single_cpu(), "Just Checking");

      Register lreg = left->as_register();
      Register res  = dest->as_register();
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ add  (lreg, rreg, res); break;
        case lir_sub: __ sub  (lreg, rreg, res); break;
        case lir_mul: __ mulx (lreg, rreg, res); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    assert (right->is_constant(), "must be constant");

    if (dest->is_single_cpu()) {
      Register lreg = left->as_register();
      Register res  = dest->as_register();
      int simm13 = right->as_constant_ptr()->as_jint();

      switch (code) {
        case lir_add: __ add  (lreg, simm13, res); break;
        case lir_sub: __ sub  (lreg, simm13, res); break;
        case lir_mul: __ mulx (lreg, simm13, res); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register lreg = left->as_pointer_register();
      Register res  = dest->as_register_lo();
      long con = right->as_constant_ptr()->as_jlong();
      assert(Assembler::is_simm13(con), "must be simm13");

      switch (code) {
        case lir_add: __ add  (lreg, (int)con, res); break;
        case lir_sub: __ sub  (lreg, (int)con, res); break;
        case lir_mul: __ mulx (lreg, (int)con, res); break;
        default: ShouldNotReachHere();
      }
    }
  }
}

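// Math intrinsics: sqrt and abs each map to a single FPU instruction; the
// tan case only asserts its register invariants here (the result is
// expected in f0/f1, as the assert below documents).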
fall through 1672 case lir_mul_strictfp: __ fmul(w, lreg, rreg, res); break; 1673 case lir_div: // fall through 1674 case lir_div_strictfp: __ fdiv(w, lreg, rreg, res); break; 1675 default: ShouldNotReachHere(); 1676 } 1677 1678 } else if (dest->is_double_cpu()) { 1679 Register dst_lo = dest->as_register_lo(); 1680 Register op1_lo = left->as_pointer_register(); 1681 Register op2_lo = right->as_pointer_register(); 1682 1683 switch (code) { 1684 case lir_add: 1685 __ add(op1_lo, op2_lo, dst_lo); 1686 break; 1687 1688 case lir_sub: 1689 __ sub(op1_lo, op2_lo, dst_lo); 1690 break; 1691 1692 default: ShouldNotReachHere(); 1693 } 1694 } else { 1695 assert (right->is_single_cpu(), "Just Checking"); 1696 1697 Register lreg = left->as_register(); 1698 Register res = dest->as_register(); 1699 Register rreg = right->as_register(); 1700 switch (code) { 1701 case lir_add: __ add (lreg, rreg, res); break; 1702 case lir_sub: __ sub (lreg, rreg, res); break; 1703 case lir_mul: __ mulx (lreg, rreg, res); break; 1704 default: ShouldNotReachHere(); 1705 } 1706 } 1707 } else { 1708 assert (right->is_constant(), "must be constant"); 1709 1710 if (dest->is_single_cpu()) { 1711 Register lreg = left->as_register(); 1712 Register res = dest->as_register(); 1713 int simm13 = right->as_constant_ptr()->as_jint(); 1714 1715 switch (code) { 1716 case lir_add: __ add (lreg, simm13, res); break; 1717 case lir_sub: __ sub (lreg, simm13, res); break; 1718 case lir_mul: __ mulx (lreg, simm13, res); break; 1719 default: ShouldNotReachHere(); 1720 } 1721 } else { 1722 Register lreg = left->as_pointer_register(); 1723 Register res = dest->as_register_lo(); 1724 long con = right->as_constant_ptr()->as_jlong(); 1725 assert(Assembler::is_simm13(con), "must be simm13"); 1726 1727 switch (code) { 1728 case lir_add: __ add (lreg, (int)con, res); break; 1729 case lir_sub: __ sub (lreg, (int)con, res); break; 1730 case lir_mul: __ mulx (lreg, (int)con, res); break; 1731 default: ShouldNotReachHere(); 1732 } 1733 } 1734 } 1735 } 1736 1737 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) { 1738 switch (code) { 1739 case lir_tan: { 1740 assert(thread->is_valid(), "preserve the thread object for performance reasons"); 1741 assert(dest->as_double_reg() == F0, "the result will be in f0/f1"); 1742 break; 1743 } 1744 case lir_sqrt: { 1745 assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt"); 1746 FloatRegister src_reg = value->as_double_reg(); 1747 FloatRegister dst_reg = dest->as_double_reg(); 1748 __ fsqrt(FloatRegisterImpl::D, src_reg, dst_reg); 1749 break; 1750 } 1751 case lir_abs: { 1752 assert(!thread->is_valid(), "there is no need for a thread_reg for fabs"); 1753 FloatRegister src_reg = value->as_double_reg(); 1754 FloatRegister dst_reg = dest->as_double_reg(); 1755 __ fabs(FloatRegisterImpl::D, src_reg, dst_reg); 1756 break; 1757 } 1758 default: { 1759 ShouldNotReachHere(); 1760 break; 1761 } 1762 } 1763 } 1764 1765 1766 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) { 1767 if (right->is_constant()) { 1768 if (dest->is_single_cpu()) { 1769 int simm13 = right->as_constant_ptr()->as_jint(); 1770 switch (code) { 1771 case lir_logic_and: __ and3 (left->as_register(), simm13, dest->as_register()); break; 1772 case lir_logic_or: __ or3 (left->as_register(), simm13, dest->as_register()); break; 1773 case lir_logic_xor: __ xor3 (left->as_register(), simm13, dest->as_register()); break; 1774 default: ShouldNotReachHere(); 1775 
} 1776 } else { 1777 long c = right->as_constant_ptr()->as_jlong(); 1778 assert(c == (int)c && Assembler::is_simm13(c), "out of range"); 1779 int simm13 = (int)c; 1780 switch (code) { 1781 case lir_logic_and: 1782 __ and3 (left->as_register_lo(), simm13, dest->as_register_lo()); 1783 break; 1784 1785 case lir_logic_or: 1786 __ or3 (left->as_register_lo(), simm13, dest->as_register_lo()); 1787 break; 1788 1789 case lir_logic_xor: 1790 __ xor3 (left->as_register_lo(), simm13, dest->as_register_lo()); 1791 break; 1792 1793 default: ShouldNotReachHere(); 1794 } 1795 } 1796 } else { 1797 assert(right->is_register(), "right should be in register"); 1798 1799 if (dest->is_single_cpu()) { 1800 switch (code) { 1801 case lir_logic_and: __ and3 (left->as_register(), right->as_register(), dest->as_register()); break; 1802 case lir_logic_or: __ or3 (left->as_register(), right->as_register(), dest->as_register()); break; 1803 case lir_logic_xor: __ xor3 (left->as_register(), right->as_register(), dest->as_register()); break; 1804 default: ShouldNotReachHere(); 1805 } 1806 } else { 1807 Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() : 1808 left->as_register_lo(); 1809 Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() : 1810 right->as_register_lo(); 1811 1812 switch (code) { 1813 case lir_logic_and: __ and3 (l, r, dest->as_register_lo()); break; 1814 case lir_logic_or: __ or3 (l, r, dest->as_register_lo()); break; 1815 case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break; 1816 default: ShouldNotReachHere(); 1817 } 1818 } 1819 } 1820 } 1821 1822 1823 int LIR_Assembler::shift_amount(BasicType t) { 1824 int elem_size = type2aelembytes(t); 1825 switch (elem_size) { 1826 case 1 : return 0; 1827 case 2 : return 1; 1828 case 4 : return 2; 1829 case 8 : return 3; 1830 } 1831 ShouldNotReachHere(); 1832 return -1; 1833 } 1834 1835 1836 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { 1837 assert(exceptionOop->as_register() == Oexception, "should match"); 1838 assert(exceptionPC->as_register() == Oissuing_pc, "should match"); 1839 1840 info->add_register_oop(exceptionOop); 1841 1842 // reuse the debug info from the safepoint poll for the throw op itself 1843 address pc_for_athrow = __ pc(); 1844 int pc_for_athrow_offset = __ offset(); 1845 RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow); 1846 __ set(pc_for_athrow, Oissuing_pc, rspec); 1847 add_call_info(pc_for_athrow_offset, info); // for exception handler 1848 1849 __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type); 1850 __ delayed()->nop(); 1851 } 1852 1853 1854 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { 1855 assert(exceptionOop->as_register() == Oexception, "should match"); 1856 1857 __ br(Assembler::always, false, Assembler::pt, _unwind_handler_entry); 1858 __ delayed()->nop(); 1859 } 1860 1861 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { 1862 Register src = op->src()->as_register(); 1863 Register dst = op->dst()->as_register(); 1864 Register src_pos = op->src_pos()->as_register(); 1865 Register dst_pos = op->dst_pos()->as_register(); 1866 Register length = op->length()->as_register(); 1867 Register tmp = op->tmp()->as_register(); 1868 Register tmp2 = O7; 1869 1870 int flags = op->flags(); 1871 ciArrayKlass* default_type = op->expected_type(); 1872 BasicType basic_type = default_type != NULL ? 
default_type->element_type()->basic_type() : T_ILLEGAL;
1873 if (basic_type == T_ARRAY) basic_type = T_OBJECT;
1874
1875 // the higher 32 bits must be zero
1876 __ sra(dst_pos, 0, dst_pos);
1877 __ sra(src_pos, 0, src_pos);
1878 __ sra(length, 0, length);
1879
1880 // set up the arraycopy stub information
1881 ArrayCopyStub* stub = op->stub();
1882
1883 // Always go through the generic stub if no type information is available: it is safe
1884 // even when the known type is not loaded, since the code sanity-checks in debug mode,
1885 // and the type is not required once the exact type is known; the generic stub also
1886 // checks that the type really is an array type.
1887 if (op->expected_type() == NULL) {
1888 __ mov(src, O0);
1889 __ mov(src_pos, O1);
1890 __ mov(dst, O2);
1891 __ mov(dst_pos, O3);
1892 __ mov(length, O4);
1893 address copyfunc_addr = StubRoutines::generic_arraycopy();
1894 assert(copyfunc_addr != NULL, "generic arraycopy stub required");
1895
1896 #ifndef PRODUCT
1897 if (PrintC1Statistics) {
1898 address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
1899 __ inc_counter(counter, G1, G3);
1900 }
1901 #endif
1902 __ call_VM_leaf(tmp, copyfunc_addr);
1903
1904 __ xor3(O0, -1, tmp);
1905 __ sub(length, tmp, length);
1906 __ add(src_pos, tmp, src_pos);
1907 __ cmp_zero_and_br(Assembler::less, O0, *stub->entry());
1908 __ delayed()->add(dst_pos, tmp, dst_pos);
1909 __ bind(*stub->continuation());
1910 return;
1911 }
1912
1913 assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");
1914
1915 // make sure src and dst are non-null
1916 if (flags & LIR_OpArrayCopy::src_null_check) {
1917 __ tst(src);
1918 __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
1919 __ delayed()->nop();
1920 }
1921
1922 if (flags & LIR_OpArrayCopy::dst_null_check) {
1923 __ tst(dst);
1924 __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
1925 __ delayed()->nop();
1926 }
1927
1928 // If the compiler was not able to prove that the exact type of the source or the destination
1929 // of the arraycopy is an array type, check at runtime whether the source or the destination is
1930 // an instance type.
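// Note: Klass::layout_helper() encodes array klasses as negative values and instance
// klasses as non-negative ones, so the greaterEqual compare against
// Klass::_lh_neutral_value below sends any operand that is not an array to the slow stub.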
1931 if (flags & LIR_OpArrayCopy::type_check) {
1932 if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
1933 __ load_klass(dst, tmp);
1934 __ lduw(tmp, in_bytes(Klass::layout_helper_offset()), tmp2);
1935 __ cmp(tmp2, Klass::_lh_neutral_value);
1936 __ br(Assembler::greaterEqual, false, Assembler::pn, *stub->entry());
1937 __ delayed()->nop();
1938 }
1939
1940 if (!(flags & LIR_OpArrayCopy::src_objarray)) {
1941 __ load_klass(src, tmp);
1942 __ lduw(tmp, in_bytes(Klass::layout_helper_offset()), tmp2);
1943 __ cmp(tmp2, Klass::_lh_neutral_value);
1944 __ br(Assembler::greaterEqual, false, Assembler::pn, *stub->entry());
1945 __ delayed()->nop();
1946 }
1947 }
1948
1949 if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
1950 // test src_pos register
1951 __ cmp_zero_and_br(Assembler::less, src_pos, *stub->entry());
1952 __ delayed()->nop();
1953 }
1954
1955 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
1956 // test dst_pos register
1957 __ cmp_zero_and_br(Assembler::less, dst_pos, *stub->entry());
1958 __ delayed()->nop();
1959 }
1960
1961 if (flags & LIR_OpArrayCopy::length_positive_check) {
1962 // make sure length isn't negative
1963 __ cmp_zero_and_br(Assembler::less, length, *stub->entry());
1964 __ delayed()->nop();
1965 }
1966
1967 if (flags & LIR_OpArrayCopy::src_range_check) {
1968 __ ld(src, arrayOopDesc::length_offset_in_bytes(), tmp2);
1969 __ add(length, src_pos, tmp);
1970 __ cmp(tmp2, tmp);
1971 __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
1972 __ delayed()->nop();
1973 }
1974
1975 if (flags & LIR_OpArrayCopy::dst_range_check) {
1976 __ ld(dst, arrayOopDesc::length_offset_in_bytes(), tmp2);
1977 __ add(length, dst_pos, tmp);
1978 __ cmp(tmp2, tmp);
1979 __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
1980 __ delayed()->nop();
1981 }
1982
1983 int shift = shift_amount(basic_type);
1984
1985 if (flags & LIR_OpArrayCopy::type_check) {
1986 // We don't know whether the array types are compatible
1987 if (basic_type != T_OBJECT) {
1988 // Simple test for basic type arrays
1989 if (UseCompressedClassPointers) {
1990 // No need to decode: comparing the compressed values is sufficient
1991 __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
1992 __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
1993 __ cmp(tmp, tmp2);
1994 __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
1995 } else {
1996 __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
1997 __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
1998 __ cmp(tmp, tmp2);
1999 __ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry());
2000 }
2001 __ delayed()->nop();
2002 } else {
2003 // For object arrays, if src is a subclass of dst then we can
2004 // safely do the copy.
2005 address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2006
2007 Label cont, slow;
2008 assert_different_registers(tmp, tmp2, G3, G1);
2009
2010 __ load_klass(src, G3);
2011 __ load_klass(dst, G1);
2012
2013 __ check_klass_subtype_fast_path(G3, G1, tmp, tmp2, &cont, copyfunc_addr == NULL ? stub->entry() : &slow, NULL);
2014
2015 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
2016 __ delayed()->nop();
2017
2018 __ cmp(G3, 0);
2019 if (copyfunc_addr != NULL) { // use stub if available
2020 // src is not a subclass of dst so we have to do a
2021 // per-element check.
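// The checkcast stub called below follows the common arraycopy stub convention:
// it returns 0 in O0 when all elements were copied, or the bitwise complement of
// the number of elements copied when an element failed its store check; the
// xor3(O0, -1, tmp) in the delay slot after the call recovers that count so
// src_pos/dst_pos/length can be adjusted before retrying via the slow stub.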
2022 __ br(Assembler::notEqual, false, Assembler::pt, cont);
2023 __ delayed()->nop();
2024
2025 __ bind(slow);
2026
2027 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
2028 if ((flags & mask) != mask) {
2029 // One of the two is known to be an object array; check at runtime that the other one is too.
2030 assert(flags & mask, "one of the two should be known to be an object array");
2031
2032 if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2033 __ load_klass(src, tmp);
2034 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2035 __ load_klass(dst, tmp);
2036 }
2037 int lh_offset = in_bytes(Klass::layout_helper_offset());
2038
2039 __ lduw(tmp, lh_offset, tmp2);
2040
2041 jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2042 __ set(objArray_lh, tmp);
2043 __ cmp(tmp, tmp2);
2044 __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
2045 __ delayed()->nop();
2046 }
2047
2048 Register src_ptr = O0;
2049 Register dst_ptr = O1;
2050 Register len = O2;
2051 Register chk_off = O3;
2052 Register super_k = O4;
2053
2054 __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
2055 if (shift == 0) {
2056 __ add(src_ptr, src_pos, src_ptr);
2057 } else {
2058 __ sll(src_pos, shift, tmp);
2059 __ add(src_ptr, tmp, src_ptr);
2060 }
2061
2062 __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
2063 if (shift == 0) {
2064 __ add(dst_ptr, dst_pos, dst_ptr);
2065 } else {
2066 __ sll(dst_pos, shift, tmp);
2067 __ add(dst_ptr, tmp, dst_ptr);
2068 }
2069 __ mov(length, len);
2070 __ load_klass(dst, tmp);
2071
2072 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
2073 __ ld_ptr(tmp, ek_offset, super_k);
2074
2075 int sco_offset = in_bytes(Klass::super_check_offset_offset());
2076 __ lduw(super_k, sco_offset, chk_off);
2077
2078 __ call_VM_leaf(tmp, copyfunc_addr);
2079
2080 #ifndef PRODUCT
2081 if (PrintC1Statistics) {
2082 Label failed;
2083 __ br_notnull_short(O0, Assembler::pn, failed);
2084 __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, G1, G3);
2085 __ bind(failed);
2086 }
2087 #endif
2088
2089 __ br_null(O0, false, Assembler::pt, *stub->continuation());
2090 __ delayed()->xor3(O0, -1, tmp);
2091
2092 #ifndef PRODUCT
2093 if (PrintC1Statistics) {
2094 __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, G1, G3);
2095 }
2096 #endif
2097
2098 __ sub(length, tmp, length);
2099 __ add(src_pos, tmp, src_pos);
2100 __ br(Assembler::always, false, Assembler::pt, *stub->entry());
2101 __ delayed()->add(dst_pos, tmp, dst_pos);
2102
2103 __ bind(cont);
2104 } else {
2105 __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
2106 __ delayed()->nop();
2107 __ bind(cont);
2108 }
2109 }
2110 }
2111
2112 #ifdef ASSERT
2113 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2114 // Sanity check the known type with the incoming class. For the
2115 // primitive case the types must match exactly with src.klass and
2116 // dst.klass each exactly matching the default type. For the
2117 // object array case, if no type check is needed then either the
2118 // dst type is exactly the expected type and the src type is a
2119 // subtype which we can't check, or src is the same array as dst
2120 // but not necessarily exactly of type default_type.
2121 Label known_ok, halt;
2122 metadata2reg(op->expected_type()->constant_encoding(), tmp);
2123 if (UseCompressedClassPointers) {
2124 // tmp holds the default type. It currently comes uncompressed after the
2125 // load of a constant, so encode it.
2126 __ encode_klass_not_null(tmp);
2127 // load the raw value of the dst klass, since we will be comparing
2128 // uncompressed values directly.
2129 __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2130 if (basic_type != T_OBJECT) {
2131 __ cmp(tmp, tmp2);
2132 __ br(Assembler::notEqual, false, Assembler::pn, halt);
2133 // load the raw value of the src klass.
2134 __ delayed()->lduw(src, oopDesc::klass_offset_in_bytes(), tmp2);
2135 __ cmp_and_br_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
2136 } else {
2137 __ cmp(tmp, tmp2);
2138 __ br(Assembler::equal, false, Assembler::pn, known_ok);
2139 __ delayed()->cmp(src, dst);
2140 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
2141 __ delayed()->nop();
2142 }
2143 } else {
2144 __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2145 if (basic_type != T_OBJECT) {
2146 __ cmp(tmp, tmp2);
2147 __ brx(Assembler::notEqual, false, Assembler::pn, halt);
2148 __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
2149 __ cmp_and_brx_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
2150 } else {
2151 __ cmp(tmp, tmp2);
2152 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
2153 __ delayed()->cmp(src, dst);
2154 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
2155 __ delayed()->nop();
2156 }
2157 }
2158 __ bind(halt);
2159 __ stop("incorrect type information in arraycopy");
2160 __ bind(known_ok);
2161 }
2162 #endif
2163
2164 #ifndef PRODUCT
2165 if (PrintC1Statistics) {
2166 address counter = Runtime1::arraycopy_count_address(basic_type);
2167 __ inc_counter(counter, G1, G3);
2168 }
2169 #endif
2170
2171 Register src_ptr = O0;
2172 Register dst_ptr = O1;
2173 Register len = O2;
2174
2175 __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
2176 if (shift == 0) {
2177 __ add(src_ptr, src_pos, src_ptr);
2178 } else {
2179 __ sll(src_pos, shift, tmp);
2180 __ add(src_ptr, tmp, src_ptr);
2181 }
2182
2183 __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
2184 if (shift == 0) {
2185 __ add(dst_ptr, dst_pos, dst_ptr);
2186 } else {
2187 __ sll(dst_pos, shift, tmp);
2188 __ add(dst_ptr, tmp, dst_ptr);
2189 }
2190
2191 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
2192 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
2193 const char *name;
2194 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2195
2196 // arraycopy stubs take the length in number of elements, so don't scale it.
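// The stub selected above by select_arraycopy_function() is specialized for the
// element size, the alignment of the operands, and whether the ranges may overlap
// (conjoint vs. disjoint copy); src_ptr/dst_ptr/len are passed in O0-O2 per the C ABI.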
2197 __ mov(length, len);
2198 __ call_VM_leaf(tmp, entry);
2199
2200 __ bind(*stub->continuation());
2201 }
2202
2203
2204 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2205 if (dest->is_single_cpu()) {
2206 if (left->type() == T_OBJECT) {
2207 switch (code) {
2208 case lir_shl: __ sllx (left->as_register(), count->as_register(), dest->as_register()); break;
2209 case lir_shr: __ srax (left->as_register(), count->as_register(), dest->as_register()); break;
2210 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break;
2211 default: ShouldNotReachHere();
2212 }
2213 } else
2214 switch (code) {
2215 case lir_shl: __ sll (left->as_register(), count->as_register(), dest->as_register()); break;
2216 case lir_shr: __ sra (left->as_register(), count->as_register(), dest->as_register()); break;
2217 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break;
2218 default: ShouldNotReachHere();
2219 }
2220 } else {
2221 switch (code) {
2222 case lir_shl: __ sllx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
2223 case lir_shr: __ srax (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
2224 case lir_ushr: __ srlx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
2225 default: ShouldNotReachHere();
2226 }
2227 }
2228 }
2229
2230
2231 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2232 if (left->type() == T_OBJECT) {
2233 count = count & 63; // a 64-bit value shifts by at most 63 bits
2234 Register l = left->as_register();
2235 Register d = dest->as_register_lo();
2236 switch (code) {
2237 case lir_shl: __ sllx (l, count, d); break;
2238 case lir_shr: __ srax (l, count, d); break;
2239 case lir_ushr: __ srlx (l, count, d); break;
2240 default: ShouldNotReachHere();
2241 }
2242 return;
2243 }
2244
2245 if (dest->is_single_cpu()) {
2246 count = count & 0x1F; // Java spec
2247 switch (code) {
2248 case lir_shl: __ sll (left->as_register(), count, dest->as_register()); break;
2249 case lir_shr: __ sra (left->as_register(), count, dest->as_register()); break;
2250 case lir_ushr: __ srl (left->as_register(), count, dest->as_register()); break;
2251 default: ShouldNotReachHere();
2252 }
2253 } else if (dest->is_double_cpu()) {
2254 count = count & 63; // Java spec
2255 switch (code) {
2256 case lir_shl: __ sllx (left->as_pointer_register(), count, dest->as_pointer_register()); break;
2257 case lir_shr: __ srax (left->as_pointer_register(), count, dest->as_pointer_register()); break;
2258 case lir_ushr: __ srlx (left->as_pointer_register(), count, dest->as_pointer_register()); break;
2259 default: ShouldNotReachHere();
2260 }
2261 } else {
2262 ShouldNotReachHere();
2263 }
2264 }
2265
2266
2267 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
2268 assert(op->tmp1()->as_register() == G1 &&
2269 op->tmp2()->as_register() == G3 &&
2270 op->tmp3()->as_register() == G4 &&
2271 op->obj()->as_register() == O0 &&
2272 op->klass()->as_register() == G5, "must be");
2273 if (op->init_check()) {
2274 add_debug_info_for_null_check_here(op->stub()->info());
2275 __ ldub(op->klass()->as_register(),
2276 in_bytes(InstanceKlass::init_state_offset()),
2277 op->tmp1()->as_register());
2278 __ cmp(op->tmp1()->as_register(), InstanceKlass::fully_initialized);
2279 __ br(Assembler::notEqual, false, Assembler::pn, *op->stub()->entry());
2280 __ delayed()->nop();
2281
} 2282 __ allocate_object(op->obj()->as_register(), 2283 op->tmp1()->as_register(), 2284 op->tmp2()->as_register(), 2285 op->tmp3()->as_register(), 2286 op->header_size(), 2287 op->object_size(), 2288 op->klass()->as_register(), 2289 *op->stub()->entry()); 2290 __ bind(*op->stub()->continuation()); 2291 __ verify_oop(op->obj()->as_register()); 2292 } 2293 2294 2295 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { 2296 assert(op->tmp1()->as_register() == G1 && 2297 op->tmp2()->as_register() == G3 && 2298 op->tmp3()->as_register() == G4 && 2299 op->tmp4()->as_register() == O1 && 2300 op->klass()->as_register() == G5, "must be"); 2301 2302 __ signx(op->len()->as_register()); 2303 if (UseSlowPath || 2304 (!UseFastNewObjectArray && is_reference_type(op->type())) || 2305 (!UseFastNewTypeArray && !is_reference_type(op->type()))) { 2306 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry()); 2307 __ delayed()->nop(); 2308 } else { 2309 __ allocate_array(op->obj()->as_register(), 2310 op->len()->as_register(), 2311 op->tmp1()->as_register(), 2312 op->tmp2()->as_register(), 2313 op->tmp3()->as_register(), 2314 arrayOopDesc::header_size(op->type()), 2315 type2aelembytes(op->type()), 2316 op->klass()->as_register(), 2317 *op->stub()->entry()); 2318 } 2319 __ bind(*op->stub()->continuation()); 2320 } 2321 2322 2323 void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias, 2324 ciMethodData *md, ciProfileData *data, 2325 Register recv, Register tmp1, Label* update_done) { 2326 uint i; 2327 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2328 Label next_test; 2329 // See if the receiver is receiver[n]. 2330 Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - 2331 mdo_offset_bias); 2332 __ ld_ptr(receiver_addr, tmp1); 2333 __ verify_klass_ptr(tmp1); 2334 __ cmp_and_brx_short(recv, tmp1, Assembler::notEqual, Assembler::pt, next_test); 2335 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - 2336 mdo_offset_bias); 2337 __ ld_ptr(data_addr, tmp1); 2338 __ add(tmp1, DataLayout::counter_increment, tmp1); 2339 __ st_ptr(tmp1, data_addr); 2340 __ ba(*update_done); 2341 __ delayed()->nop(); 2342 __ bind(next_test); 2343 } 2344 2345 // Didn't find receiver; find next empty slot and fill it in 2346 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2347 Label next_test; 2348 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - 2349 mdo_offset_bias); 2350 __ ld_ptr(recv_addr, tmp1); 2351 __ br_notnull_short(tmp1, Assembler::pt, next_test); 2352 __ st_ptr(recv, recv_addr); 2353 __ set(DataLayout::counter_increment, tmp1); 2354 __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - 2355 mdo_offset_bias); 2356 __ ba(*update_done); 2357 __ delayed()->nop(); 2358 __ bind(next_test); 2359 } 2360 } 2361 2362 2363 void LIR_Assembler::setup_md_access(ciMethod* method, int bci, 2364 ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) { 2365 md = method->method_data_or_null(); 2366 assert(md != NULL, "Sanity"); 2367 data = md->bci_to_data(bci); 2368 assert(data != NULL, "need data for checkcast"); 2369 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 2370 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) { 2371 // The offset is large so bias the mdo by the base of the slot so 2372 // that the ld can use 
simm13s to reference the slots of the data 2373 mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset()); 2374 } 2375 } 2376 2377 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) { 2378 // we always need a stub for the failure case. 2379 CodeStub* stub = op->stub(); 2380 Register obj = op->object()->as_register(); 2381 Register k_RInfo = op->tmp1()->as_register(); 2382 Register klass_RInfo = op->tmp2()->as_register(); 2383 Register dst = op->result_opr()->as_register(); 2384 Register Rtmp1 = op->tmp3()->as_register(); 2385 ciKlass* k = op->klass(); 2386 2387 2388 if (obj == k_RInfo) { 2389 k_RInfo = klass_RInfo; 2390 klass_RInfo = obj; 2391 } 2392 2393 ciMethodData* md; 2394 ciProfileData* data; 2395 int mdo_offset_bias = 0; 2396 if (op->should_profile()) { 2397 ciMethod* method = op->profiled_method(); 2398 assert(method != NULL, "Should have method"); 2399 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias); 2400 2401 Label not_null; 2402 __ br_notnull_short(obj, Assembler::pn, not_null); 2403 Register mdo = k_RInfo; 2404 Register data_val = Rtmp1; 2405 metadata2reg(md->constant_encoding(), mdo); 2406 if (mdo_offset_bias > 0) { 2407 __ set(mdo_offset_bias, data_val); 2408 __ add(mdo, data_val, mdo); 2409 } 2410 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias); 2411 __ ldub(flags_addr, data_val); 2412 __ or3(data_val, BitData::null_seen_byte_constant(), data_val); 2413 __ stb(data_val, flags_addr); 2414 __ ba(*obj_is_null); 2415 __ delayed()->nop(); 2416 __ bind(not_null); 2417 } else { 2418 __ br_null(obj, false, Assembler::pn, *obj_is_null); 2419 __ delayed()->nop(); 2420 } 2421 2422 Label profile_cast_failure, profile_cast_success; 2423 Label *failure_target = op->should_profile() ? &profile_cast_failure : failure; 2424 Label *success_target = op->should_profile() ? &profile_cast_success : success; 2425 2426 // patching may screw with our temporaries on sparc, 2427 // so let's do it before loading the class 2428 if (k->is_loaded()) { 2429 metadata2reg(k->constant_encoding(), k_RInfo); 2430 } else { 2431 klass2reg_with_patching(k_RInfo, op->info_for_patch()); 2432 } 2433 assert(obj != k_RInfo, "must be different"); 2434 2435 // get object class 2436 // not a safepoint as obj null check happens earlier 2437 __ load_klass(obj, klass_RInfo); 2438 if (op->fast_check()) { 2439 assert_different_registers(klass_RInfo, k_RInfo); 2440 __ cmp(k_RInfo, klass_RInfo); 2441 __ brx(Assembler::notEqual, false, Assembler::pt, *failure_target); 2442 __ delayed()->nop(); 2443 } else { 2444 bool need_slow_path = true; 2445 if (k->is_loaded()) { 2446 if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset())) 2447 need_slow_path = false; 2448 // perform the fast part of the checking logic 2449 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg, 2450 (need_slow_path ? 
success_target : NULL), 2451 failure_target, NULL, 2452 RegisterOrConstant(k->super_check_offset())); 2453 } else { 2454 // perform the fast part of the checking logic 2455 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, 2456 failure_target, NULL); 2457 } 2458 if (need_slow_path) { 2459 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 2460 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup"); 2461 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); 2462 __ delayed()->nop(); 2463 __ cmp(G3, 0); 2464 __ br(Assembler::equal, false, Assembler::pn, *failure_target); 2465 __ delayed()->nop(); 2466 // Fall through to success case 2467 } 2468 } 2469 2470 if (op->should_profile()) { 2471 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1; 2472 assert_different_registers(obj, mdo, recv, tmp1); 2473 __ bind(profile_cast_success); 2474 metadata2reg(md->constant_encoding(), mdo); 2475 if (mdo_offset_bias > 0) { 2476 __ set(mdo_offset_bias, tmp1); 2477 __ add(mdo, tmp1, mdo); 2478 } 2479 __ load_klass(obj, recv); 2480 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success); 2481 // Jump over the failure case 2482 __ ba(*success); 2483 __ delayed()->nop(); 2484 // Cast failure case 2485 __ bind(profile_cast_failure); 2486 metadata2reg(md->constant_encoding(), mdo); 2487 if (mdo_offset_bias > 0) { 2488 __ set(mdo_offset_bias, tmp1); 2489 __ add(mdo, tmp1, mdo); 2490 } 2491 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); 2492 __ ld_ptr(data_addr, tmp1); 2493 __ sub(tmp1, DataLayout::counter_increment, tmp1); 2494 __ st_ptr(tmp1, data_addr); 2495 __ ba(*failure); 2496 __ delayed()->nop(); 2497 } 2498 __ ba(*success); 2499 __ delayed()->nop(); 2500 } 2501 2502 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { 2503 LIR_Code code = op->code(); 2504 if (code == lir_store_check) { 2505 Register value = op->object()->as_register(); 2506 Register array = op->array()->as_register(); 2507 Register k_RInfo = op->tmp1()->as_register(); 2508 Register klass_RInfo = op->tmp2()->as_register(); 2509 Register Rtmp1 = op->tmp3()->as_register(); 2510 2511 __ verify_oop(value); 2512 CodeStub* stub = op->stub(); 2513 // check if it needs to be profiled 2514 ciMethodData* md; 2515 ciProfileData* data; 2516 int mdo_offset_bias = 0; 2517 if (op->should_profile()) { 2518 ciMethod* method = op->profiled_method(); 2519 assert(method != NULL, "Should have method"); 2520 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias); 2521 } 2522 Label profile_cast_success, profile_cast_failure, done; 2523 Label *success_target = op->should_profile() ? &profile_cast_success : &done; 2524 Label *failure_target = op->should_profile() ? 
&profile_cast_failure : stub->entry(); 2525 2526 if (op->should_profile()) { 2527 Label not_null; 2528 __ br_notnull_short(value, Assembler::pn, not_null); 2529 Register mdo = k_RInfo; 2530 Register data_val = Rtmp1; 2531 metadata2reg(md->constant_encoding(), mdo); 2532 if (mdo_offset_bias > 0) { 2533 __ set(mdo_offset_bias, data_val); 2534 __ add(mdo, data_val, mdo); 2535 } 2536 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias); 2537 __ ldub(flags_addr, data_val); 2538 __ or3(data_val, BitData::null_seen_byte_constant(), data_val); 2539 __ stb(data_val, flags_addr); 2540 __ ba_short(done); 2541 __ bind(not_null); 2542 } else { 2543 __ br_null_short(value, Assembler::pn, done); 2544 } 2545 add_debug_info_for_null_check_here(op->info_for_exception()); 2546 __ load_klass(array, k_RInfo); 2547 __ load_klass(value, klass_RInfo); 2548 2549 // get instance klass 2550 __ ld_ptr(Address(k_RInfo, ObjArrayKlass::element_klass_offset()), k_RInfo); 2551 // perform the fast part of the checking logic 2552 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL); 2553 2554 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 2555 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup"); 2556 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); 2557 __ delayed()->nop(); 2558 __ cmp(G3, 0); 2559 __ br(Assembler::equal, false, Assembler::pn, *failure_target); 2560 __ delayed()->nop(); 2561 // fall through to the success case 2562 2563 if (op->should_profile()) { 2564 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1; 2565 assert_different_registers(value, mdo, recv, tmp1); 2566 __ bind(profile_cast_success); 2567 metadata2reg(md->constant_encoding(), mdo); 2568 if (mdo_offset_bias > 0) { 2569 __ set(mdo_offset_bias, tmp1); 2570 __ add(mdo, tmp1, mdo); 2571 } 2572 __ load_klass(value, recv); 2573 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done); 2574 __ ba_short(done); 2575 // Cast failure case 2576 __ bind(profile_cast_failure); 2577 metadata2reg(md->constant_encoding(), mdo); 2578 if (mdo_offset_bias > 0) { 2579 __ set(mdo_offset_bias, tmp1); 2580 __ add(mdo, tmp1, mdo); 2581 } 2582 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); 2583 __ ld_ptr(data_addr, tmp1); 2584 __ sub(tmp1, DataLayout::counter_increment, tmp1); 2585 __ st_ptr(tmp1, data_addr); 2586 __ ba(*stub->entry()); 2587 __ delayed()->nop(); 2588 } 2589 __ bind(done); 2590 } else if (code == lir_checkcast) { 2591 Register obj = op->object()->as_register(); 2592 Register dst = op->result_opr()->as_register(); 2593 Label success; 2594 emit_typecheck_helper(op, &success, op->stub()->entry(), &success); 2595 __ bind(success); 2596 __ mov(obj, dst); 2597 } else if (code == lir_instanceof) { 2598 Register obj = op->object()->as_register(); 2599 Register dst = op->result_opr()->as_register(); 2600 Label success, failure, done; 2601 emit_typecheck_helper(op, &success, &failure, &failure); 2602 __ bind(failure); 2603 __ set(0, dst); 2604 __ ba_short(done); 2605 __ bind(success); 2606 __ set(1, dst); 2607 __ bind(done); 2608 } else { 2609 ShouldNotReachHere(); 2610 } 2611 2612 } 2613 2614 2615 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { 2616 if (op->code() == lir_cas_long) { 2617 assert(VM_Version::supports_cx8(), "wrong machine"); 2618 Register addr = 
op->addr()->as_pointer_register(); 2619 Register cmp_value_lo = op->cmp_value()->as_register_lo(); 2620 Register cmp_value_hi = op->cmp_value()->as_register_hi(); 2621 Register new_value_lo = op->new_value()->as_register_lo(); 2622 Register new_value_hi = op->new_value()->as_register_hi(); 2623 Register t1 = op->tmp1()->as_register(); 2624 Register t2 = op->tmp2()->as_register(); 2625 __ mov(cmp_value_lo, t1); 2626 __ mov(new_value_lo, t2); 2627 // perform the compare and swap operation 2628 __ casx(addr, t1, t2); 2629 // generate condition code - if the swap succeeded, t2 ("new value" reg) was 2630 // overwritten with the original value in "addr" and will be equal to t1. 2631 __ cmp(t1, t2); 2632 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) { 2633 Register addr = op->addr()->as_pointer_register(); 2634 Register cmp_value = op->cmp_value()->as_register(); 2635 Register new_value = op->new_value()->as_register(); 2636 Register t1 = op->tmp1()->as_register(); 2637 Register t2 = op->tmp2()->as_register(); 2638 __ mov(cmp_value, t1); 2639 __ mov(new_value, t2); 2640 if (op->code() == lir_cas_obj) { 2641 if (UseCompressedOops) { 2642 __ encode_heap_oop(t1); 2643 __ encode_heap_oop(t2); 2644 __ cas(addr, t1, t2); 2645 } else { 2646 __ cas_ptr(addr, t1, t2); 2647 } 2648 } else { 2649 __ cas(addr, t1, t2); 2650 } 2651 __ cmp(t1, t2); 2652 } else { 2653 Unimplemented(); 2654 } 2655 } 2656 2657 void LIR_Assembler::breakpoint() { 2658 __ breakpoint_trap(); 2659 } 2660 2661 2662 void LIR_Assembler::push(LIR_Opr opr) { 2663 Unimplemented(); 2664 } 2665 2666 2667 void LIR_Assembler::pop(LIR_Opr opr) { 2668 Unimplemented(); 2669 } 2670 2671 2672 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) { 2673 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no); 2674 Register dst = dst_opr->as_register(); 2675 Register reg = mon_addr.base(); 2676 int offset = mon_addr.disp(); 2677 // compute pointer to BasicLock 2678 if (mon_addr.is_simm13()) { 2679 __ add(reg, offset, dst); 2680 } else { 2681 __ set(offset, dst); 2682 __ add(dst, reg, dst); 2683 } 2684 } 2685 2686 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { 2687 assert(op->crc()->is_single_cpu(), "crc must be register"); 2688 assert(op->val()->is_single_cpu(), "byte value must be register"); 2689 assert(op->result_opr()->is_single_cpu(), "result must be register"); 2690 Register crc = op->crc()->as_register(); 2691 Register val = op->val()->as_register(); 2692 Register table = op->result_opr()->as_register(); 2693 Register res = op->result_opr()->as_register(); 2694 2695 assert_different_registers(val, crc, table); 2696 2697 __ set(ExternalAddress(StubRoutines::crc_table_addr()), table); 2698 __ not1(crc); 2699 __ clruwu(crc); 2700 __ update_byte_crc32(crc, val, table); 2701 __ not1(crc); 2702 2703 __ mov(crc, res); 2704 } 2705 2706 void LIR_Assembler::emit_lock(LIR_OpLock* op) { 2707 Register obj = op->obj_opr()->as_register(); 2708 Register hdr = op->hdr_opr()->as_register(); 2709 Register lock = op->lock_opr()->as_register(); 2710 2711 // obj may not be an oop 2712 if (op->code() == lir_lock) { 2713 MonitorEnterStub* stub = (MonitorEnterStub*)op->stub(); 2714 if (UseFastLocking) { 2715 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2716 // add debug info for NullPointerException only if one is possible 2717 if (op->info() != NULL) { 2718 add_debug_info_for_null_check_here(op->info()); 2719 } 2720 __ lock_object(hdr, obj, 
lock, op->scratch_opr()->as_register(), *op->stub()->entry()); 2721 } else { 2722 // always do slow locking 2723 // note: the slow locking code could be inlined here, however if we use 2724 // slow locking, speed doesn't matter anyway and this solution is 2725 // simpler and requires less duplicated code - additionally, the 2726 // slow locking code is the same in either case which simplifies 2727 // debugging 2728 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry()); 2729 __ delayed()->nop(); 2730 } 2731 } else { 2732 assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock"); 2733 if (UseFastLocking) { 2734 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2735 __ unlock_object(hdr, obj, lock, *op->stub()->entry()); 2736 } else { 2737 // always do slow unlocking 2738 // note: the slow unlocking code could be inlined here, however if we use 2739 // slow unlocking, speed doesn't matter anyway and this solution is 2740 // simpler and requires less duplicated code - additionally, the 2741 // slow unlocking code is the same in either case which simplifies 2742 // debugging 2743 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry()); 2744 __ delayed()->nop(); 2745 } 2746 } 2747 __ bind(*op->stub()->continuation()); 2748 } 2749 2750 2751 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { 2752 ciMethod* method = op->profiled_method(); 2753 int bci = op->profiled_bci(); 2754 ciMethod* callee = op->profiled_callee(); 2755 2756 // Update counter for all call types 2757 ciMethodData* md = method->method_data_or_null(); 2758 assert(md != NULL, "Sanity"); 2759 ciProfileData* data = md->bci_to_data(bci); 2760 assert(data != NULL && data->is_CounterData(), "need CounterData for calls"); 2761 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 2762 Register mdo = op->mdo()->as_register(); 2763 assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated"); 2764 Register tmp1 = op->tmp1()->as_register_lo(); 2765 metadata2reg(md->constant_encoding(), mdo); 2766 int mdo_offset_bias = 0; 2767 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) + 2768 data->size_in_bytes())) { 2769 // The offset is large so bias the mdo by the base of the slot so 2770 // that the ld can use simm13s to reference the slots of the data 2771 mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset()); 2772 __ set(mdo_offset_bias, O7); 2773 __ add(mdo, O7, mdo); 2774 } 2775 2776 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); 2777 // Perform additional virtual call profiling for invokevirtual and 2778 // invokeinterface bytecodes 2779 if (op->should_profile_receiver_type()) { 2780 assert(op->recv()->is_single_cpu(), "recv must be allocated"); 2781 Register recv = op->recv()->as_register(); 2782 assert_different_registers(mdo, tmp1, recv); 2783 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); 2784 ciKlass* known_klass = op->known_holder(); 2785 if (C1OptimizeVirtualCallProfiling && known_klass != NULL) { 2786 // We know the type that will be seen at this call site; we can 2787 // statically update the MethodData* rather than needing to do 2788 // dynamic tests on the receiver type 2789 2790 // NOTE: we should probably put a lock around this search to 2791 // avoid collisions by concurrent compilations 2792 ciVirtualCallData* vc_data = (ciVirtualCallData*) data; 2793 uint i; 2794 
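// First pass over the receiver rows: if known_klass has already been recorded in
// a row, just increment that row's counter and return.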
for (i = 0; i < VirtualCallData::row_limit(); i++) {
2795 ciKlass* receiver = vc_data->receiver(i);
2796 if (known_klass->equals(receiver)) {
2797 Address data_addr(mdo, md->byte_offset_of_slot(data,
2798 VirtualCallData::receiver_count_offset(i)) -
2799 mdo_offset_bias);
2800 __ ld_ptr(data_addr, tmp1);
2801 __ add(tmp1, DataLayout::counter_increment, tmp1);
2802 __ st_ptr(tmp1, data_addr);
2803 return;
2804 }
2805 }
2806
2807 // Receiver type not found in profile data; select an empty slot
2808
2809 // Note that this is less efficient than it should be because it
2810 // always does a write to the receiver part of the
2811 // VirtualCallData rather than just the first time
2812 for (i = 0; i < VirtualCallData::row_limit(); i++) {
2813 ciKlass* receiver = vc_data->receiver(i);
2814 if (receiver == NULL) {
2815 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
2816 mdo_offset_bias);
2817 metadata2reg(known_klass->constant_encoding(), tmp1);
2818 __ st_ptr(tmp1, recv_addr);
2819 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
2820 mdo_offset_bias);
2821 __ ld_ptr(data_addr, tmp1);
2822 __ add(tmp1, DataLayout::counter_increment, tmp1);
2823 __ st_ptr(tmp1, data_addr);
2824 return;
2825 }
2826 }
2827 } else {
2828 __ load_klass(recv, recv);
2829 Label update_done;
2830 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
2831 // Receiver did not match any saved receiver and there is no empty row for it.
2832 // Increment total counter to indicate polymorphic case.
2833 __ ld_ptr(counter_addr, tmp1);
2834 __ add(tmp1, DataLayout::counter_increment, tmp1);
2835 __ st_ptr(tmp1, counter_addr);
2836
2837 __ bind(update_done);
2838 }
2839 } else {
2840 // Static call
2841 __ ld_ptr(counter_addr, tmp1);
2842 __ add(tmp1, DataLayout::counter_increment, tmp1);
2843 __ st_ptr(tmp1, counter_addr);
2844 }
2845 }
2846
2847 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
2848 Register obj = op->obj()->as_register();
2849 Register tmp1 = op->tmp()->as_pointer_register();
2850 Register tmp2 = G1;
2851 Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
2852 ciKlass* exact_klass = op->exact_klass();
2853 intptr_t current_klass = op->current_klass();
2854 bool not_null = op->not_null();
2855 bool no_conflict = op->no_conflict();
2856
2857 Label update, next, none;
2858
2859 bool do_null = !not_null;
2860 bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
2861 bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
2862
2863 assert(do_null || do_update, "why are we here?");
2864 assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
2865
2866 __ verify_oop(obj);
2867
2868 if (tmp1 != obj) {
2869 __ mov(obj, tmp1);
2870 }
2871 if (do_null) {
2872 __ br_notnull_short(tmp1, Assembler::pt, update);
2873 if (!TypeEntries::was_null_seen(current_klass)) {
2874 __ ld_ptr(mdo_addr, tmp1);
2875 __ or3(tmp1, TypeEntries::null_seen, tmp1);
2876 __ st_ptr(tmp1, mdo_addr);
2877 }
2878 if (do_update) {
2879 __ ba(next);
2880 __ delayed()->nop();
2881 }
2882 #ifdef ASSERT
2883 } else {
2884 __ br_notnull_short(tmp1, Assembler::pt, update);
2885 __ stop("unexpected null obj");
2886 #endif
2887 }
2888
2889 __ bind(update);
2890
2891 if (do_update) {
2892 #ifdef ASSERT
2893 if (exact_klass != NULL) {
2894 Label ok;
2895 __ load_klass(tmp1, tmp1);
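// tmp1 now holds the object's actual klass; compare it against the statically
// known exact klass and halt on any mismatch.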
2896 metadata2reg(exact_klass->constant_encoding(), tmp2);
2897 __ cmp_and_br_short(tmp1, tmp2, Assembler::equal, Assembler::pt, ok);
2898 __ stop("exact klass and actual klass differ");
2899 __ bind(ok);
2900 }
2901 #endif
2902
2903 Label do_update;
2904 __ ld_ptr(mdo_addr, tmp2);
2905
2906 if (!no_conflict) {
2907 if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
2908 if (exact_klass != NULL) {
2909 metadata2reg(exact_klass->constant_encoding(), tmp1);
2910 } else {
2911 __ load_klass(tmp1, tmp1);
2912 }
2913
2914 __ xor3(tmp1, tmp2, tmp1);
2915 __ btst(TypeEntries::type_klass_mask, tmp1);
2916 // klass seen before, nothing to do. The unknown bit may have been
2917 // set already but no need to check.
2918 __ brx(Assembler::zero, false, Assembler::pt, next);
2919 __ delayed()->btst(TypeEntries::type_unknown, tmp1);
2922 // already unknown. Nothing to do anymore.
2923 __ brx(Assembler::notZero, false, Assembler::pt, next);
2924
2925 if (TypeEntries::is_type_none(current_klass)) {
2926 __ delayed()->btst(TypeEntries::type_mask, tmp2);
2927 __ brx(Assembler::zero, true, Assembler::pt, do_update);
2928 // first time here. Set profile type.
2929 __ delayed()->or3(tmp2, tmp1, tmp2);
2930 } else {
2931 __ delayed()->nop();
2932 }
2933 } else {
2934 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
2935 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
2936
2937 __ btst(TypeEntries::type_unknown, tmp2);
2938 // already unknown. Nothing to do anymore.
2939 __ brx(Assembler::notZero, false, Assembler::pt, next);
2940 __ delayed()->nop();
2941 }
2942
2943 // Different from what was seen before; we cannot keep an accurate profile.
2944 __ or3(tmp2, TypeEntries::type_unknown, tmp2);
2945 } else {
2946 // There's a single possible klass at this profile point
2947 assert(exact_klass != NULL, "should be");
2948 if (TypeEntries::is_type_none(current_klass)) {
2949 metadata2reg(exact_klass->constant_encoding(), tmp1);
2950 __ xor3(tmp1, tmp2, tmp1);
2951 __ btst(TypeEntries::type_klass_mask, tmp1);
2952 __ brx(Assembler::zero, false, Assembler::pt, next);
2953 #ifdef ASSERT
2954
2955 {
2956 Label ok;
2957 __ delayed()->btst(TypeEntries::type_mask, tmp2);
2958 __ brx(Assembler::zero, true, Assembler::pt, ok);
2959 __ delayed()->nop();
2960
2961 __ stop("unexpected profiling mismatch");
2962 __ bind(ok);
2963 }
2964 // first time here. Set profile type.
2965 __ or3(tmp2, tmp1, tmp2);
2966 #else
2967 // first time here. Set profile type.
2968 __ delayed()->or3(tmp2, tmp1, tmp2);
2969 #endif
2970
2971 } else {
2972 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
2973 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2974
2975 // already unknown. Nothing to do anymore.
2976 __ btst(TypeEntries::type_unknown, tmp2);
2977 __ brx(Assembler::notZero, false, Assembler::pt, next);
2978 __ delayed()->or3(tmp2, TypeEntries::type_unknown, tmp2);
2979 }
2980 }
2981
2982 __ bind(do_update);
2983 __ st_ptr(tmp2, mdo_addr);
2984
2985 __ bind(next);
2986 }
2987 }
2988
2989 void LIR_Assembler::align_backward_branch_target() {
2990 __ align(OptoLoopAlignment);
2991 }
2992
2993
2994 void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
2995 // make sure we are expecting a delay
2996 // this has the side effect of clearing the delay state
2997 // so we can use _masm instead of _masm->delayed() to do the
2998 // code generation.
2999 __ delayed(); 3000 3001 // make sure we only emit one instruction 3002 int offset = code_offset(); 3003 op->delay_op()->emit_code(this); 3004 #ifdef ASSERT 3005 if (code_offset() - offset != NativeInstruction::nop_instruction_size) { 3006 op->delay_op()->print(); 3007 } 3008 assert(code_offset() - offset == NativeInstruction::nop_instruction_size, 3009 "only one instruction can go in a delay slot"); 3010 #endif 3011 3012 // we may also be emitting the call info for the instruction 3013 // which we are the delay slot of. 3014 CodeEmitInfo* call_info = op->call_info(); 3015 if (call_info) { 3016 add_call_info(code_offset(), call_info); 3017 } 3018 3019 if (VerifyStackAtCalls) { 3020 _masm->sub(FP, SP, O7); 3021 _masm->cmp(O7, initial_frame_size_in_bytes()); 3022 _masm->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2 ); 3023 } 3024 } 3025 3026 3027 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) { 3028 // tmp must be unused 3029 assert(tmp->is_illegal(), "wasting a register if tmp is allocated"); 3030 assert(left->is_register(), "can only handle registers"); 3031 3032 if (left->is_single_cpu()) { 3033 __ neg(left->as_register(), dest->as_register()); 3034 } else if (left->is_single_fpu()) { 3035 __ fneg(FloatRegisterImpl::S, left->as_float_reg(), dest->as_float_reg()); 3036 } else if (left->is_double_fpu()) { 3037 __ fneg(FloatRegisterImpl::D, left->as_double_reg(), dest->as_double_reg()); 3038 } else { 3039 assert (left->is_double_cpu(), "Must be a long"); 3040 Register Rlow = left->as_register_lo(); 3041 Register Rhi = left->as_register_hi(); 3042 __ sub(G0, Rlow, dest->as_register_lo()); 3043 } 3044 } 3045 3046 void LIR_Assembler::rt_call(LIR_Opr result, address dest, 3047 const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { 3048 3049 // if tmp is invalid, then the function being called doesn't destroy the thread 3050 if (tmp->is_valid()) { 3051 __ save_thread(tmp->as_pointer_register()); 3052 } 3053 __ call(dest, relocInfo::runtime_call_type); 3054 __ delayed()->nop(); 3055 if (info != NULL) { 3056 add_call_info_here(info); 3057 } 3058 if (tmp->is_valid()) { 3059 __ restore_thread(tmp->as_pointer_register()); 3060 } 3061 3062 #ifdef ASSERT 3063 __ verify_thread(); 3064 #endif // ASSERT 3065 } 3066 3067 3068 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { 3069 ShouldNotReachHere(); 3070 3071 NEEDS_CLEANUP; 3072 if (type == T_LONG) { 3073 LIR_Address* mem_addr = dest->is_address() ? 
dest->as_address_ptr() : src->as_address_ptr(); 3074 3075 // (extended to allow indexed as well as constant displaced for JSR-166) 3076 Register idx = noreg; // contains either constant offset or index 3077 3078 int disp = mem_addr->disp(); 3079 if (mem_addr->index() == LIR_OprFact::illegalOpr) { 3080 if (!Assembler::is_simm13(disp)) { 3081 idx = O7; 3082 __ set(disp, idx); 3083 } 3084 } else { 3085 assert(disp == 0, "not both indexed and disp"); 3086 idx = mem_addr->index()->as_register(); 3087 } 3088 3089 int null_check_offset = -1; 3090 3091 Register base = mem_addr->base()->as_register(); 3092 if (src->is_register() && dest->is_address()) { 3093 // G4 is high half, G5 is low half 3094 // clear the top bits of G5, and scale up G4 3095 __ srl (src->as_register_lo(), 0, G5); 3096 __ sllx(src->as_register_hi(), 32, G4); 3097 // combine the two halves into the 64 bits of G4 3098 __ or3(G4, G5, G4); 3099 null_check_offset = __ offset(); 3100 if (idx == noreg) { 3101 __ stx(G4, base, disp); 3102 } else { 3103 __ stx(G4, base, idx); 3104 } 3105 } else if (src->is_address() && dest->is_register()) { 3106 null_check_offset = __ offset(); 3107 if (idx == noreg) { 3108 __ ldx(base, disp, G5); 3109 } else { 3110 __ ldx(base, idx, G5); 3111 } 3112 __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi 3113 __ mov (G5, dest->as_register_lo()); // copy low half into lo 3114 } else { 3115 Unimplemented(); 3116 } 3117 if (info != NULL) { 3118 add_debug_info_for_null_check(null_check_offset, info); 3119 } 3120 3121 } else { 3122 // use normal move for all other volatiles since they don't need 3123 // special handling to remain atomic. 3124 move_op(src, dest, type, lir_patch_none, info, false, false, false); 3125 } 3126 } 3127 3128 void LIR_Assembler::membar() { 3129 // only StoreLoad membars are ever explicitly needed on sparcs in TSO mode 3130 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) ); 3131 } 3132 3133 void LIR_Assembler::membar_acquire() { 3134 // no-op on TSO 3135 } 3136 3137 void LIR_Assembler::membar_release() { 3138 // no-op on TSO 3139 } 3140 3141 void LIR_Assembler::membar_loadload() { 3142 // no-op 3143 //__ membar(Assembler::Membar_mask_bits(Assembler::loadload)); 3144 } 3145 3146 void LIR_Assembler::membar_storestore() { 3147 // no-op 3148 //__ membar(Assembler::Membar_mask_bits(Assembler::storestore)); 3149 } 3150 3151 void LIR_Assembler::membar_loadstore() { 3152 // no-op 3153 //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore)); 3154 } 3155 3156 void LIR_Assembler::membar_storeload() { 3157 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); 3158 } 3159 3160 void LIR_Assembler::on_spin_wait() { 3161 Unimplemented(); 3162 } 3163 3164 // Pack two sequential registers containing 32 bit values 3165 // into a single 64 bit register. 3166 // src and src->successor() are packed into dst 3167 // src and dst may be the same register. 3168 // Note: src is destroyed 3169 void LIR_Assembler::pack64(LIR_Opr src, LIR_Opr dst) { 3170 Register rs = src->as_register(); 3171 Register rd = dst->as_register_lo(); 3172 __ sllx(rs, 32, rs); 3173 __ srl(rs->successor(), 0, rs->successor()); 3174 __ or3(rs, rs->successor(), rd); 3175 } 3176 3177 // Unpack a 64 bit value in a register into 3178 // two sequential registers. 
3179 // src is unpacked into dst and dst->successor() 3180 void LIR_Assembler::unpack64(LIR_Opr src, LIR_Opr dst) { 3181 Register rs = src->as_register_lo(); 3182 Register rd = dst->as_register_hi(); 3183 assert_different_registers(rs, rd, rd->successor()); 3184 __ srlx(rs, 32, rd); 3185 __ srl (rs, 0, rd->successor()); 3186 } 3187 3188 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 3189 const LIR_Address* addr = addr_opr->as_address_ptr(); 3190 assert(addr->scale() == LIR_Address::times_1, "can't handle complex addresses yet"); 3191 const Register dest_reg = dest->as_pointer_register(); 3192 const Register base_reg = addr->base()->as_pointer_register(); 3193 3194 if (patch_code != lir_patch_none) { 3195 PatchingStub* patch = new PatchingStub(_masm, PatchingStub::access_field_id); 3196 assert(addr->disp() != 0, "must have"); 3197 assert(base_reg != G3_scratch, "invariant"); 3198 __ patchable_set(0, G3_scratch); 3199 patching_epilog(patch, patch_code, base_reg, info); 3200 assert(dest_reg != G3_scratch, "invariant"); 3201 if (addr->index()->is_valid()) { 3202 const Register index_reg = addr->index()->as_pointer_register(); 3203 assert(index_reg != G3_scratch, "invariant"); 3204 __ add(index_reg, G3_scratch, G3_scratch); 3205 } 3206 __ add(base_reg, G3_scratch, dest_reg); 3207 } else { 3208 if (Assembler::is_simm13(addr->disp())) { 3209 if (addr->index()->is_valid()) { 3210 const Register index_reg = addr->index()->as_pointer_register(); 3211 assert(index_reg != G3_scratch, "invariant"); 3212 __ add(base_reg, addr->disp(), G3_scratch); 3213 __ add(index_reg, G3_scratch, dest_reg); 3214 } else { 3215 __ add(base_reg, addr->disp(), dest_reg); 3216 } 3217 } else { 3218 __ set(addr->disp(), G3_scratch); 3219 if (addr->index()->is_valid()) { 3220 const Register index_reg = addr->index()->as_pointer_register(); 3221 assert(index_reg != G3_scratch, "invariant"); 3222 __ add(index_reg, G3_scratch, G3_scratch); 3223 } 3224 __ add(base_reg, G3_scratch, dest_reg); 3225 } 3226 } 3227 } 3228 3229 3230 void LIR_Assembler::get_thread(LIR_Opr result_reg) { 3231 assert(result_reg->is_register(), "check"); 3232 __ mov(G2_thread, result_reg->as_register()); 3233 } 3234 3235 #ifdef ASSERT 3236 // emit run-time assertion 3237 void LIR_Assembler::emit_assert(LIR_OpAssert* op) { 3238 assert(op->code() == lir_assert, "must be"); 3239 3240 if (op->in_opr1()->is_valid()) { 3241 assert(op->in_opr2()->is_valid(), "both operands must be valid"); 3242 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op); 3243 } else { 3244 assert(op->in_opr2()->is_illegal(), "both operands must be illegal"); 3245 assert(op->condition() == lir_cond_always, "no other conditions allowed"); 3246 } 3247 3248 Label ok; 3249 if (op->condition() != lir_cond_always) { 3250 Assembler::Condition acond; 3251 switch (op->condition()) { 3252 case lir_cond_equal: acond = Assembler::equal; break; 3253 case lir_cond_notEqual: acond = Assembler::notEqual; break; 3254 case lir_cond_less: acond = Assembler::less; break; 3255 case lir_cond_lessEqual: acond = Assembler::lessEqual; break; 3256 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break; 3257 case lir_cond_greater: acond = Assembler::greater; break; 3258 case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break; 3259 case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break; 3260 default: ShouldNotReachHere(); 3261 }; 3262 __ br(acond, false, Assembler::pt, ok); 3263 __ delayed()->nop(); 3264 } 
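// Reaching this point means the assertion condition did not hold (or the assert is
// unconditional): either halt with the message or emit a debugger breakpoint,
// depending on how the LIR assert was constructed.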
3265 if (op->halt()) { 3266 const char* str = __ code_string(op->msg()); 3267 __ stop(str); 3268 } else { 3269 breakpoint(); 3270 } 3271 __ bind(ok); 3272 } 3273 #endif 3274 3275 void LIR_Assembler::peephole(LIR_List* lir) { 3276 LIR_OpList* inst = lir->instructions_list(); 3277 for (int i = 0; i < inst->length(); i++) { 3278 LIR_Op* op = inst->at(i); 3279 switch (op->code()) { 3280 case lir_cond_float_branch: 3281 case lir_branch: { 3282 LIR_OpBranch* branch = op->as_OpBranch(); 3283 assert(branch->info() == NULL, "shouldn't be state on branches anymore"); 3284 LIR_Op* delay_op = NULL; 3285 // we'd like to be able to pull following instructions into 3286 // this slot but we don't know enough to do it safely yet so 3287 // only optimize block to block control flow. 3288 if (LIRFillDelaySlots && branch->block()) { 3289 LIR_Op* prev = inst->at(i - 1); 3290 if (prev && LIR_Assembler::is_single_instruction(prev) && prev->info() == NULL) { 3291 // swap previous instruction into delay slot 3292 inst->at_put(i - 1, op); 3293 inst->at_put(i, new LIR_OpDelay(prev, op->info())); 3294 #ifndef PRODUCT 3295 if (LIRTracePeephole) { 3296 tty->print_cr("delayed"); 3297 inst->at(i - 1)->print(); 3298 inst->at(i)->print(); 3299 tty->cr(); 3300 } 3301 #endif 3302 continue; 3303 } 3304 } 3305 3306 if (!delay_op) { 3307 delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), NULL); 3308 } 3309 inst->insert_before(i + 1, delay_op); 3310 break; 3311 } 3312 case lir_static_call: 3313 case lir_virtual_call: 3314 case lir_icvirtual_call: 3315 case lir_optvirtual_call: 3316 case lir_dynamic_call: { 3317 LIR_Op* prev = inst->at(i - 1); 3318 if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL && 3319 (op->code() != lir_virtual_call || 3320 !prev->result_opr()->is_single_cpu() || 3321 prev->result_opr()->as_register() != O0) && 3322 LIR_Assembler::is_single_instruction(prev)) { 3323 // Only moves without info can be put into the delay slot. 3324 // Also don't allow the setup of the receiver in the delay 3325 // slot for vtable calls. 3326 inst->at_put(i - 1, op); 3327 inst->at_put(i, new LIR_OpDelay(prev, op->info())); 3328 #ifndef PRODUCT 3329 if (LIRTracePeephole) { 3330 tty->print_cr("delayed"); 3331 inst->at(i - 1)->print(); 3332 inst->at(i)->print(); 3333 tty->cr(); 3334 } 3335 #endif 3336 } else { 3337 LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info()); 3338 inst->insert_before(i + 1, delay_op); 3339 i++; 3340 } 3341 break; 3342 } 3343 } 3344 } 3345 } 3346 3347 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) { 3348 LIR_Address* addr = src->as_address_ptr(); 3349 3350 assert(data == dest, "swap uses only 2 operands"); 3351 assert (code == lir_xchg, "no xadd on sparc"); 3352 3353 if (data->type() == T_INT) { 3354 __ swap(as_Address(addr), data->as_register()); 3355 } else if (data->is_oop()) { 3356 Register obj = data->as_register(); 3357 Register narrow = tmp->as_register(); 3358 assert(UseCompressedOops, "swap is 32bit only"); 3359 __ encode_heap_oop(obj, narrow); 3360 __ swap(as_Address(addr), narrow); 3361 __ decode_heap_oop(narrow, obj); 3362 } else { 3363 ShouldNotReachHere(); 3364 } 3365 } 3366 3367 #undef __