/*
 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/sharedRuntime.hpp"

#define __ _masm->


//------------------------------------------------------------


bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  if (opr->is_constant()) {
    LIR_Const* constant = opr->as_constant_ptr();
    switch (constant->type()) {
      case T_INT: {
        jint value = constant->as_jint();
        return Assembler::is_simm13(value);
      }

      default:
        return false;
    }
  }
  return false;
}
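
// Note: "small constant" here means one that fits in SPARC's 13-bit signed
// immediate field (simm13, i.e. -4096..4095) and so can be encoded directly
// in an arithmetic instruction rather than first materialized in a register.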


bool LIR_Assembler::is_single_instruction(LIR_Op* op) {
  switch (op->code()) {
    case lir_null_check:
      return true;


    case lir_add:
    case lir_ushr:
    case lir_shr:
    case lir_shl:
      // integer shifts and adds are always one instruction
      return op->result_opr()->is_single_cpu();


    case lir_move: {
      LIR_Op1* op1 = op->as_Op1();
      LIR_Opr src = op1->in_opr();
      LIR_Opr dst = op1->result_opr();

      if (src == dst) {
        NEEDS_CLEANUP;
        // this works around a problem where moves with the same src and dst
        // end up in the delay slot and then the assembler swallows the mov
        // since it has no effect and then it complains because the delay slot
        // is empty.  returning false stops the optimizer from putting this in
        // the delay slot
        return false;
      }

      // don't put moves involving oops into the delay slot since the VerifyOops code
      // will make it much larger than a single instruction.
      if (VerifyOops) {
        return false;
      }

      if (src->is_double_cpu() || dst->is_double_cpu() || op1->patch_code() != lir_patch_none ||
          ((src->is_double_fpu() || dst->is_double_fpu()) && op1->move_kind() != lir_move_normal)) {
        return false;
      }

      if (UseCompressedOops) {
        if (dst->is_address() && !dst->is_stack() && (dst->type() == T_OBJECT || dst->type() == T_ARRAY)) return false;
        if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
      }

      if (UseCompressedClassPointers) {
        if (src->is_address() && !src->is_stack() && src->type() == T_ADDRESS &&
            src->as_address_ptr()->disp() == oopDesc::klass_offset_in_bytes()) return false;
      }

      if (dst->is_register()) {
        if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
          return !PatchALot;
        } else if (src->is_single_stack()) {
          return true;
        }
      }

      if (src->is_register()) {
        if (dst->is_address() && Assembler::is_simm13(dst->as_address_ptr()->disp())) {
          return !PatchALot;
        } else if (dst->is_single_stack()) {
          return true;
        }
      }

      if (dst->is_register() &&
          ((src->is_register() && src->is_single_word() && src->is_same_type(dst)) ||
           (src->is_constant() && LIR_Assembler::is_small_constant(op->as_Op1()->in_opr())))) {
        return true;
      }

      return false;
    }

    default:
      return false;
  }
  ShouldNotReachHere();
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::O0_oop_opr;
}


LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::I0_opr;
}


int LIR_Assembler::initial_frame_size_in_bytes() const {
  return in_bytes(frame_map()->framesize_in_bytes());
}


// inline cache check: the inline cached class is in G5_inline_cache_reg (G5);
// we fetch the class of the receiver (O0) and compare it with the cached class.
// If they do not match we jump to the slow case.
int LIR_Assembler::check_icache() {
  int offset = __ offset();
  __ inline_cache_check(O0, G5_inline_cache_reg);
  return offset;
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  ShouldNotReachHere(); // not implemented
}


void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp):
  //
  //   1. Create a new compiled activation.
  //   2. Initialize local variables in the compiled activation.  The expression stack must be empty
  //      at the osr_bci; it is not initialized.
  //   3. Jump to the continuation address in compiled code to resume execution.

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[number_of_locks-1..0]
  //
  // locals is a direct copy of the interpreter frame, so the first slot in
  // the local array is the last local from the interpreter and the last slot
  // is local[0] (the receiver) from the interpreter.
  //
  // Similarly with locks: the first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, and the nth lock slot in the osr buffer is the
  // 0th lock in the interpreter frame (the method lock if a synchronized method).

  // Initialize monitors in the compiled activation.
  //   I0: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
                         (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
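    // For example (illustrative): with 8-byte words, max_locals() == 2 and
    // number_of_locks == 1, monitor_offset is 8 * 2 + 16 * 0 == 16, i.e. the
    // single lock/oop pair begins right after the two locals in the buffer.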
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
        __ cmp_and_br_short(O7, G0, Assembler::notEqual, Assembler::pt, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif // ASSERT
      // Copy the lock field into the compiled activation.
      __ ld_ptr(OSR_buf, slot_offset + 0, O7);
      __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
      __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
      __ st_ptr(O7, frame_map()->address_for_monitor_object(i));
    }
  }
}


// --------------------------------------------------------------------------------------------

void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no) {
  if (!GenerateSynchronizationCode) return;

  Register obj_reg = obj_opr->as_register();
  Register lock_reg = lock_opr->as_register();

  Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  Register reg = mon_addr.base();
  int offset = mon_addr.disp();
  // compute pointer to BasicLock
  if (mon_addr.is_simm13()) {
    __ add(reg, offset, lock_reg);
  } else {
    __ set(offset, lock_reg);
    __ add(reg, lock_reg, lock_reg);
  }
  // unlock object
  MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, UseFastLocking, monitor_no);
  // _slow_case_stubs->append(slow_case);
  // temporary fix: must be created after exception handler, therefore as call stub
  _slow_case_stubs->append(slow_case);
  if (UseFastLocking) {
    // try inlined fast unlocking first, revert to slow locking if it fails
    // note: lock_reg points to the displaced header since the displaced header offset is 0!
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
  } else {
    // always do slow unlocking
    // note: the slow unlocking code could be inlined here, however if we use
    // slow unlocking, speed doesn't matter anyway and this solution is
    // simpler and requires less duplicated code - additionally, the
    // slow unlocking code is the same in either case which simplifies
    // debugging
    __ br(Assembler::always, false, Assembler::pt, *slow_case->entry());
    __ delayed()->nop();
  }
  // done
  __ bind(*slow_case->continuation());
}


int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  ciMethod* method = compilation()->method();

  address handler_base = __ start_a_stub(exception_handler_size());

  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0);
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(O0);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(O0, I0); // Preserve the exception
  }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::I1_opr);
    stub = new MonitorExitStub(FrameMap::I1_opr, true, 0);
    __ unlock_object(I3, I2, I1, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ mov(G2_thread, O0);
    __ save_thread(I1); // need to preserve the thread in G2 across the runtime call
    metadata2reg(method()->constant_encoding(), O1);
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type);
    __ delayed()->nop();
    __ restore_thread(I1);
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(I0, O0); // Restore the exception
  }

  // dispatch to the unwind logic
  __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for deopt handler
  ciMethod* method = compilation()->method();
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
  __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
  __ delayed()->nop();
  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ set(NULL_WORD, reg);
  } else {
#ifdef ASSERT
    {
      ThreadInVMfromNative tiv(JavaThread::current());
      assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(o)), "should be real oop");
    }
#endif
    int oop_index = __ oop_recorder()->find_index(o);
    RelocationHolder rspec = oop_Relocation::spec(oop_index);
    __ set(NULL_WORD, reg, rspec); // Will be set when the nmethod is created
  }
}
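
// Note: jobject2reg above tags the embedded constant with an oop_Relocation,
// so the real oop can be installed when the nmethod is created; the
// *_with_patching variant below goes further and routes through a
// PatchingStub for constants that are not yet resolved at compile time.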


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the object once it's been patched
  int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);

  AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
  assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
  // NULL will be dynamically patched later and the patched value may be large.  We must
  // therefore generate the sethi/add as placeholders
  __ patchable_set(addrlit, reg);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::metadata2reg(Metadata* o, Register reg) {
  __ set_metadata_constant(o, reg);
}

void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the klass once it's been patched
  int index = __ oop_recorder()->allocate_metadata_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
  AddressLiteral addrlit(NULL, metadata_Relocation::spec(index));
  assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
  // NULL will be dynamically patched later and the patched value may be large.  We must
  // therefore generate the sethi/add as placeholders
  __ patchable_set(addrlit, reg);

  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:  // Both idiv & irem are handled after the switch (below).
      break;
    case lir_fmaf:
      __ fmadd(FloatRegisterImpl::S,
               op->in_opr1()->as_float_reg(),
               op->in_opr2()->as_float_reg(),
               op->in_opr3()->as_float_reg(),
               op->result_opr()->as_float_reg());
      return;
    case lir_fmad:
      __ fmadd(FloatRegisterImpl::D,
               op->in_opr1()->as_double_reg(),
               op->in_opr2()->as_double_reg(),
               op->in_opr3()->as_double_reg(),
               op->result_opr()->as_double_reg());
      return;
    default:
      ShouldNotReachHere();
      break;
  }

  // Handle idiv & irem:

  Register Rdividend = op->in_opr1()->as_register();
  Register Rdivisor  = noreg;
  Register Rscratch  = op->in_opr3()->as_register();
  Register Rresult   = op->result_opr()->as_register();
  int divisor = -1;

  if (op->in_opr2()->is_register()) {
    Rdivisor = op->in_opr2()->as_register();
  } else {
    divisor = op->in_opr2()->as_constant_ptr()->as_jint();
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
  }

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor != Rscratch, "");
  assert(op->code() == lir_idiv || op->code() == lir_irem, "Must be irem or idiv");
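
  // For a power-of-two divisor 2^k an arithmetic right shift alone would round
  // toward negative infinity, while Java division truncates toward zero.  The
  // sequence below first adds (divisor - 1) to negative dividends to fix this
  // up, e.g. -7 / 4: (-7 + 3) >> 2 == -1, and -7 % 4: -7 - ((-7 + 3) & ~3) == -3.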
  if (Rdivisor == noreg && is_power_of_2(divisor)) {
    // convert division by a power of two into some shifts and logical operations
    if (op->code() == lir_idiv) {
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ sra(Rscratch, log2_int(divisor), Rresult);
      return;
    } else {
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ andn(Rscratch, divisor - 1, Rscratch);
      __ sub(Rdividend, Rscratch, Rresult);
      return;
    }
  }

  __ sra(Rdividend, 31, Rscratch);
  __ wry(Rscratch);

  add_debug_info_for_div0_here(op->info());

  if (Rdivisor != noreg) {
    __ sdivcc(Rdividend, Rdivisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  } else {
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
    __ sdivcc(Rdividend, divisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  }
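
  // sdivcc sets the overflow condition for min_jint / -1; the annulled branch
  // below forces the result to 0x80000000 (min_jint) in that case, matching
  // Java's wrap-around semantics for integer division overflow.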
  Label skip;
  __ br(Assembler::overflowSet, true, Assembler::pn, skip);
  __ delayed()->Assembler::sethi(0x80000000, (op->code() == lir_idiv ? Rresult : Rscratch));
  __ bind(skip);

  if (op->code() == lir_irem) {
    if (Rdivisor != noreg) {
      __ smul(Rscratch, Rdivisor, Rscratch);
    } else {
      __ smul(Rscratch, divisor, Rscratch);
    }
    __ sub(Rdividend, Rscratch, Rresult);
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif
  assert(op->info() == NULL, "shouldn't have CodeEmitInfo");

  if (op->cond() == lir_cond_always) {
    __ br(Assembler::always, false, Assembler::pt, *(op->label()));
  } else if (op->code() == lir_cond_float_branch) {
    assert(op->ublock() != NULL, "must have unordered successor");
    bool is_unordered = (op->ublock() == op->block());
    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal:         acond = Assembler::f_equal;    break;
      case lir_cond_notEqual:      acond = Assembler::f_notEqual; break;
      case lir_cond_less:          acond = (is_unordered ? Assembler::f_unorderedOrLess          : Assembler::f_less);           break;
      case lir_cond_greater:       acond = (is_unordered ? Assembler::f_unorderedOrGreater       : Assembler::f_greater);        break;
      case lir_cond_lessEqual:     acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual   : Assembler::f_lessOrEqual);    break;
      case lir_cond_greaterEqual:  acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
      default:                     ShouldNotReachHere();
    }
    __ fb(acond, false, Assembler::pn, *(op->label()));
  } else {
    assert (op->code() == lir_branch, "just checking");

    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::equal;                break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
      case lir_cond_less:         acond = Assembler::less;                 break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
      case lir_cond_greater:      acond = Assembler::greater;              break;
      case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
      case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
      default:                    ShouldNotReachHere();
    };

    // SPARC has different condition codes for testing 32-bit vs. 64-bit
    // values.  We could always test xcc if we could guarantee that 32-bit
    // loads were always sign-extended, but that isn't true, and since sign
    // extension isn't free it would impose a slight cost.
    if (op->type() == T_INT) {
      __ br(acond, false, Assembler::pn, *(op->label()));
    } else {
      __ brx(acond, false, Assembler::pn, *(op->label()));
    }
  }
  // The peephole pass fills the delay slot
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  Bytecodes::Code code = op->bytecode();
  LIR_Opr dst = op->result_opr();

  switch(code) {
    case Bytecodes::_i2l: {
      Register rlo  = dst->as_register_lo();
      Register rhi  = dst->as_register_hi();
      Register rval = op->in_opr()->as_register();
      __ sra(rval, 0, rlo);
      break;
    }
    case Bytecodes::_i2d:
    case Bytecodes::_i2f: {
      bool is_double = (code == Bytecodes::_i2d);
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width w = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      if (rsrc != rdst) {
        __ fmov(FloatRegisterImpl::S, rsrc, rdst);
      }
      __ fitof(w, rdst, rdst);
      break;
    }
    case Bytecodes::_f2i: {
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      Address       addr = frame_map()->address_for_slot(dst->single_stack_ix());
      Label L;
      // result must be 0 if value is NaN; test by comparing value to itself
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
      __ fb(Assembler::f_unordered, true, Assembler::pn, L);
      __ delayed()->st(G0, addr); // annulled if contents of rsrc is not NaN
      __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
      // move integer result from float register to int register
      __ stf(FloatRegisterImpl::S, rsrc, addr.base(), addr.disp());
      __ bind(L);
      break;
    }
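    // (A SPARC conditional branch with the annul bit set cancels its delay-slot
    // instruction when the branch is not taken, which is why the store of G0
    // above happens only on the NaN path.)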
    case Bytecodes::_l2i: {
      Register rlo  = op->in_opr()->as_register_lo();
      Register rhi  = op->in_opr()->as_register_hi();
      Register rdst = dst->as_register();
      __ sra(rlo, 0, rdst);
      break;
    }
    case Bytecodes::_d2f:
    case Bytecodes::_f2d: {
      bool is_double = (code == Bytecodes::_f2d);
      assert((!is_double && dst->is_single_fpu()) || (is_double && dst->is_double_fpu()), "check");
      LIR_Opr val = op->in_opr();
      FloatRegister rval = (code == Bytecodes::_d2f) ? val->as_double_reg() : val->as_float_reg();
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width vw = is_double ? FloatRegisterImpl::S : FloatRegisterImpl::D;
      FloatRegisterImpl::Width dw = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      __ ftof(vw, dw, rval, rdst);
      break;
    }
    case Bytecodes::_i2s:
    case Bytecodes::_i2b: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
      int shift = (code == Bytecodes::_i2b) ? (BitsPerInt - T_BYTE_aelem_bytes * BitsPerByte) : (BitsPerInt - BitsPerShort);
      __ sll(rval, shift, rdst);
      __ sra(rdst, shift, rdst);
      break;
    }
    case Bytecodes::_i2c: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
      int shift = BitsPerInt - T_CHAR_aelem_bytes * BitsPerByte;
      __ sll(rval, shift, rdst);
      __ srl(rdst, shift, rdst);
      break;
    }

    default: ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code) {
  // do nothing since all instructions are word aligned on sparc
}


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  __ call(op->addr(), rtype);
  // The peephole pass fills the delay slot, add_call_info is done in
  // LIR_Assembler::emit_delay.
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ ic_call(op->addr(), false);
  // The peephole pass fills the delay slot, add_call_info is done in
  // LIR_Assembler::emit_delay.
}


void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  add_debug_info_for_null_check_here(op->info());
  __ load_klass(O0, G3_scratch);
  if (Assembler::is_simm13(op->vtable_offset())) {
    __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
  } else {
    // This will generate 2 instructions
    __ set(op->vtable_offset(), G5_method);
    // ld_ptr, set_hi, set
    __ ld_ptr(G3_scratch, G5_method, G5_method);
  }
  __ ld_ptr(G5_method, Method::from_compiled_offset(), G3_scratch);
  __ callr(G3_scratch, G0);
  // the peephole pass fills the delay slot
}
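
// The store()/load() helpers below return the code offset of the memory
// instruction itself; callers attach debug info at that offset so an implicit
// null check (a fault at that PC) can be mapped back to the right bytecode.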

int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
  int store_offset;
  if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
    assert(base != O7, "destroying register");
    assert(!unaligned, "can't handle this");
    // for offsets larger than a simm13 we set up the offset in O7
    __ set(offset, O7);
    store_offset = store(from_reg, base, O7, type, wide);
  } else {
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(from_reg->as_register());
    }
    store_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ stb(from_reg->as_register(), base, offset); break;
      case T_CHAR  : __ sth(from_reg->as_register(), base, offset); break;
      case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
      case T_INT   : __ stw(from_reg->as_register(), base, offset); break;
      case T_LONG  :
        if (unaligned || PatchALot) {
          // Don't use O7 here because it may be equal to 'base' (see LIR_Assembler::reg2mem)
          assert(G3_scratch != base, "can't handle this");
          assert(G3_scratch != from_reg->as_register_lo(), "can't handle this");
          __ srax(from_reg->as_register_lo(), 32, G3_scratch);
          __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
          __ stw(G3_scratch,                 base, offset + hi_word_offset_in_bytes);
        } else {
          __ stx(from_reg->as_register_lo(), base, offset);
        }
        break;
      case T_ADDRESS:
      case T_METADATA:
        __ st_ptr(from_reg->as_register(), base, offset);
        break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(from_reg->as_register(), G3_scratch);
            store_offset = code_offset();
            __ stw(G3_scratch, base, offset);
          } else {
            __ st_ptr(from_reg->as_register(), base, offset);
          }
          break;
        }

      case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break;
      case T_DOUBLE:
        {
          FloatRegister reg = from_reg->as_double_reg();
          // split unaligned stores
          if (unaligned || PatchALot) {
            assert(Assembler::is_simm13(offset + 4), "must be");
            __ stf(FloatRegisterImpl::S, reg->successor(), base, offset + 4);
            __ stf(FloatRegisterImpl::S, reg,              base, offset);
          } else {
            __ stf(FloatRegisterImpl::D, reg, base, offset);
          }
          break;
        }
      default      : ShouldNotReachHere();
    }
  }
  return store_offset;
}


int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(from_reg->as_register());
  }
  int store_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ stb(from_reg->as_register(), base, disp); break;
    case T_CHAR  : __ sth(from_reg->as_register(), base, disp); break;
    case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
    case T_INT   : __ stw(from_reg->as_register(), base, disp); break;
    case T_LONG  :
      __ stx(from_reg->as_register_lo(), base, disp);
      break;
    case T_ADDRESS:
      __ st_ptr(from_reg->as_register(), base, disp);
      break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ encode_heap_oop(from_reg->as_register(), G3_scratch);
          store_offset = code_offset();
          __ stw(G3_scratch, base, disp);
        } else {
          __ st_ptr(from_reg->as_register(), base, disp);
        }
        break;
      }
    case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(),  base, disp); break;
    case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break;
    default      : ShouldNotReachHere();
  }
  return store_offset;
}


int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
  int load_offset;
  if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
    assert(base != O7, "destroying register");
    assert(!unaligned, "can't handle this");
    // for offsets larger than a simm13 we set up the offset in O7
    __ set(offset, O7);
    load_offset = load(base, O7, to_reg, type, wide);
  } else {
    load_offset = code_offset();
    switch(type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ ldsb(base, offset, to_reg->as_register()); break;
      case T_CHAR  : __ lduh(base, offset, to_reg->as_register()); break;
      case T_SHORT : __ ldsh(base, offset, to_reg->as_register()); break;
      case T_INT   : __ ld(base, offset, to_reg->as_register()); break;
      case T_LONG  :
        if (!unaligned && !PatchALot) {
          __ ldx(base, offset, to_reg->as_register_lo());
        } else {
          assert(base != to_reg->as_register_lo(), "can't handle this");
          assert(O7 != to_reg->as_register_lo(), "can't handle this");
          __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
          __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
          __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
          __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
        }
        break;
      case T_METADATA: __ ld_ptr(base, offset, to_reg->as_register()); break;
      case T_ADDRESS:
        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
          __ lduw(base, offset, to_reg->as_register());
          __ decode_klass_not_null(to_reg->as_register());
        } else {
          __ ld_ptr(base, offset, to_reg->as_register());
        }
        break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            __ lduw(base, offset, to_reg->as_register());
            __ decode_heap_oop(to_reg->as_register());
          } else {
            __ ld_ptr(base, offset, to_reg->as_register());
          }
          break;
        }
      case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break;
      case T_DOUBLE:
        {
          FloatRegister reg = to_reg->as_double_reg();
          // split unaligned loads
          if (unaligned || PatchALot) {
            __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
            __ ldf(FloatRegisterImpl::S, base, offset,     reg);
          } else {
            __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
          }
          break;
        }
      default : ShouldNotReachHere();
    }
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(to_reg->as_register());
    }
  }
  return load_offset;
}


int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
  int load_offset = code_offset();
  switch(type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ ldsb(base, disp, to_reg->as_register()); break;
    case T_CHAR  : __ lduh(base, disp, to_reg->as_register()); break;
    case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break;
    case T_INT   : __ ld(base, disp, to_reg->as_register()); break;
    case T_ADDRESS: __ ld_ptr(base, disp, to_reg->as_register()); break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ lduw(base, disp, to_reg->as_register());
          __ decode_heap_oop(to_reg->as_register());
        } else {
          __ ld_ptr(base, disp, to_reg->as_register());
        }
        break;
      }
    case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
    case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
    case T_LONG  :
      __ ldx(base, disp, to_reg->as_register_lo());
      break;
    default : ShouldNotReachHere();
  }
  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(to_reg->as_register());
  }
  return load_offset;
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
    case T_INT:
    case T_FLOAT: {
      Register src_reg = O7;
      int value = c->as_jint_bits();
      if (value == 0) {
        src_reg = G0;
      } else {
        __ set(value, O7);
      }
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ stw(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_ADDRESS: {
      Register src_reg = O7;
      int value = c->as_jint_bits();
      if (value == 0) {
        src_reg = G0;
      } else {
        __ set(value, O7);
      }
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ st_ptr(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_OBJECT: {
      Register src_reg = O7;
      jobject2reg(c->as_jobject(), src_reg);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ st_ptr(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());

      Register tmp = O7;
      int value_lo = c->as_jint_lo_bits();
      if (value_lo == 0) {
        tmp = G0;
      } else {
        __ set(value_lo, O7);
      }
      __ stw(tmp, addr.base(), addr.disp() + lo_word_offset_in_bytes);
      int value_hi = c->as_jint_hi_bits();
      if (value_hi == 0) {
        tmp = G0;
      } else {
        __ set(value_hi, O7);
      }
      __ stw(tmp, addr.base(), addr.disp() + hi_word_offset_in_bytes);
      break;
    }
    default:
      Unimplemented();
  }
}
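
// Throughout this file O7 serves as the scratch register for materializing
// values; G0 always reads as zero on SPARC, so zero-valued constants are
// stored directly from G0 without being materialized at all.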

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();
  Register base = addr->base()->as_pointer_register();
  int offset = -1;

  switch (c->type()) {
    case T_FLOAT: type = T_INT; // Float constants are stored by int store instructions.
    case T_INT:
    case T_ADDRESS: {
      LIR_Opr tmp = FrameMap::O7_opr;
      int value = c->as_jint_bits();
      if (value == 0) {
        tmp = FrameMap::G0_opr;
      } else if (Assembler::is_simm13(value)) {
        __ set(value, O7);
      }
      if (addr->index()->is_valid()) {
        assert(addr->disp() == 0, "must be zero");
        offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
      } else {
        assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
        offset = store(tmp, base, addr->disp(), type, wide, false);
      }
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      assert(!addr->index()->is_valid(), "can't handle reg reg address here");
      assert(Assembler::is_simm13(addr->disp()) &&
             Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses");

      LIR_Opr tmp = FrameMap::O7_opr;
      int value_lo = c->as_jint_lo_bits();
      if (value_lo == 0) {
        tmp = FrameMap::G0_opr;
      } else {
        __ set(value_lo, O7);
      }
      offset = store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT, wide, false);
      int value_hi = c->as_jint_hi_bits();
      if (value_hi == 0) {
        tmp = FrameMap::G0_opr;
      } else {
        __ set(value_hi, O7);
      }
      store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT, wide, false);
      break;
    }
    case T_OBJECT: {
      jobject obj = c->as_jobject();
      LIR_Opr tmp;
      if (obj == NULL) {
        tmp = FrameMap::G0_opr;
      } else {
        tmp = FrameMap::O7_opr;
        jobject2reg(c->as_jobject(), O7);
      }
      // handle either reg+reg or reg+disp address
      if (addr->index()->is_valid()) {
        assert(addr->disp() == 0, "must be zero");
        offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
      } else {
        assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
        offset = store(tmp, base, addr->disp(), type, wide, false);
      }

      break;
    }
    default:
      Unimplemented();
  }
  if (info != NULL) {
    assert(offset != -1, "offset should've been set");
    add_debug_info_for_null_check(offset, info);
  }
}


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Opr to_reg = dest;

  switch (c->type()) {
    case T_INT:
    case T_ADDRESS:
      {
        jint con = c->as_jint();
        if (to_reg->is_single_cpu()) {
          assert(patch_code == lir_patch_none, "no patching handled here");
          __ set(con, to_reg->as_register());
        } else {
          ShouldNotReachHere();
          assert(to_reg->is_single_fpu(), "wrong register kind");

          __ set(con, O7);
          Address temp_slot(SP, (frame::register_save_words * wordSize) + STACK_BIAS);
          __ st(O7, temp_slot);
          __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg());
        }
      }
      break;

    case T_LONG:
      {
        jlong con = c->as_jlong();

        if (to_reg->is_double_cpu()) {
          __ set(con, to_reg->as_register_lo());
        } else if (to_reg->is_single_cpu()) {
          __ set(con, to_reg->as_register());
        } else {
          ShouldNotReachHere();
          assert(to_reg->is_double_fpu(), "wrong register kind");
          Address temp_slot_lo(SP, ((frame::register_save_words) * wordSize) + STACK_BIAS);
          Address temp_slot_hi(SP, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS);
          __ set(low(con),  O7);
          __ st(O7, temp_slot_lo);
          __ set(high(con), O7);
          __ st(O7, temp_slot_hi);
          __ ldf(FloatRegisterImpl::D, temp_slot_lo, to_reg->as_double_reg());
        }
      }
      break;

    case T_OBJECT:
      {
        if (patch_code == lir_patch_none) {
          jobject2reg(c->as_jobject(), to_reg->as_register());
        } else {
          jobject2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;

    case T_METADATA:
      {
        if (patch_code == lir_patch_none) {
          metadata2reg(c->as_metadata(), to_reg->as_register());
        } else {
          klass2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;

    case T_FLOAT:
      {
        address const_addr = __ float_constant(c->as_jfloat());
        if (const_addr == NULL) {
          bailout("const section overflow");
          break;
        }
        RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
        AddressLiteral const_addrlit(const_addr, rspec);
        if (to_reg->is_single_fpu()) {
          __ patchable_sethi(const_addrlit, O7);
          __ relocate(rspec);
          __ ldf(FloatRegisterImpl::S, O7, const_addrlit.low10(), to_reg->as_float_reg());

        } else {
          assert(to_reg->is_single_cpu(), "Must be a cpu register.");

          __ set(const_addrlit, O7);
          __ ld(O7, 0, to_reg->as_register());
        }
      }
      break;

    case T_DOUBLE:
      {
        address const_addr = __ double_constant(c->as_jdouble());
        if (const_addr == NULL) {
          bailout("const section overflow");
          break;
        }
        RelocationHolder rspec = internal_word_Relocation::spec(const_addr);

        if (to_reg->is_double_fpu()) {
          AddressLiteral const_addrlit(const_addr, rspec);
          __ patchable_sethi(const_addrlit, O7);
          __ relocate(rspec);
          __ ldf(FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
        } else {
          assert(to_reg->is_double_cpu(), "Must be a long register.");
          __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
        }

      }
      break;

    default:
      ShouldNotReachHere();
  }
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Register reg = addr->base()->as_pointer_register();
  LIR_Opr index = addr->index();
  if (index->is_illegal()) {
    return Address(reg, addr->disp());
  } else {
    assert (addr->disp() == 0, "unsupported address mode");
    return Address(reg, index->as_pointer_register());
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  switch (type) {
    case T_INT:
    case T_FLOAT: {
      Register tmp = O7;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ lduw(from.base(), from.disp(), tmp);
      __ stw(tmp, to.base(), to.disp());
      break;
    }
    case T_ADDRESS:
    case T_OBJECT: {
      Register tmp = O7;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ ld_ptr(from.base(), from.disp(), tmp);
      __ st_ptr(tmp, to.base(), to.disp());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Register tmp = O7;
      Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
      Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ lduw(from.base(), from.disp(), tmp);
      __ stw(tmp, to.base(), to.disp());
      __ lduw(from.base(), from.disp() + 4, tmp);
      __ stw(tmp, to.base(), to.disp() + 4);
      break;
    }

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base.base(), base.disp() + hi_word_offset_in_bytes);
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base.base(), base.disp() + lo_word_offset_in_bytes);
}
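
// SPARC is big-endian, so for a two-word (long/double) slot the high word
// lives at the lower address; hi_word_offset_in_bytes and
// lo_word_offset_in_bytes encode that split for the word-at-a-time accesses
// in this file.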

void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {

  assert(type != T_METADATA, "load of metadata ptr not supported");
  LIR_Address* addr = src_opr->as_address_ptr();
  LIR_Opr to_reg = dest;

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!to_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
      if (needs_patching) {
        __ patchable_set(0, O7);
      } else {
        __ set(disp_value, O7);
      }
      disp_reg = O7;
    }
  } else if (unaligned || PatchALot) {
    __ add(src, addr->index()->as_pointer_register(), O7);
    src = O7;
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // remember the offset of the load.  The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset = code_offset();

  assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
  if (disp_reg == noreg) {
    offset = load(src, disp_value, to_reg, type, wide, unaligned);
  } else {
    assert(!unaligned, "can't handle this");
    offset = load(src, disp_reg, to_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }
  if (info != NULL) add_debug_info_for_null_check(offset, info);
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  Address addr;
  if (src->is_single_word()) {
    addr = frame_map()->address_for_slot(src->single_stack_ix());
  } else if (src->is_double_word()) {
    addr = frame_map()->address_for_double_slot(src->double_stack_ix());
  }

  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  Address addr;
  if (dest->is_single_word()) {
    addr = frame_map()->address_for_slot(dest->single_stack_ix());
  } else if (dest->is_double_word()) {
    addr = frame_map()->address_for_slot(dest->double_stack_ix());
  }
  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
  if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
    if (from_reg->is_double_fpu()) {
      // double to double moves
      assert(to_reg->is_double_fpu(), "should match");
      __ fmov(FloatRegisterImpl::D, from_reg->as_double_reg(), to_reg->as_double_reg());
    } else {
      // float to float moves
      assert(to_reg->is_single_fpu(), "should match");
      __ fmov(FloatRegisterImpl::S, from_reg->as_float_reg(), to_reg->as_float_reg());
    }
  } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
    if (from_reg->is_double_cpu()) {
      __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
    } else if (to_reg->is_double_cpu()) {
      // int to int moves
      __ mov(from_reg->as_register(), to_reg->as_register_lo());
    } else {
      // int to int moves
      __ mov(from_reg->as_register(), to_reg->as_register());
    }
  } else {
    ShouldNotReachHere();
  }
  if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
    __ verify_oop(to_reg->as_register());
  }
}

void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
                            bool wide, bool unaligned) {
  assert(type != T_METADATA, "store of metadata ptr not supported");
  LIR_Address* addr = dest->as_address_ptr();

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->is_oop_register()) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!from_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }
  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
      if (needs_patching) {
        __ patchable_set(0, O7);
      } else {
        __ set(disp_value, O7);
      }
      disp_reg = O7;
    }
  } else if (unaligned || PatchALot) {
    __ add(src, addr->index()->as_pointer_register(), O7);
    src = O7;
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // remember the offset of the store.  The patching_epilog must be done
  // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;

  assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
  if (disp_reg == noreg) {
    offset = store(from_reg, src, disp_value, type, wide, unaligned);
  } else {
    assert(!unaligned, "can't handle this");
    offset = store(from_reg, src, disp_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }

  if (info != NULL) add_debug_info_for_null_check(offset, info);
}


void LIR_Assembler::return_op(LIR_Opr result) {
  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }
  if (SafepointMechanism::uses_thread_local_poll()) {
    __ ld_ptr(Address(G2_thread, Thread::polling_page_offset()), L0);
  } else {
    __ set((intptr_t)os::get_polling_page(), L0);
  }
  __ relocate(relocInfo::poll_return_type);
  __ ld_ptr(L0, 0, G0);
  __ ret();
  __ delayed()->restore();
}


int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  if (SafepointMechanism::uses_thread_local_poll()) {
    __ ld_ptr(Address(G2_thread, Thread::polling_page_offset()), tmp->as_register());
  } else {
    __ set((intptr_t)os::get_polling_page(), tmp->as_register());
  }
  if (info != NULL) {
    add_debug_info_for_branch(info);
  }
  int offset = __ offset();

  __ relocate(relocInfo::poll_type);
  __ ld_ptr(tmp->as_register(), 0, G0);
  return offset;
}


void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();
  __ relocate(static_stub_Relocation::spec(call_pc));

  __ set_metadata(NULL, G5);
  // must be set to -1 at code generation time
  AddressLiteral addrlit(-1);
  __ jump_to(addrlit, G3);
  __ delayed()->nop();

  assert(__ offset() - start <= call_stub_size(), "stub too big");
  __ end_a_stub();
}
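
// The static call stub above is emitted with placeholders (a NULL metadata and
// a -1 jump target); presumably both are patched when the call site is
// resolved, giving the stub the callee's Method* in G5 and its entry point as
// the branch destination.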


void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_single_fpu()) {
    __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, opr1->as_float_reg(), opr2->as_float_reg());
  } else if (opr1->is_double_fpu()) {
    __ fcmp(FloatRegisterImpl::D, Assembler::fcc0, opr1->as_double_reg(), opr2->as_double_reg());
  } else if (opr1->is_single_cpu()) {
    if (opr2->is_constant()) {
      switch (opr2->as_constant_ptr()->type()) {
        case T_INT:
          { jint con = opr2->as_constant_ptr()->as_jint();
            if (Assembler::is_simm13(con)) {
              __ cmp(opr1->as_register(), con);
            } else {
              __ set(con, O7);
              __ cmp(opr1->as_register(), O7);
            }
          }
          break;

        case T_OBJECT:
          // there are only equal/notequal comparisons on objects
          { jobject con = opr2->as_constant_ptr()->as_jobject();
            if (con == NULL) {
              __ cmp(opr1->as_register(), 0);
            } else {
              jobject2reg(con, O7);
              __ cmp(opr1->as_register(), O7);
            }
          }
          break;

        default:
          ShouldNotReachHere();
          break;
      }
    } else {
      if (opr2->is_address()) {
        LIR_Address * addr = opr2->as_address_ptr();
        BasicType type = addr->type();
        if (type == T_OBJECT) __ ld_ptr(as_Address(addr), O7);
        else                  __ ld(as_Address(addr), O7);
        __ cmp(opr1->as_register(), O7);
      } else {
        __ cmp(opr1->as_register(), opr2->as_register());
      }
    }
  } else if (opr1->is_double_cpu()) {
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_constant() && opr2->as_jlong() == 0) {
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases");
      __ orcc(xhi, G0, G0);
    } else if (opr2->is_register()) {
      Register ylo = opr2->as_register_lo();
      Register yhi = opr2->as_register_hi();
      __ cmp(xlo, ylo);
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_address()) {
    LIR_Address * addr = opr1->as_address_ptr();
    BasicType type = addr->type();
    assert (opr2->is_constant(), "Checking");
    if (type == T_OBJECT) __ ld_ptr(as_Address(addr), O7);
    else                  __ ld(as_Address(addr), O7);
    __ cmp(O7, opr2->as_constant_ptr()->as_jint());
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
    } else if (left->is_double_fpu()) {
      __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
    } else {
      ShouldNotReachHere();
    }
  } else if (code == lir_cmp_l2i) {
    __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
  Assembler::Condition acond;
  switch (condition) {
    case lir_cond_equal:        acond = Assembler::equal;                break;
    case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
    case lir_cond_less:         acond = Assembler::less;                 break;
    case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
    case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
    case lir_cond_greater:      acond = Assembler::greater;              break;
    case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
    case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
    default:                    ShouldNotReachHere();
  };
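
  // A 32-bit constant that does not fit in simm13 is built with the usual
  // SPARC sethi/or idiom: sethi fills the upper 22 bits and or3 supplies the
  // low 10; here the two halves straddle the branch so that the or3 can sit
  // in the delay slot.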
  if (opr1->is_constant() && opr1->type() == T_INT) {
    Register dest = result->as_register();
    // load up first part of constant before branch
    // and do the rest in the delay slot.
    if (!Assembler::is_simm13(opr1->as_jint())) {
      __ sethi(opr1->as_jint(), dest);
    }
  } else if (opr1->is_constant()) {
    const2reg(opr1, result, lir_patch_none, NULL);
  } else if (opr1->is_register()) {
    reg2reg(opr1, result);
  } else if (opr1->is_stack()) {
    stack2reg(opr1, result, result->type());
  } else {
    ShouldNotReachHere();
  }
  Label skip;
  if (type == T_INT) {
    __ br(acond, false, Assembler::pt, skip);
  } else {
    __ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit
  }
  if (opr1->is_constant() && opr1->type() == T_INT) {
    Register dest = result->as_register();
    if (Assembler::is_simm13(opr1->as_jint())) {
      __ delayed()->or3(G0, opr1->as_jint(), dest);
    } else {
      // the sethi has been done above, so just put in the low 10 bits
      __ delayed()->or3(dest, opr1->as_jint() & 0x3ff, dest);
    }
  } else {
    // can't do anything useful in the delay slot
    __ delayed()->nop();
  }
  if (opr2->is_constant()) {
    const2reg(opr2, result, lir_patch_none, NULL);
  } else if (opr2->is_register()) {
    reg2reg(opr2, result);
  } else if (opr2->is_stack()) {
    stack2reg(opr2, result, result->type());
  } else {
    ShouldNotReachHere();
  }
  __ bind(skip);
}


void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "unused on this code path");
  assert(left->is_register(), "wrong items state");
  assert(dest->is_register(), "wrong items state");

  if (right->is_register()) {
    if (dest->is_float_kind()) {

      FloatRegister lreg, rreg, res;
      FloatRegisterImpl::Width w;
      if (right->is_single_fpu()) {
        w = FloatRegisterImpl::S;
        lreg = left->as_float_reg();
        rreg = right->as_float_reg();
        res  = dest->as_float_reg();
      } else {
        w = FloatRegisterImpl::D;
        lreg = left->as_double_reg();
        rreg = right->as_double_reg();
        res  = dest->as_double_reg();
      }

      switch (code) {
        case lir_add: __ fadd(w, lreg, rreg, res); break;
        case lir_sub: __ fsub(w, lreg, rreg, res); break;
        case lir_mul: // fall through
        case lir_mul_strictfp: __ fmul(w, lreg, rreg, res); break;
        case lir_div: // fall through
        case lir_div_strictfp: __ fdiv(w, lreg, rreg, res); break;
        default: ShouldNotReachHere();
      }

    } else if (dest->is_double_cpu()) {
      Register dst_lo = dest->as_register_lo();
      Register op1_lo = left->as_pointer_register();
      Register op2_lo = right->as_pointer_register();

      switch (code) {
        case lir_add:
          __ add(op1_lo, op2_lo, dst_lo);
          break;

        case lir_sub:
          __ sub(op1_lo, op2_lo, dst_lo);
          break;

        default: ShouldNotReachHere();
      }
    } else {
      assert (right->is_single_cpu(), "Just Checking");

      Register lreg = left->as_register();
      Register res  = dest->as_register();
      Register rreg = right->as_register();
      switch (code) {
        case lir_add:  __ add(lreg, rreg, res);  break;
        case lir_sub:  __ sub(lreg, rreg, res);  break;
        case lir_mul:  __ mulx(lreg, rreg, res); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    assert (right->is_constant(), "must be constant");

    if (dest->is_single_cpu()) {
      Register lreg = left->as_register();
      Register res  = dest->as_register();

      switch (code) {
        case lir_add: __ add  (lreg, simm13, res); break;
        case lir_sub: __ sub  (lreg, simm13, res); break;
        case lir_mul: __ mulx (lreg, simm13, res); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register lreg = left->as_pointer_register();
      Register res  = dest->as_register_lo();
      long con = right->as_constant_ptr()->as_jlong();
      assert(Assembler::is_simm13(con), "must be simm13");

      switch (code) {
        case lir_add: __ add  (lreg, (int)con, res); break;
        case lir_sub: __ sub  (lreg, (int)con, res); break;
        case lir_mul: __ mulx (lreg, (int)con, res); break;
        default: ShouldNotReachHere();
      }
    }
  }
}


void LIR_Assembler::fpop() {
  // do nothing
}


void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
  switch (code) {
    case lir_tan: {
      assert(thread->is_valid(), "preserve the thread object for performance reasons");
      assert(dest->as_double_reg() == F0, "the result will be in f0/f1");
      break;
    }
    case lir_sqrt: {
      assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt");
      FloatRegister src_reg = value->as_double_reg();
      FloatRegister dst_reg = dest->as_double_reg();
      __ fsqrt(FloatRegisterImpl::D, src_reg, dst_reg);
      break;
    }
    case lir_abs: {
      assert(!thread->is_valid(), "there is no need for a thread_reg for fabs");
      FloatRegister src_reg = value->as_double_reg();
      FloatRegister dst_reg = dest->as_double_reg();
      __ fabs(FloatRegisterImpl::D, src_reg, dst_reg);
      break;
    }
    default: {
      ShouldNotReachHere();
      break;
    }
  }
}


void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
  if (right->is_constant()) {
    if (dest->is_single_cpu()) {
      int simm13 = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_logic_and: __ and3 (left->as_register(), simm13, dest->as_register()); break;
        case lir_logic_or:  __ or3  (left->as_register(), simm13, dest->as_register()); break;
        case lir_logic_xor: __ xor3 (left->as_register(), simm13, dest->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else {
      long c = right->as_constant_ptr()->as_jlong();
      assert(c == (int)c && Assembler::is_simm13(c), "out of range");
      int simm13 = (int)c;
      switch (code) {
        case lir_logic_and:
          __ and3 (left->as_register_lo(), simm13, dest->as_register_lo());
          break;

        case lir_logic_or:
          __ or3 (left->as_register_lo(), simm13, dest->as_register_lo());
          break;

        case lir_logic_xor:
          __ xor3 (left->as_register_lo(), simm13, dest->as_register_lo());
          break;

        default: ShouldNotReachHere();
      }
    }
  } else {
    assert(right->is_register(), "right should be in register");

    if (dest->is_single_cpu()) {
      switch (code) {
        case lir_logic_and: __ and3 (left->as_register(), right->as_register(), dest->as_register()); break;
        case lir_logic_or:  __ or3  (left->as_register(), right->as_register(), dest->as_register()); break;
        case lir_logic_xor: __ xor3 (left->as_register(), right->as_register(), dest->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
                                                                        left->as_register_lo();
      Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
                                                                          right->as_register_lo();

      switch (code) {
        case lir_logic_and: __ and3 (l, r, dest->as_register_lo()); break;
        case lir_logic_or:  __ or3  (l, r, dest->as_register_lo()); break;
        case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break;
        default: ShouldNotReachHere();
      }
    }
  }
}


int LIR_Assembler::shift_amount(BasicType t) {
  int elem_size = type2aelembytes(t);
  switch (elem_size) {
    case 1 : return 0;
    case 2 : return 1;
    case 4 : return 2;
    case 8 : return 3;
  }
  ShouldNotReachHere();
  return -1;
}


void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == Oexception, "should match");
  assert(exceptionPC->as_register() == Oissuing_pc, "should match");

  info->add_register_oop(exceptionOop);

  // reuse the debug info from the safepoint poll for the throw op itself
  address pc_for_athrow = __ pc();
  int pc_for_athrow_offset = __ offset();
  RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
  __ set(pc_for_athrow, Oissuing_pc, rspec);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
}


void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == Oexception, "should match");

  __ br(Assembler::always, false, Assembler::pt, _unwind_handler_entry);
  __ delayed()->nop();
}

void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length  = op->length()->as_register();
  Register tmp = op->tmp()->as_register();
  Register tmp2 = O7;

  int flags = op->flags();
  ciArrayKlass* default_type = op->expected_type();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // higher 32 bits must be zero
  __ sra(dst_pos, 0, dst_pos);
  __ sra(src_pos, 0, src_pos);
  __ sra(length, 0, length);

  // set up the arraycopy stub information
  ArrayCopyStub* stub = op->stub();

  // Always use the stub if no type information is available. It's ok if
  // the known type isn't loaded, since the code sanity-checks it in debug
  // mode and the type isn't required when we know the exact type.
  // Also check that the type is an array type.
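  // The generic arraycopy stub's convention, as relied on below: it returns
  // 0 in O0 on success, or the bitwise complement of the number of elements
  // already copied when it bails out part way. The xor3(O0, -1, tmp) after
  // the call recovers that element count so src_pos/dst_pos/length can be
  // advanced before jumping to the slow-path stub for the remainder.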
  if (op->expected_type() == NULL) {
    __ mov(src,     O0);
    __ mov(src_pos, O1);
    __ mov(dst,     O2);
    __ mov(dst_pos, O3);
    __ mov(length,  O4);
    address copyfunc_addr = StubRoutines::generic_arraycopy();
    assert(copyfunc_addr != NULL, "generic arraycopy stub required");

#ifndef PRODUCT
    if (PrintC1Statistics) {
      address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
      __ inc_counter(counter, G1, G3);
    }
#endif
    __ call_VM_leaf(tmp, copyfunc_addr);

    __ xor3(O0, -1, tmp);
    __ sub(length, tmp, length);
    __ add(src_pos, tmp, src_pos);
    __ cmp_zero_and_br(Assembler::less, O0, *stub->entry());
    __ delayed()->add(dst_pos, tmp, dst_pos);
    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");

  // make sure src and dst are non-null and load array length
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ tst(src);
    __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ tst(dst);
    __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  // If the compiler was not able to prove that the exact type of the source
  // or the destination of the arraycopy is an array type, check at runtime
  // if the source or the destination is an instance type.
  if (flags & LIR_OpArrayCopy::type_check) {
    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      __ load_klass(dst, tmp);
      __ lduw(tmp, in_bytes(Klass::layout_helper_offset()), tmp2);
      __ cmp(tmp2, Klass::_lh_neutral_value);
      __ br(Assembler::greaterEqual, false, Assembler::pn, *stub->entry());
      __ delayed()->nop();
    }

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(src, tmp);
      __ lduw(tmp, in_bytes(Klass::layout_helper_offset()), tmp2);
      __ cmp(tmp2, Klass::_lh_neutral_value);
      __ br(Assembler::greaterEqual, false, Assembler::pn, *stub->entry());
      __ delayed()->nop();
    }
  }

  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    // test src_pos register
    __ cmp_zero_and_br(Assembler::less, src_pos, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    // test dst_pos register
    __ cmp_zero_and_br(Assembler::less, dst_pos, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    // make sure length isn't negative
    __ cmp_zero_and_br(Assembler::less, length, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ ld(src, arrayOopDesc::length_offset_in_bytes(), tmp2);
    __ add(length, src_pos, tmp);
    __ cmp(tmp2, tmp);
    __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ ld(dst, arrayOopDesc::length_offset_in_bytes(), tmp2);
    __ add(length, dst_pos, tmp);
    __ cmp(tmp2, tmp);
    __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  int shift = shift_amount(basic_type);

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible
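    // Rough plan for the checked cases: primitive-typed arrays may only be
    // copied when source and destination klasses are identical, so a raw
    // klass-word compare (compressed or not) suffices; object arrays may
    // also be copied when src is a subtype of dst, and if even that fails
    // we can still fall back to an element-wise copy with a per-element
    // store check via the checkcast_arraycopy stub.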
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays
      if (UseCompressedClassPointers) {
        // We don't need decode because we just need to compare
        __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
        __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
        __ cmp(tmp, tmp2);
        __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
      } else {
        __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
        __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
        __ cmp(tmp, tmp2);
        __ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry());
      }
      __ delayed()->nop();
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      address copyfunc_addr = StubRoutines::checkcast_arraycopy();

      Label cont, slow;
      assert_different_registers(tmp, tmp2, G3, G1);

      __ load_klass(src, G3);
      __ load_klass(dst, G1);

      __ check_klass_subtype_fast_path(G3, G1, tmp, tmp2, &cont, copyfunc_addr == NULL ? stub->entry() : &slow, NULL);

      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
      __ delayed()->nop();

      __ cmp(G3, 0);
      if (copyfunc_addr != NULL) { // use stub if available
        // src is not a sub class of dst so we have to do a
        // per-element check.
        __ br(Assembler::notEqual, false, Assembler::pt, cont);
        __ delayed()->nop();

        __ bind(slow);

        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // Check that the side we don't statically know to be an object
          // array really is one.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(src, tmp);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(dst, tmp);
          }
          int lh_offset = in_bytes(Klass::layout_helper_offset());

          __ lduw(tmp, lh_offset, tmp2);

          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ set(objArray_lh, tmp);
          __ cmp(tmp, tmp2);
          __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
          __ delayed()->nop();
        }

        Register src_ptr = O0;
        Register dst_ptr = O1;
        Register len     = O2;
        Register chk_off = O3;
        Register super_k = O4;

        __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
        if (shift == 0) {
          __ add(src_ptr, src_pos, src_ptr);
        } else {
          __ sll(src_pos, shift, tmp);
          __ add(src_ptr, tmp, src_ptr);
        }

        __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
        if (shift == 0) {
          __ add(dst_ptr, dst_pos, dst_ptr);
        } else {
          __ sll(dst_pos, shift, tmp);
          __ add(dst_ptr, tmp, dst_ptr);
        }
        __ mov(length, len);
        __ load_klass(dst, tmp);

        int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
        __ ld_ptr(tmp, ek_offset, super_k);

        int sco_offset = in_bytes(Klass::super_check_offset_offset());
        __ lduw(super_k, sco_offset, chk_off);

        __ call_VM_leaf(tmp, copyfunc_addr);

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ br_notnull_short(O0, Assembler::pn, failed);
          __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, G1, G3);
          __ bind(failed);
        }
#endif

        __ br_null(O0, false, Assembler::pt, *stub->continuation());
        __ delayed()->xor3(O0, -1, tmp);

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, G1, G3);
        }
#endif

        __ sub(length, tmp, length);
        __ add(src_pos, tmp, src_pos);
        __ br(Assembler::always, false, Assembler::pt, *stub->entry());
        __ delayed()->add(dst_pos, tmp, dst_pos);

        __ bind(cont);
      } else {
        __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
        __ delayed()->nop();
        __ bind(cont);
      }
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    metadata2reg(op->expected_type()->constant_encoding(), tmp);
    if (UseCompressedClassPointers) {
      // tmp holds the default type. It currently comes uncompressed after the
      // load of a constant, so encode it.
      __ encode_klass_not_null(tmp);
      // load the raw value of the dst klass, since we will be comparing
      // uncompressed values directly.
      __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
      if (basic_type != T_OBJECT) {
        __ cmp(tmp, tmp2);
        __ br(Assembler::notEqual, false, Assembler::pn, halt);
        // load the raw value of the src klass.
        __ delayed()->lduw(src, oopDesc::klass_offset_in_bytes(), tmp2);
        __ cmp_and_br_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
      } else {
        __ cmp(tmp, tmp2);
        __ br(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->cmp(src, dst);
        __ brx(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->nop();
      }
    } else {
      __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
      if (basic_type != T_OBJECT) {
        __ cmp(tmp, tmp2);
        __ brx(Assembler::notEqual, false, Assembler::pn, halt);
        __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
        __ cmp_and_brx_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
      } else {
        __ cmp(tmp, tmp2);
        __ brx(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->cmp(src, dst);
        __ brx(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->nop();
      }
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    address counter = Runtime1::arraycopy_count_address(basic_type);
    __ inc_counter(counter, G1, G3);
  }
#endif

  Register src_ptr = O0;
  Register dst_ptr = O1;
  Register len     = O2;

  __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
  if (shift == 0) {
    __ add(src_ptr, src_pos, src_ptr);
  } else {
    __ sll(src_pos, shift, tmp);
    __ add(src_ptr, tmp, src_ptr);
  }

  __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
  if (shift == 0) {
    __ add(dst_ptr, dst_pos, dst_ptr);
  } else {
    __ sll(dst_pos, shift, tmp);
    __ add(dst_ptr, tmp, dst_ptr);
  }

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned  = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);

  // the arraycopy stubs take a length in number of elements, so don't scale it.
  __ mov(length, len);
  __ call_VM_leaf(tmp, entry);

  __ bind(*stub->continuation());
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  if (dest->is_single_cpu()) {
    if (left->type() == T_OBJECT) {
      switch (code) {
        case lir_shl:  __ sllx (left->as_register(), count->as_register(), dest->as_register()); break;
        case lir_shr:  __ srax (left->as_register(), count->as_register(), dest->as_register()); break;
        case lir_ushr: __ srl  (left->as_register(), count->as_register(), dest->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else
      switch (code) {
        case lir_shl:  __ sll (left->as_register(), count->as_register(), dest->as_register()); break;
        case lir_shr:  __ sra (left->as_register(), count->as_register(), dest->as_register()); break;
        case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break;
        default: ShouldNotReachHere();
      }
  } else {
    switch (code) {
      case lir_shl:  __ sllx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
      case lir_shr:  __ srax (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
      case lir_ushr: __ srlx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
      default: ShouldNotReachHere();
    }
  }
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  if (left->type() == T_OBJECT) {
    count = count & 63;  // shouldn't shift by more than sizeof(intptr_t)
    Register l = left->as_register();
    Register d = dest->as_register_lo();
    switch (code) {
      case lir_shl:  __ sllx (l, count, d); break;
      case lir_shr:  __ srax (l, count, d); break;
      case lir_ushr: __ srlx (l, count, d); break;
      default: ShouldNotReachHere();
    }
    return;
  }

  if (dest->is_single_cpu()) {
    count = count & 0x1F; // Java spec
    switch (code) {
      case lir_shl:  __ sll (left->as_register(), count, dest->as_register()); break;
      case lir_shr:  __ sra (left->as_register(), count, dest->as_register()); break;
      case lir_ushr: __ srl (left->as_register(), count, dest->as_register()); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_cpu()) {
    count = count & 63; // Java spec
    switch (code) {
      case lir_shl:  __ sllx (left->as_pointer_register(), count, dest->as_pointer_register()); break;
      case lir_shr:  __ srax (left->as_pointer_register(), count, dest->as_pointer_register()); break;
      case lir_ushr: __ srlx (left->as_pointer_register(), count, dest->as_pointer_register()); break;
      default: ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  assert(op->tmp1()->as_register() == G1 &&
         op->tmp2()->as_register() == G3 &&
         op->tmp3()->as_register() == G4 &&
         op->obj()->as_register()  == O0 &&
         op->klass()->as_register() == G5, "must be");
  if (op->init_check()) {
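    // A klass can be visible here before its static initializer has run,
    // so the fast path re-reads the init_state byte and bails to the slow
    // path for anything other than fully_initialized; the runtime slow
    // path then sorts out (or waits for) class initialization.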
    add_debug_info_for_null_check_here(op->stub()->info());
    __ ldub(op->klass()->as_register(),
            in_bytes(InstanceKlass::init_state_offset()),
            op->tmp1()->as_register());
    __ cmp(op->tmp1()->as_register(), InstanceKlass::fully_initialized);
    __ br(Assembler::notEqual, false, Assembler::pn, *op->stub()->entry());
    __ delayed()->nop();
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->tmp3()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
  __ verify_oop(op->obj()->as_register());
}


void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  assert(op->tmp1()->as_register() == G1 &&
         op->tmp2()->as_register() == G3 &&
         op->tmp3()->as_register() == G4 &&
         op->tmp4()->as_register() == O1 &&
         op->klass()->as_register() == G5, "must be");

  __ signx(op->len()->as_register());
  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
    __ delayed()->nop();
  } else {
    __ allocate_array(op->obj()->as_register(),
                      op->len()->as_register(),
                      op->tmp1()->as_register(),
                      op->tmp2()->as_register(),
                      op->tmp3()->as_register(),
                      arrayOopDesc::header_size(op->type()),
                      type2aelembytes(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}


void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Register tmp1, Label* update_done) {
  uint i;
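  // Two passes over the ReceiverTypeData rows: the first matches recv
  // against the already-recorded receiver klasses and bumps the matching
  // row's counter; the second claims the first empty row for a receiver
  // seen here for the first time. Callers decide what to do when every
  // row is already taken (e.g. bump the polymorphic counter).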
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                          mdo_offset_bias);
    __ ld_ptr(receiver_addr, tmp1);
    __ verify_klass_ptr(tmp1);
    __ cmp_and_brx_short(recv, tmp1, Assembler::notEqual, Assembler::pt, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                      mdo_offset_bias);
    __ ld_ptr(data_addr, tmp1);
    __ add(tmp1, DataLayout::counter_increment, tmp1);
    __ st_ptr(tmp1, data_addr);
    __ ba(*update_done);
    __ delayed()->nop();
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                      mdo_offset_bias);
    __ ld_ptr(recv_addr, tmp1);
    __ br_notnull_short(tmp1, Assembler::pt, next_test);
    __ st_ptr(recv, recv_addr);
    __ set(DataLayout::counter_increment, tmp1);
    __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
              mdo_offset_bias);
    __ ba(*update_done);
    __ delayed()->nop();
    __ bind(next_test);
  }
}


void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
                                    ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
  md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  data = md->bci_to_data(bci);
  assert(data != NULL, "need data for checkcast");
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ld can use simm13s to reference the slots of the data
    mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
  }
}

void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
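  // Register roles for the sequence below: k_RInfo ends up holding the
  // constant klass being tested against, klass_RInfo the receiver's actual
  // klass, and Rtmp1 is scratch; the swap just below keeps obj out of
  // k_RInfo so that loading the constant cannot clobber the receiver.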
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  Register Rtmp1 = op->tmp3()->as_register();
  ciKlass* k = op->klass();


  if (obj == k_RInfo) {
    k_RInfo = klass_RInfo;
    klass_RInfo = obj;
  }

  ciMethodData* md;
  ciProfileData* data;
  int mdo_offset_bias = 0;
  if (op->should_profile()) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);

    Label not_null;
    __ br_notnull_short(obj, Assembler::pn, not_null);
    Register mdo      = k_RInfo;
    Register data_val = Rtmp1;
    metadata2reg(md->constant_encoding(), mdo);
    if (mdo_offset_bias > 0) {
      __ set(mdo_offset_bias, data_val);
      __ add(mdo, data_val, mdo);
    }
    Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
    __ ldub(flags_addr, data_val);
    __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
    __ stb(data_val, flags_addr);
    __ ba(*obj_is_null);
    __ delayed()->nop();
    __ bind(not_null);
  } else {
    __ br_null(obj, false, Assembler::pn, *obj_is_null);
    __ delayed()->nop();
  }

  Label profile_cast_failure, profile_cast_success;
  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
  Label *success_target = op->should_profile() ? &profile_cast_success : success;

  // patching may screw with our temporaries on sparc,
  // so let's do it before loading the class
  if (k->is_loaded()) {
    metadata2reg(k->constant_encoding(), k_RInfo);
  } else {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  }
  assert(obj != k_RInfo, "must be different");

  // get object class
  // not a safepoint as obj null check happens earlier
  __ load_klass(obj, klass_RInfo);
  if (op->fast_check()) {
    assert_different_registers(klass_RInfo, k_RInfo);
    __ cmp(k_RInfo, klass_RInfo);
    __ brx(Assembler::notEqual, false, Assembler::pt, *failure_target);
    __ delayed()->nop();
  } else {
    bool need_slow_path = true;
    if (k->is_loaded()) {
      if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset()))
        need_slow_path = false;
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
                                       (need_slow_path ? success_target : NULL),
                                       failure_target, NULL,
                                       RegisterOrConstant(k->super_check_offset()));
    } else {
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target,
                                       failure_target, NULL);
    }
    if (need_slow_path) {
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
      __ delayed()->nop();
      __ cmp(G3, 0);
      __ br(Assembler::equal, false, Assembler::pn, *failure_target);
      __ delayed()->nop();
      // Fall through to success case
    }
  }

  if (op->should_profile()) {
    Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
    assert_different_registers(obj, mdo, recv, tmp1);
    __ bind(profile_cast_success);
    metadata2reg(md->constant_encoding(), mdo);
    if (mdo_offset_bias > 0) {
      __ set(mdo_offset_bias, tmp1);
      __ add(mdo, tmp1, mdo);
    }
    __ load_klass(obj, recv);
    type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
    // Jump over the failure case
    __ ba(*success);
    __ delayed()->nop();
    // Cast failure case
    __ bind(profile_cast_failure);
    metadata2reg(md->constant_encoding(), mdo);
    if (mdo_offset_bias > 0) {
      __ set(mdo_offset_bias, tmp1);
      __ add(mdo, tmp1, mdo);
    }
    Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
    __ ld_ptr(data_addr, tmp1);
    __ sub(tmp1, DataLayout::counter_increment, tmp1);
    __ st_ptr(tmp1, data_addr);
    __ ba(*failure);
    __ delayed()->nop();
  }
  __ ba(*success);
  __ delayed()->nop();
}

void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();

    __ verify_oop(value);
    CodeStub* stub = op->stub();
    // check if it needs to be profiled
    ciMethodData* md;
    ciProfileData* data;
    int mdo_offset_bias = 0;
    if (op->should_profile()) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
    }
    Label profile_cast_success, profile_cast_failure, done;
    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

    if (op->should_profile()) {
      Label not_null;
      __ br_notnull_short(value, Assembler::pn, not_null);
      Register mdo      = k_RInfo;
      Register data_val = Rtmp1;
      metadata2reg(md->constant_encoding(), mdo);
      if (mdo_offset_bias > 0) {
        __ set(mdo_offset_bias, data_val);
        __ add(mdo, data_val, mdo);
      }
      Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
      __ ldub(flags_addr, data_val);
      __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
      __ stb(data_val, flags_addr);
      __ ba_short(done);
      __ bind(not_null);
    } else {
      __ br_null_short(value, Assembler::pn, done);
    }
    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(array, k_RInfo);
    __ load_klass(value, klass_RInfo);

    // get instance klass
    __ ld_ptr(Address(k_RInfo, ObjArrayKlass::element_klass_offset()), k_RInfo);
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL);

    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
    assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
    __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
    __ delayed()->nop();
    __ cmp(G3, 0);
    __ br(Assembler::equal, false, Assembler::pn, *failure_target);
    __ delayed()->nop();
    // fall through to the success case

    if (op->should_profile()) {
      Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
      assert_different_registers(value, mdo, recv, tmp1);
      __ bind(profile_cast_success);
      metadata2reg(md->constant_encoding(), mdo);
      if (mdo_offset_bias > 0) {
        __ set(mdo_offset_bias, tmp1);
        __ add(mdo, tmp1, mdo);
      }
      __ load_klass(value, recv);
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
      __ ba_short(done);
      // Cast failure case
      __ bind(profile_cast_failure);
      metadata2reg(md->constant_encoding(), mdo);
      if (mdo_offset_bias > 0) {
        __ set(mdo_offset_bias, tmp1);
        __ add(mdo, tmp1, mdo);
      }
      Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
      __ ld_ptr(data_addr, tmp1);
      __ sub(tmp1, DataLayout::counter_increment, tmp1);
      __ st_ptr(tmp1, data_addr);
      __ ba(*stub->entry());
      __ delayed()->nop();
    }
    __ bind(done);
  } else if (code == lir_checkcast) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success;
    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
    __ bind(success);
    __ mov(obj, dst);
  } else if (code == lir_instanceof) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    emit_typecheck_helper(op, &success, &failure, &failure);
    __ bind(failure);
    __ set(0, dst);
    __ ba_short(done);
    __ bind(success);
    __ set(1, dst);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }

}


void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  if (op->code() == lir_cas_long) {
    assert(VM_Version::supports_cx8(), "wrong machine");
    Register addr = op->addr()->as_pointer_register();
    Register cmp_value_lo = op->cmp_value()->as_register_lo();
    Register cmp_value_hi = op->cmp_value()->as_register_hi();
    Register new_value_lo = op->new_value()->as_register_lo();
    Register new_value_hi = op->new_value()->as_register_hi();
    Register t1 = op->tmp1()->as_register();
    Register t2 = op->tmp2()->as_register();
    __ mov(cmp_value_lo, t1);
    __ mov(new_value_lo, t2);
    // perform the compare and swap operation
    __ casx(addr, t1, t2);
    // generate condition code - if the swap succeeded, t2 ("new value" reg) was
    // overwritten with the original value in "addr" and will be equal to t1.
    __ cmp(t1, t2);
  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    Register addr = op->addr()->as_pointer_register();
    Register cmp_value = op->cmp_value()->as_register();
    Register new_value = op->new_value()->as_register();
    Register t1 = op->tmp1()->as_register();
    Register t2 = op->tmp2()->as_register();
    __ mov(cmp_value, t1);
    __ mov(new_value, t2);
    if (op->code() == lir_cas_obj) {
      if (UseCompressedOops) {
        __ encode_heap_oop(t1);
        __ encode_heap_oop(t2);
        __ cas(addr, t1, t2);
      } else {
        __ cas_ptr(addr, t1, t2);
      }
    } else {
      __ cas(addr, t1, t2);
    }
    __ cmp(t1, t2);
  } else {
    Unimplemented();
  }
}

void LIR_Assembler::set_24bit_FPU() {
  Unimplemented();
}


void LIR_Assembler::reset_FPU() {
  Unimplemented();
}


void LIR_Assembler::breakpoint() {
  __ breakpoint_trap();
}


void LIR_Assembler::push(LIR_Opr opr) {
  Unimplemented();
}


void LIR_Assembler::pop(LIR_Opr opr) {
  Unimplemented();
}


void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
  Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  Register dst = dst_opr->as_register();
  Register reg = mon_addr.base();
  int offset = mon_addr.disp();
  // compute pointer to BasicLock
  if (mon_addr.is_simm13()) {
    __ add(reg, offset, dst);
  } else {
    __ set(offset, dst);
    __ add(dst, reg, dst);
  }
}

void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register table = op->result_opr()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, table);

  __ set(ExternalAddress(StubRoutines::crc_table_addr()), table);
  __ not1(crc);
  __ clruwu(crc);
  __ update_byte_crc32(crc, val, table);
  __ not1(crc);

  __ mov(crc, res);
}

void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();

  // obj may not be an oop
  if (op->code() == lir_lock) {
    MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
    if (UseFastLocking) {
      assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
      // add debug info for NullPointerException only if one is possible
      if (op->info() != NULL) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
    } else {
      // always do slow locking
      // note: the slow locking code could be inlined here, however if we use
      //       slow locking, speed doesn't matter anyway and this solution is
      //       simpler and requires less duplicated code - additionally, the
      //       slow locking code is the same in either case which simplifies
      //       debugging
      __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
      __ delayed()->nop();
    }
  } else {
    assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
    if (UseFastLocking) {
      assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
      __ unlock_object(hdr, obj, lock, *op->stub()->entry());
    } else {
      // always do slow unlocking
      // note: the slow unlocking code could be inlined here, however if we use
      //       slow unlocking, speed doesn't matter anyway and this solution is
      //       simpler and requires less duplicated code - additionally, the
      //       slow unlocking code is the same in either case which simplifies
      //       debugging
      __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
      __ delayed()->nop();
    }
  }
  __ bind(*op->stub()->continuation());
}


void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
  assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register_lo();
  metadata2reg(md->constant_encoding(), mdo);
  int mdo_offset_bias = 0;
  if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
                            data->size_in_bytes())) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ld can use simm13s to reference the slots of the data
    mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
    __ set(mdo_offset_bias, O7);
    __ add(mdo, O7, mdo);
  }

  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, tmp1, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      //       avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data,
                                                         VirtualCallData::receiver_count_offset(i)) -
                            mdo_offset_bias);
          __ ld_ptr(data_addr, tmp1);
          __ add(tmp1, DataLayout::counter_increment, tmp1);
          __ st_ptr(tmp1, data_addr);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
                            mdo_offset_bias);
          metadata2reg(known_klass->constant_encoding(), tmp1);
          __ st_ptr(tmp1, recv_addr);
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
                            mdo_offset_bias);
          __ ld_ptr(data_addr, tmp1);
          __ add(tmp1, DataLayout::counter_increment, tmp1);
          __ st_ptr(tmp1, data_addr);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ ld_ptr(counter_addr, tmp1);
      __ add(tmp1, DataLayout::counter_increment, tmp1);
      __ st_ptr(tmp1, counter_addr);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ ld_ptr(counter_addr, tmp1);
    __ add(tmp1, DataLayout::counter_increment, tmp1);
    __ st_ptr(tmp1, counter_addr);
  }
}

void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp1 = op->tmp()->as_pointer_register();
  Register tmp2 = G1;
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj);

  if (tmp1 != obj) {
    __ mov(obj, tmp1);
  }
  if (do_null) {
    __ br_notnull_short(tmp1, Assembler::pt, update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ ld_ptr(mdo_addr, tmp1);
      __ or3(tmp1, TypeEntries::null_seen, tmp1);
      __ st_ptr(tmp1, mdo_addr);
    }
    if (do_update) {
      __ ba(next);
      __ delayed()->nop();
    }
#ifdef ASSERT
  } else {
    __ br_notnull_short(tmp1, Assembler::pt, update);
    __ stop("unexpected null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != NULL) {
      Label ok;
      __ load_klass(tmp1, tmp1);
      metadata2reg(exact_klass->constant_encoding(), tmp2);
      __ cmp_and_br_short(tmp1, tmp2, Assembler::equal, Assembler::pt, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
    }
#endif

    Label do_update;
    __ ld_ptr(mdo_addr, tmp2);

    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        if (exact_klass != NULL) {
          metadata2reg(exact_klass->constant_encoding(), tmp1);
        } else {
          __ load_klass(tmp1, tmp1);
        }

        __ xor3(tmp1, tmp2, tmp1);
        __ btst(TypeEntries::type_klass_mask, tmp1);
        // klass seen before, nothing to do. The unknown bit may have been
        // set already but no need to check.
        __ brx(Assembler::zero, false, Assembler::pt, next);
        __ delayed()->btst(TypeEntries::type_unknown, tmp1);
        // already unknown. Nothing to do anymore.
        __ brx(Assembler::notZero, false, Assembler::pt, next);

        if (TypeEntries::is_type_none(current_klass)) {
          __ delayed()->btst(TypeEntries::type_mask, tmp2);
          __ brx(Assembler::zero, true, Assembler::pt, do_update);
          // first time here. Set profile type.
          __ delayed()->or3(tmp2, tmp1, tmp2);
        } else {
          __ delayed()->nop();
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ btst(TypeEntries::type_unknown, tmp2);
        // already unknown. Nothing to do anymore.
        __ brx(Assembler::notZero, false, Assembler::pt, next);
        __ delayed()->nop();
      }

      // different than before. Cannot keep accurate profile.
      __ or3(tmp2, TypeEntries::type_unknown, tmp2);
    } else {
      // There's a single possible klass at this profile point
      assert(exact_klass != NULL, "should be");
      if (TypeEntries::is_type_none(current_klass)) {
        metadata2reg(exact_klass->constant_encoding(), tmp1);
        __ xor3(tmp1, tmp2, tmp1);
        __ btst(TypeEntries::type_klass_mask, tmp1);
        __ brx(Assembler::zero, false, Assembler::pt, next);
#ifdef ASSERT

        {
          Label ok;
          __ delayed()->btst(TypeEntries::type_mask, tmp2);
          __ brx(Assembler::zero, true, Assembler::pt, ok);
          __ delayed()->nop();

          __ stop("unexpected profiling mismatch");
          __ bind(ok);
        }
        // first time here. Set profile type.
        __ or3(tmp2, tmp1, tmp2);
#else
        // first time here. Set profile type.
        __ delayed()->or3(tmp2, tmp1, tmp2);
#endif

      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        // already unknown. Nothing to do anymore.
        __ btst(TypeEntries::type_unknown, tmp2);
        __ brx(Assembler::notZero, false, Assembler::pt, next);
        __ delayed()->or3(tmp2, TypeEntries::type_unknown, tmp2);
      }
    }

    __ bind(do_update);
    __ st_ptr(tmp2, mdo_addr);

    __ bind(next);
  }
}

void LIR_Assembler::align_backward_branch_target() {
  __ align(OptoLoopAlignment);
}


void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
  // make sure we are expecting a delay
  // this has the side effect of clearing the delay state
  // so we can use _masm instead of _masm->delayed() to do the
  // code generation.
  __ delayed();

  // make sure we only emit one instruction
  int offset = code_offset();
  op->delay_op()->emit_code(this);
#ifdef ASSERT
  if (code_offset() - offset != NativeInstruction::nop_instruction_size) {
    op->delay_op()->print();
  }
  assert(code_offset() - offset == NativeInstruction::nop_instruction_size,
         "only one instruction can go in a delay slot");
#endif

  // we may also be emitting the call info for the instruction
  // which we are the delay slot of.
  CodeEmitInfo* call_info = op->call_info();
  if (call_info) {
    add_call_info(code_offset(), call_info);
  }

  if (VerifyStackAtCalls) {
    _masm->sub(FP, SP, O7);
    _masm->cmp(O7, initial_frame_size_in_bytes());
    _masm->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2 );
  }
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
  // tmp must be unused
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
  assert(left->is_register(), "can only handle registers");

  if (left->is_single_cpu()) {
    __ neg(left->as_register(), dest->as_register());
  } else if (left->is_single_fpu()) {
    __ fneg(FloatRegisterImpl::S, left->as_float_reg(), dest->as_float_reg());
  } else if (left->is_double_fpu()) {
    __ fneg(FloatRegisterImpl::D, left->as_double_reg(), dest->as_double_reg());
  } else {
    assert (left->is_double_cpu(), "Must be a long");
    Register Rlow = left->as_register_lo();
    Register Rhi = left->as_register_hi();
    __ sub(G0, Rlow, dest->as_register_lo());
  }
}


void LIR_Assembler::fxch(int i) {
  Unimplemented();
}

void LIR_Assembler::fld(int i) {
  Unimplemented();
}

void LIR_Assembler::ffree(int i) {
  Unimplemented();
}

void LIR_Assembler::rt_call(LIR_Opr result, address dest,
                            const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {

  // if tmp is invalid, then the function being called doesn't destroy the thread
  if (tmp->is_valid()) {
    __ save_thread(tmp->as_pointer_register());
  }
  __ call(dest, relocInfo::runtime_call_type);
  __ delayed()->nop();
  if (info != NULL) {
    add_call_info_here(info);
  }
  if (tmp->is_valid()) {
    __ restore_thread(tmp->as_pointer_register());
  }

#ifdef ASSERT
  __ verify_thread();
#endif // ASSERT
}


void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  ShouldNotReachHere();

  NEEDS_CLEANUP;
  if (type == T_LONG) {
    LIR_Address* mem_addr = dest->is_address() ? dest->as_address_ptr() : src->as_address_ptr();

    // (extended to allow indexed as well as constant displaced for JSR-166)
    Register idx = noreg; // contains either constant offset or index

    int disp = mem_addr->disp();
    if (mem_addr->index() == LIR_OprFact::illegalOpr) {
      if (!Assembler::is_simm13(disp)) {
        idx = O7;
        __ set(disp, idx);
      }
    } else {
      assert(disp == 0, "not both indexed and disp");
      idx = mem_addr->index()->as_register();
    }

    int null_check_offset = -1;

    Register base = mem_addr->base()->as_register();
    if (src->is_register() && dest->is_address()) {
      // G4 is high half, G5 is low half
      // clear the top bits of G5, and scale up G4
      __ srl (src->as_register_lo(), 0, G5);
      __ sllx(src->as_register_hi(), 32, G4);
      // combine the two halves into the 64 bits of G4
      __ or3(G4, G5, G4);
      null_check_offset = __ offset();
      if (idx == noreg) {
        __ stx(G4, base, disp);
      } else {
        __ stx(G4, base, idx);
      }
    } else if (src->is_address() && dest->is_register()) {
      null_check_offset = __ offset();
      if (idx == noreg) {
        __ ldx(base, disp, G5);
      } else {
        __ ldx(base, idx, G5);
      }
      __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
      __ mov (G5, dest->as_register_lo());     // copy low half into lo
    } else {
      Unimplemented();
    }
    if (info != NULL) {
      add_debug_info_for_null_check(null_check_offset, info);
    }

  } else {
    // use normal move for all other volatiles since they don't need
    // special handling to remain atomic.
    move_op(src, dest, type, lir_patch_none, info, false, false, false);
  }
}

void LIR_Assembler::membar() {
  // only StoreLoad membars are ever explicitly needed on SPARC in TSO mode
  __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) );
}

void LIR_Assembler::membar_acquire() {
  // no-op on TSO
}

void LIR_Assembler::membar_release() {
  // no-op on TSO
}

void LIR_Assembler::membar_loadload() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
}

void LIR_Assembler::membar_storestore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
}

void LIR_Assembler::membar_loadstore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
}

void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

void LIR_Assembler::on_spin_wait() {
  Unimplemented();
}

// Pack two sequential registers containing 32 bit values
// into a single 64 bit register.
// src and src->successor() are packed into dst
// src and dst may be the same register.
// Note: src is destroyed
void LIR_Assembler::pack64(LIR_Opr src, LIR_Opr dst) {
  Register rs = src->as_register();
  Register rd = dst->as_register_lo();
  __ sllx(rs, 32, rs);
  __ srl(rs->successor(), 0, rs->successor());
  __ or3(rs, rs->successor(), rd);
}

// Unpack a 64 bit value in a register into
// two sequential registers.
// src is unpacked into dst and dst->successor()
void LIR_Assembler::unpack64(LIR_Opr src, LIR_Opr dst) {
  Register rs = src->as_register_lo();
  Register rd = dst->as_register_hi();
  assert_different_registers(rs, rd, rd->successor());
  __ srlx(rs, 32, rd);
  __ srl (rs,  0, rd->successor());
}

void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  const LIR_Address* addr = addr_opr->as_address_ptr();
  assert(addr->scale() == LIR_Address::times_1, "can't handle complex addresses yet");
  const Register dest_reg = dest->as_pointer_register();
  const Register base_reg = addr->base()->as_pointer_register();

  if (patch_code != lir_patch_none) {
    PatchingStub* patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(addr->disp() != 0, "must have");
    assert(base_reg != G3_scratch, "invariant");
    __ patchable_set(0, G3_scratch);
    patching_epilog(patch, patch_code, base_reg, info);
    assert(dest_reg != G3_scratch, "invariant");
    if (addr->index()->is_valid()) {
      const Register index_reg = addr->index()->as_pointer_register();
      assert(index_reg != G3_scratch, "invariant");
      __ add(index_reg, G3_scratch, G3_scratch);
    }
    __ add(base_reg, G3_scratch, dest_reg);
  } else {
    if (Assembler::is_simm13(addr->disp())) {
      if (addr->index()->is_valid()) {
        const Register index_reg = addr->index()->as_pointer_register();
        assert(index_reg != G3_scratch, "invariant");
        __ add(base_reg, addr->disp(), G3_scratch);
        __ add(index_reg, G3_scratch, dest_reg);
      } else {
        __ add(base_reg, addr->disp(), dest_reg);
      }
    } else {
      __ set(addr->disp(), G3_scratch);
      if (addr->index()->is_valid()) {
        const Register index_reg = addr->index()->as_pointer_register();
        assert(index_reg != G3_scratch, "invariant");
        __ add(index_reg, G3_scratch, G3_scratch);
      }
      __ add(base_reg, G3_scratch, dest_reg);
    }
  }
}


void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
  __ mov(G2_thread, result_reg->as_register());
}

#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    Assembler::Condition acond;
    switch (op->condition()) {
      case lir_cond_equal:        acond = Assembler::equal;                break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
      case lir_cond_less:         acond = Assembler::less;                 break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
      case lir_cond_greater:      acond = Assembler::greater;              break;
      case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
      case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
      default:                    ShouldNotReachHere();
    };
    __ br(acond, false, Assembler::pt, ok);
    __ delayed()->nop();
  }
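  // Control reaches this point only when the asserted condition did not
  // hold (the branch above skips to ok otherwise) or for lir_cond_always:
  // either stop the VM with the message attached to the LIR_OpAssert, or
  // drop into a breakpoint trap so the failure can be inspected live.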
  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif

void LIR_Assembler::peephole(LIR_List* lir) {
  LIR_OpList* inst = lir->instructions_list();
  for (int i = 0; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);
    switch (op->code()) {
      case lir_cond_float_branch:
      case lir_branch: {
        LIR_OpBranch* branch = op->as_OpBranch();
        assert(branch->info() == NULL, "shouldn't be state on branches anymore");
        LIR_Op* delay_op = NULL;
        // we'd like to be able to pull following instructions into
        // this slot but we don't know enough to do it safely yet so
        // only optimize block to block control flow.
        if (LIRFillDelaySlots && branch->block()) {
          LIR_Op* prev = inst->at(i - 1);
          if (prev && LIR_Assembler::is_single_instruction(prev) && prev->info() == NULL) {
            // swap previous instruction into delay slot
            inst->at_put(i - 1, op);
            inst->at_put(i, new LIR_OpDelay(prev, op->info()));
#ifndef PRODUCT
            if (LIRTracePeephole) {
              tty->print_cr("delayed");
              inst->at(i - 1)->print();
              inst->at(i)->print();
              tty->cr();
            }
#endif
            continue;
          }
        }

        if (!delay_op) {
          delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), NULL);
        }
        inst->insert_before(i + 1, delay_op);
        break;
      }
      case lir_static_call:
      case lir_virtual_call:
      case lir_icvirtual_call:
      case lir_optvirtual_call:
      case lir_dynamic_call: {
        LIR_Op* prev = inst->at(i - 1);
        if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL &&
            (op->code() != lir_virtual_call ||
             !prev->result_opr()->is_single_cpu() ||
             prev->result_opr()->as_register() != O0) &&
            LIR_Assembler::is_single_instruction(prev)) {
          // Only moves without info can be put into the delay slot.
          // Also don't allow the setup of the receiver in the delay
          // slot for vtable calls.
          inst->at_put(i - 1, op);
          inst->at_put(i, new LIR_OpDelay(prev, op->info()));
#ifndef PRODUCT
          if (LIRTracePeephole) {
            tty->print_cr("delayed");
            inst->at(i - 1)->print();
            inst->at(i)->print();
            tty->cr();
          }
#endif
        } else {
          LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
          inst->insert_before(i + 1, delay_op);
          i++;
        }
        break;
      }
    }
  }
}

void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  LIR_Address* addr = src->as_address_ptr();

  assert(data == dest, "swap uses only 2 operands");
  assert (code == lir_xchg, "no xadd on sparc");

  if (data->type() == T_INT) {
    __ swap(as_Address(addr), data->as_register());
  } else if (data->is_oop()) {
    Register obj = data->as_register();
    Register narrow = tmp->as_register();
    assert(UseCompressedOops, "swap is 32bit only");
    __ encode_heap_oop(obj, narrow);
    __ swap(as_Address(addr), narrow);
    __ decode_heap_oop(narrow, obj);
  } else {
    ShouldNotReachHere();
  }
}

#undef __