/*
 * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"

#define __ _masm->


//------------------------------------------------------------


bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  if (opr->is_constant()) {
    LIR_Const* constant = opr->as_constant_ptr();
    switch (constant->type()) {
      case T_INT: {
        jint value = constant->as_jint();
        return Assembler::is_simm13(value);
      }

      default:
        return false;
    }
  }
  return false;
}

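// Note: this predicate supports the delay-slot peephole pass (see the
// "peephole pass fills the delay slot" comments below): an op is eligible
// for a branch or call delay slot only if it is guaranteed to assemble to
// exactly one instruction.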
bool LIR_Assembler::is_single_instruction(LIR_Op* op) {
  switch (op->code()) {
    case lir_null_check:
      return true;


    case lir_add:
    case lir_ushr:
    case lir_shr:
    case lir_shl:
      // integer shifts and adds are always one instruction
      return op->result_opr()->is_single_cpu();


    case lir_move: {
      LIR_Op1* op1 = op->as_Op1();
      LIR_Opr src = op1->in_opr();
      LIR_Opr dst = op1->result_opr();

      if (src == dst) {
        NEEDS_CLEANUP;
        // this works around a problem where moves with the same src and dst
        // end up in the delay slot and then the assembler swallows the mov
        // since it has no effect and then it complains because the delay slot
        // is empty.  returning false stops the optimizer from putting this in
        // the delay slot
        return false;
      }

      // don't put moves involving oops into the delay slot since the VerifyOops code
      // will make it much larger than a single instruction.
      if (VerifyOops) {
        return false;
      }

      if (src->is_double_cpu() || dst->is_double_cpu() || op1->patch_code() != lir_patch_none ||
          ((src->is_double_fpu() || dst->is_double_fpu()) && op1->move_kind() != lir_move_normal)) {
        return false;
      }

      if (UseCompressedOops) {
        if (dst->is_address() && !dst->is_stack() && (dst->type() == T_OBJECT || dst->type() == T_ARRAY)) return false;
        if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
      }

      if (UseCompressedClassPointers) {
        if (src->is_address() && !src->is_stack() && src->type() == T_ADDRESS &&
            src->as_address_ptr()->disp() == oopDesc::klass_offset_in_bytes()) return false;
      }

      if (dst->is_register()) {
        if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
          return !PatchALot;
        } else if (src->is_single_stack()) {
          return true;
        }
      }

      if (src->is_register()) {
        if (dst->is_address() && Assembler::is_simm13(dst->as_address_ptr()->disp())) {
          return !PatchALot;
        } else if (dst->is_single_stack()) {
          return true;
        }
      }

      if (dst->is_register() &&
          ((src->is_register() && src->is_single_word() && src->is_same_type(dst)) ||
           (src->is_constant() && LIR_Assembler::is_small_constant(op->as_Op1()->in_opr())))) {
        return true;
      }

      return false;
    }

    default:
      return false;
  }
  ShouldNotReachHere();
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::O0_oop_opr;
}


LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::I0_opr;
}


int LIR_Assembler::initial_frame_size_in_bytes() const {
  return in_bytes(frame_map()->framesize_in_bytes());
}


// inline cache check: the inline cached class is in G5_inline_cache_reg (G5);
// we fetch the class of the receiver (O0) and compare it with the cached class.
// If they do not match we jump to the slow case.
int LIR_Assembler::check_icache() {
  int offset = __ offset();
  __ inline_cache_check(O0, G5_inline_cache_reg);
  return offset;
}


void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp):
  //
  //   1. Create a new compiled activation.
  //   2. Initialize local variables in the compiled activation.  The expression stack must be empty
  //      at the osr_bci; it is not initialized.
  //   3. Jump to the continuation address in compiled code to resume execution.

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  //   locals[nlocals-1..0]
  //   monitors[number_of_locks-1..0]
  //
  // locals is a direct copy of the interpreter frame, so the first slot
  // in the local array is the last local from the interpreter and the
  // last slot is local[0] (the receiver) from the interpreter
  //
  // Similarly with locks.  The first lock slot in the osr buffer is the
  // nth lock from the interpreter frame, and the nth lock slot in the osr
  // buffer is the 0th lock in the interpreter frame (the method lock if a
  // sync method)

  // Initialize monitors in the compiled activation.
  //   I0: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
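    // Illustration (assuming LP64, so BytesPerWord == 8, and a method with
    // max_locals == 2 and number_of_locks == 2): monitor_offset is
    // 8*2 + 16*1 == 32, so the loop below copies the lock/oop pair at
    // buffer offset 32 first and the pair at offset 16 second, just above
    // the 16 bytes of locals.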
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
        __ cmp_and_br_short(O7, G0, Assembler::notEqual, Assembler::pt, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif // ASSERT
      // Copy the lock field into the compiled activation.
      __ ld_ptr(OSR_buf, slot_offset + 0, O7);
      __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
      __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
      __ st_ptr(O7, frame_map()->address_for_monitor_object(i));
    }
  }
}


// --------------------------------------------------------------------------------------------

void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no) {
  if (!GenerateSynchronizationCode) return;

  Register obj_reg = obj_opr->as_register();
  Register lock_reg = lock_opr->as_register();

  Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  Register reg = mon_addr.base();
  int offset = mon_addr.disp();
  // compute pointer to BasicLock
  if (mon_addr.is_simm13()) {
    __ add(reg, offset, lock_reg);
  } else {
    __ set(offset, lock_reg);
    __ add(reg, lock_reg, lock_reg);
  }
  // unlock object
  MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, UseFastLocking, monitor_no);
  // _slow_case_stubs->append(slow_case);
  // temporary fix: must be created after the exception handler, therefore as call stub
  _slow_case_stubs->append(slow_case);
  if (UseFastLocking) {
    // try inlined fast unlocking first, revert to slow locking if it fails
    // note: lock_reg points to the displaced header since the displaced header offset is 0!
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
  } else {
    // always do slow unlocking
    // note: the slow unlocking code could be inlined here, however if we use
    //       slow unlocking, speed doesn't matter anyway and this solution is
    //       simpler and requires less duplicated code - additionally, the
    //       slow unlocking code is the same in either case which simplifies
    //       debugging
    __ br(Assembler::always, false, Assembler::pt, *slow_case->entry());
    __ delayed()->nop();
  }
  // done
  __ bind(*slow_case->continuation());
}

int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  ciMethod* method = compilation()->method();

  address handler_base = __ start_a_stub(exception_handler_size());

  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0);
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(O0);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(O0, I0);  // Preserve the exception
  }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::I1_opr);
    stub = new MonitorExitStub(FrameMap::I1_opr, true, 0);
    __ unlock_object(I3, I2, I1, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ mov(G2_thread, O0);
    __ save_thread(I1); // need to preserve the thread in G2 across the runtime call
    metadata2reg(method()->constant_encoding(), O1);
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type);
    __ delayed()->nop();
    __ restore_thread(I1);
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(I0, O0);  // Restore the exception
  }

  // dispatch to the unwind logic
  __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for deopt handler
  ciMethod* method = compilation()->method();
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
  __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
  __ delayed()->nop();
  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ set(NULL_WORD, reg);
  } else {
    int oop_index = __ oop_recorder()->find_index(o);
    assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(o)), "should be real oop");
    RelocationHolder rspec = oop_Relocation::spec(oop_index);
    __ set(NULL_WORD, reg, rspec); // Will be set when the nmethod is created
  }
}


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the object once it's been patched
  int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);

  AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
  assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
  // NULL will be dynamically patched later and the patched value may be large.  We must
  // therefore generate the sethi/add as a placeholder
  __ patchable_set(addrlit, reg);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::metadata2reg(Metadata* o, Register reg) {
  __ set_metadata_constant(o, reg);
}

void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the klass once it's been patched
  int index = __ oop_recorder()->allocate_metadata_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
  AddressLiteral addrlit(NULL, metadata_Relocation::spec(index));
  assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
  // NULL will be dynamically patched later and the patched value may be large.  We must
  // therefore generate the sethi/add as a placeholder
  __ patchable_set(addrlit, reg);

  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::emit_op3(LIR_Op3* op) {
  Register Rdividend = op->in_opr1()->as_register();
  Register Rdivisor  = noreg;
  Register Rscratch  = op->in_opr3()->as_register();
  Register Rresult   = op->result_opr()->as_register();
  int divisor = -1;

  if (op->in_opr2()->is_register()) {
    Rdivisor = op->in_opr2()->as_register();
  } else {
    divisor = op->in_opr2()->as_constant_ptr()->as_jint();
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
  }

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor != Rscratch, "");
  assert(op->code() == lir_idiv || op->code() == lir_irem, "Must be irem or idiv");

  if (Rdivisor == noreg && is_power_of_2(divisor)) {
    // convert division by a power of two into some shifts and logical operations
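    // Java division rounds toward zero while an arithmetic right shift
    // rounds toward negative infinity, so negative dividends are biased
    // by (divisor - 1) before shifting.  E.g. -7 / 4: the bias is 3 and
    // (-7 + 3) >> 2 == -1, matching Java semantics.  For divisor == 2 the
    // bias is just the sign bit, hence the srl by 31.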
    if (op->code() == lir_idiv) {
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ sra(Rscratch, log2_intptr(divisor), Rresult);
      return;
    } else {
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ andn(Rscratch, divisor - 1, Rscratch);
      __ sub(Rdividend, Rscratch, Rresult);
      return;
    }
  }

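  // sdivcc divides the 64-bit value formed by Y (high word) and the low
  // word of the rs1 operand, so Y is loaded with the dividend's sign
  // extension first.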
  __ sra(Rdividend, 31, Rscratch);
  __ wry(Rscratch);

  add_debug_info_for_div0_here(op->info());

  if (Rdivisor != noreg) {
    __ sdivcc(Rdividend, Rdivisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  } else {
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
    __ sdivcc(Rdividend, divisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  }

  Label skip;
  __ br(Assembler::overflowSet, true, Assembler::pn, skip);
  __ delayed()->Assembler::sethi(0x80000000, (op->code() == lir_idiv ? Rresult : Rscratch));
  __ bind(skip);

  if (op->code() == lir_irem) {
    if (Rdivisor != noreg) {
      __ smul(Rscratch, Rdivisor, Rscratch);
    } else {
      __ smul(Rscratch, divisor, Rscratch);
    }
    __ sub(Rdividend, Rscratch, Rresult);
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif
  assert(op->info() == NULL, "shouldn't have CodeEmitInfo");

  if (op->cond() == lir_cond_always) {
    __ br(Assembler::always, false, Assembler::pt, *(op->label()));
  } else if (op->code() == lir_cond_float_branch) {
    assert(op->ublock() != NULL, "must have unordered successor");
    bool is_unordered = (op->ublock() == op->block());
    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::f_equal;    break;
      case lir_cond_notEqual:     acond = Assembler::f_notEqual; break;
      case lir_cond_less:         acond = (is_unordered ? Assembler::f_unorderedOrLess          : Assembler::f_less);           break;
      case lir_cond_greater:      acond = (is_unordered ? Assembler::f_unorderedOrGreater       : Assembler::f_greater);        break;
      case lir_cond_lessEqual:    acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual   : Assembler::f_lessOrEqual);    break;
      case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
      default:                    ShouldNotReachHere();
    }
    __ fb(acond, false, Assembler::pn, *(op->label()));
  } else {
    assert (op->code() == lir_branch, "just checking");

    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::equal;                break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
      case lir_cond_less:         acond = Assembler::less;                 break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
      case lir_cond_greater:      acond = Assembler::greater;              break;
      case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
      case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
      default:                    ShouldNotReachHere();
    };

    // sparc has different condition codes for testing 32-bit
    // vs. 64-bit values.  We could always test xcc if we could
    // guarantee that 32-bit loads are always sign extended, but that
    // isn't true, and since sign extension isn't free it would impose
    // a slight cost.
#ifdef _LP64
    if (op->type() == T_INT) {
      __ br(acond, false, Assembler::pn, *(op->label()));
    } else
#endif
      __ brx(acond, false, Assembler::pn, *(op->label()));
  }
  // The peephole pass fills the delay slot
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  Bytecodes::Code code = op->bytecode();
  LIR_Opr dst = op->result_opr();

  switch(code) {
    case Bytecodes::_i2l: {
      Register rlo  = dst->as_register_lo();
      Register rhi  = dst->as_register_hi();
      Register rval = op->in_opr()->as_register();
#ifdef _LP64
      __ sra(rval, 0, rlo);
#else
      __ mov(rval, rlo);
      __ sra(rval, BitsPerInt-1, rhi);
#endif
      break;
    }
    case Bytecodes::_i2d:
    case Bytecodes::_i2f: {
      bool is_double = (code == Bytecodes::_i2d);
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width w = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      if (rsrc != rdst) {
        __ fmov(FloatRegisterImpl::S, rsrc, rdst);
      }
      __ fitof(w, rdst, rdst);
      break;
    }
    case Bytecodes::_f2i: {
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      Address addr = frame_map()->address_for_slot(dst->single_stack_ix());
      Label L;
      // result must be 0 if value is NaN; test by comparing value to itself
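      // The fb below sets the annul bit, so the st in its delay slot is
      // executed only when the branch is taken (the unordered, i.e. NaN,
      // case) and stores the required zero result.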
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
      __ fb(Assembler::f_unordered, true, Assembler::pn, L);
      __ delayed()->st(G0, addr); // annulled if contents of rsrc is not NaN
      __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
      // move integer result from float register to int register
      __ stf(FloatRegisterImpl::S, rsrc, addr.base(), addr.disp());
      __ bind(L);
      break;
    }
    case Bytecodes::_l2i: {
      Register rlo  = op->in_opr()->as_register_lo();
      Register rhi  = op->in_opr()->as_register_hi();
      Register rdst = dst->as_register();
#ifdef _LP64
      __ sra(rlo, 0, rdst);
#else
      __ mov(rlo, rdst);
#endif
      break;
    }
    case Bytecodes::_d2f:
    case Bytecodes::_f2d: {
      bool is_double = (code == Bytecodes::_f2d);
      assert((!is_double && dst->is_single_fpu()) || (is_double && dst->is_double_fpu()), "check");
      LIR_Opr val = op->in_opr();
      FloatRegister rval = (code == Bytecodes::_d2f) ? val->as_double_reg() : val->as_float_reg();
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width vw = is_double ? FloatRegisterImpl::S : FloatRegisterImpl::D;
      FloatRegisterImpl::Width dw = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      __ ftof(vw, dw, rval, rdst);
      break;
    }
    case Bytecodes::_i2s:
    case Bytecodes::_i2b: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
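      // A left shift followed by an arithmetic right shift sign-extends
      // the low 8 (or 16) bits; _i2c below pairs the left shift with a
      // logical right shift instead, since char is the only unsigned
      // Java type.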
      int shift = (code == Bytecodes::_i2b) ? (BitsPerInt - T_BYTE_aelem_bytes * BitsPerByte) : (BitsPerInt - BitsPerShort);
      __ sll(rval, shift, rdst);
      __ sra(rdst, shift, rdst);
      break;
    }
    case Bytecodes::_i2c: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
      int shift = BitsPerInt - T_CHAR_aelem_bytes * BitsPerByte;
      __ sll(rval, shift, rdst);
      __ srl(rdst, shift, rdst);
      break;
    }

    default: ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code) {
  // do nothing since all instructions are word aligned on sparc
}


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  __ call(op->addr(), rtype);
  // The peephole pass fills the delay slot, add_call_info is done in
  // LIR_Assembler::emit_delay.
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ ic_call(op->addr(), false);
  // The peephole pass fills the delay slot, add_call_info is done in
  // LIR_Assembler::emit_delay.
}


void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  add_debug_info_for_null_check_here(op->info());
  __ load_klass(O0, G3_scratch);
  if (Assembler::is_simm13(op->vtable_offset())) {
    __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
  } else {
    // This will generate 2 instructions
    __ set(op->vtable_offset(), G5_method);
    // ld_ptr, set_hi, set
    __ ld_ptr(G3_scratch, G5_method, G5_method);
  }
  __ ld_ptr(G5_method, Method::from_compiled_offset(), G3_scratch);
  __ callr(G3_scratch, G0);
  // the peephole pass fills the delay slot
}

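// SPARC load/store instructions take a 13-bit signed immediate
// displacement (-4096..4095).  Offsets outside that range must first be
// materialized into a scratch register (O7 here), which is what the
// is_simm13 checks below guard against.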
int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
  int store_offset;
  if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
    assert(base != O7, "destroying register");
    assert(!unaligned, "can't handle this");
    // for offsets larger than a simm13 we setup the offset in O7
    __ set(offset, O7);
    store_offset = store(from_reg, base, O7, type, wide);
  } else {
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(from_reg->as_register());
    }
    store_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ stb(from_reg->as_register(), base, offset); break;
      case T_CHAR  : __ sth(from_reg->as_register(), base, offset); break;
      case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
      case T_INT   : __ stw(from_reg->as_register(), base, offset); break;
      case T_LONG  :
#ifdef _LP64
        if (unaligned || PatchALot) {
          // Don't use O7 here because it may be equal to 'base' (see LIR_Assembler::reg2mem)
          assert(G3_scratch != base, "can't handle this");
          assert(G3_scratch != from_reg->as_register_lo(), "can't handle this");
          __ srax(from_reg->as_register_lo(), 32, G3_scratch);
          __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
          __ stw(G3_scratch,                 base, offset + hi_word_offset_in_bytes);
        } else {
          __ stx(from_reg->as_register_lo(), base, offset);
        }
#else
        assert(Assembler::is_simm13(offset + 4), "must be");
        __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
        __ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
#endif
        break;
      case T_ADDRESS:
      case T_METADATA:
        __ st_ptr(from_reg->as_register(), base, offset);
        break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(from_reg->as_register(), G3_scratch);
            store_offset = code_offset();
            __ stw(G3_scratch, base, offset);
          } else {
            __ st_ptr(from_reg->as_register(), base, offset);
          }
          break;
        }

      case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break;
      case T_DOUBLE:
        {
          FloatRegister reg = from_reg->as_double_reg();
          // split unaligned stores
          if (unaligned || PatchALot) {
            assert(Assembler::is_simm13(offset + 4), "must be");
            __ stf(FloatRegisterImpl::S, reg->successor(), base, offset + 4);
            __ stf(FloatRegisterImpl::S, reg,              base, offset);
          } else {
            __ stf(FloatRegisterImpl::D, reg, base, offset);
          }
          break;
        }
      default      : ShouldNotReachHere();
    }
  }
  return store_offset;
}


int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(from_reg->as_register());
  }
  int store_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ stb(from_reg->as_register(), base, disp); break;
    case T_CHAR  : __ sth(from_reg->as_register(), base, disp); break;
    case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
    case T_INT   : __ stw(from_reg->as_register(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ stx(from_reg->as_register_lo(), base, disp);
#else
      assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match");
      __ std(from_reg->as_register_hi(), base, disp);
#endif
      break;
    case T_ADDRESS:
      __ st_ptr(from_reg->as_register(), base, disp);
      break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ encode_heap_oop(from_reg->as_register(), G3_scratch);
          store_offset = code_offset();
          __ stw(G3_scratch, base, disp);
        } else {
          __ st_ptr(from_reg->as_register(), base, disp);
        }
        break;
      }
    case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, disp); break;
    case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break;
    default      : ShouldNotReachHere();
  }
  return store_offset;
}


int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
  int load_offset;
  if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
    assert(base != O7, "destroying register");
    assert(!unaligned, "can't handle this");
    // for offsets larger than a simm13 we setup the offset in O7
    __ set(offset, O7);
    load_offset = load(base, O7, to_reg, type, wide);
  } else {
    load_offset = code_offset();
    switch(type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ ldsb(base, offset, to_reg->as_register()); break;
      case T_CHAR  : __ lduh(base, offset, to_reg->as_register()); break;
      case T_SHORT : __ ldsh(base, offset, to_reg->as_register()); break;
      case T_INT   : __ ld(base, offset, to_reg->as_register()); break;
      case T_LONG  :
        if (!unaligned && !PatchALot) {
#ifdef _LP64
          __ ldx(base, offset, to_reg->as_register_lo());
#else
          assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
                 "must be sequential");
          __ ldd(base, offset, to_reg->as_register_hi());
#endif
        } else {
#ifdef _LP64
          assert(base != to_reg->as_register_lo(), "can't handle this");
          assert(O7 != to_reg->as_register_lo(), "can't handle this");
          __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
          __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
          __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
          __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
#else
          if (base == to_reg->as_register_lo()) {
            __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
            __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
          } else {
            __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
            __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
          }
#endif
        }
        break;
      case T_METADATA: __ ld_ptr(base, offset, to_reg->as_register()); break;
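      // With compressed class pointers the Klass* field is a 32-bit cell:
      // it is loaded with lduw and the full pointer is rebuilt with
      // decode_klass_not_null.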
      case T_ADDRESS:
#ifdef _LP64
        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
          __ lduw(base, offset, to_reg->as_register());
          __ decode_klass_not_null(to_reg->as_register());
        } else
#endif
        {
          __ ld_ptr(base, offset, to_reg->as_register());
        }
        break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            __ lduw(base, offset, to_reg->as_register());
            __ decode_heap_oop(to_reg->as_register());
          } else {
            __ ld_ptr(base, offset, to_reg->as_register());
          }
          break;
        }
      case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break;
      case T_DOUBLE:
        {
          FloatRegister reg = to_reg->as_double_reg();
          // split unaligned loads
          if (unaligned || PatchALot) {
            __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
            __ ldf(FloatRegisterImpl::S, base, offset,     reg);
          } else {
            __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
          }
          break;
        }
      default      : ShouldNotReachHere();
    }
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(to_reg->as_register());
    }
  }
  return load_offset;
}


int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
  int load_offset = code_offset();
  switch(type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ ldsb(base, disp, to_reg->as_register()); break;
    case T_CHAR  : __ lduh(base, disp, to_reg->as_register()); break;
    case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break;
    case T_INT   : __ ld(base, disp, to_reg->as_register()); break;
    case T_ADDRESS: __ ld_ptr(base, disp, to_reg->as_register()); break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ lduw(base, disp, to_reg->as_register());
          __ decode_heap_oop(to_reg->as_register());
        } else {
          __ ld_ptr(base, disp, to_reg->as_register());
        }
        break;
      }
    case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
    case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
    case T_LONG  :
#ifdef _LP64
      __ ldx(base, disp, to_reg->as_register_lo());
#else
      assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
             "must be sequential");
      __ ldd(base, disp, to_reg->as_register_hi());
#endif
      break;
    default      : ShouldNotReachHere();
  }
  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(to_reg->as_register());
  }
  return load_offset;
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
    case T_INT:
    case T_FLOAT: {
      Register src_reg = O7;
      int value = c->as_jint_bits();
      if (value == 0) {
        src_reg = G0;
      } else {
        __ set(value, O7);
      }
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ stw(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_ADDRESS: {
      Register src_reg = O7;
      int value = c->as_jint_bits();
      if (value == 0) {
        src_reg = G0;
      } else {
        __ set(value, O7);
      }
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ st_ptr(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_OBJECT: {
      Register src_reg = O7;
      jobject2reg(c->as_jobject(), src_reg);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ st_ptr(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());

      Register tmp = O7;
      int value_lo = c->as_jint_lo_bits();
      if (value_lo == 0) {
        tmp = G0;
      } else {
        __ set(value_lo, O7);
      }
      __ stw(tmp, addr.base(), addr.disp() + lo_word_offset_in_bytes);
      int value_hi = c->as_jint_hi_bits();
      if (value_hi == 0) {
        tmp = G0;
      } else {
        __ set(value_hi, O7);
      }
      __ stw(tmp, addr.base(), addr.disp() + hi_word_offset_in_bytes);
      break;
    }
    default:
      Unimplemented();
  }
}


void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();
  Register base = addr->base()->as_pointer_register();
  int offset = -1;

  switch (c->type()) {
    case T_INT:
    case T_FLOAT:
    case T_ADDRESS: {
      LIR_Opr tmp = FrameMap::O7_opr;
      int value = c->as_jint_bits();
      if (value == 0) {
        tmp = FrameMap::G0_opr;
      } else if (Assembler::is_simm13(value)) {
        __ set(value, O7);
      }
      if (addr->index()->is_valid()) {
        assert(addr->disp() == 0, "must be zero");
        offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
      } else {
        assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
        offset = store(tmp, base, addr->disp(), type, wide, false);
      }
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      assert(!addr->index()->is_valid(), "can't handle reg reg address here");
      assert(Assembler::is_simm13(addr->disp()) &&
             Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses");

      LIR_Opr tmp = FrameMap::O7_opr;
      int value_lo = c->as_jint_lo_bits();
      if (value_lo == 0) {
        tmp = FrameMap::G0_opr;
      } else {
        __ set(value_lo, O7);
      }
      offset = store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT, wide, false);
      int value_hi = c->as_jint_hi_bits();
      if (value_hi == 0) {
        tmp = FrameMap::G0_opr;
      } else {
        __ set(value_hi, O7);
      }
      store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT, wide, false);
      break;
    }
    case T_OBJECT: {
      jobject obj = c->as_jobject();
      LIR_Opr tmp;
      if (obj == NULL) {
        tmp = FrameMap::G0_opr;
      } else {
        tmp = FrameMap::O7_opr;
        jobject2reg(c->as_jobject(), O7);
      }
      // handle either reg+reg or reg+disp address
      if (addr->index()->is_valid()) {
        assert(addr->disp() == 0, "must be zero");
        offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
      } else {
        assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
        offset = store(tmp, base, addr->disp(), type, wide, false);
      }

      break;
    }
    default:
      Unimplemented();
  }
  if (info != NULL) {
    assert(offset != -1, "offset should've been set");
    add_debug_info_for_null_check(offset, info);
  }
}

void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Opr to_reg = dest;

  switch (c->type()) {
    case T_INT:
    case T_ADDRESS:
      {
        jint con = c->as_jint();
        if (to_reg->is_single_cpu()) {
          assert(patch_code == lir_patch_none, "no patching handled here");
          __ set(con, to_reg->as_register());
        } else {
          ShouldNotReachHere();
          assert(to_reg->is_single_fpu(), "wrong register kind");

          __ set(con, O7);
          Address temp_slot(SP, (frame::register_save_words * wordSize) + STACK_BIAS);
          __ st(O7, temp_slot);
          __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg());
        }
      }
      break;

    case T_LONG:
      {
        jlong con = c->as_jlong();

        if (to_reg->is_double_cpu()) {
#ifdef _LP64
          __ set(con, to_reg->as_register_lo());
#else
          __ set(low(con),  to_reg->as_register_lo());
          __ set(high(con), to_reg->as_register_hi());
#endif
#ifdef _LP64
        } else if (to_reg->is_single_cpu()) {
          __ set(con, to_reg->as_register());
#endif
        } else {
          ShouldNotReachHere();
          assert(to_reg->is_double_fpu(), "wrong register kind");
          Address temp_slot_lo(SP, ((frame::register_save_words) * wordSize) + STACK_BIAS);
          Address temp_slot_hi(SP, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS);
          __ set(low(con),  O7);
          __ st(O7, temp_slot_lo);
          __ set(high(con), O7);
          __ st(O7, temp_slot_hi);
          __ ldf(FloatRegisterImpl::D, temp_slot_lo, to_reg->as_double_reg());
        }
      }
      break;

    case T_OBJECT:
      {
        if (patch_code == lir_patch_none) {
          jobject2reg(c->as_jobject(), to_reg->as_register());
        } else {
          jobject2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;

    case T_METADATA:
      {
        if (patch_code == lir_patch_none) {
          metadata2reg(c->as_metadata(), to_reg->as_register());
        } else {
          klass2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;

    case T_FLOAT:
      {
        address const_addr = __ float_constant(c->as_jfloat());
        if (const_addr == NULL) {
          bailout("const section overflow");
          break;
        }
        RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
        AddressLiteral const_addrlit(const_addr, rspec);
        if (to_reg->is_single_fpu()) {
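          // patchable_sethi materializes the upper 22 bits of the
          // constant-section address; the remaining low 10 bits are folded
          // into the load's immediate (low10).  The internal_word
          // relocation keeps the pair correct if the code is moved.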
          __ patchable_sethi(const_addrlit, O7);
          __ relocate(rspec);
          __ ldf(FloatRegisterImpl::S, O7, const_addrlit.low10(), to_reg->as_float_reg());

        } else {
          assert(to_reg->is_single_cpu(), "Must be a cpu register.");

          __ set(const_addrlit, O7);
          __ ld(O7, 0, to_reg->as_register());
        }
      }
      break;

    case T_DOUBLE:
      {
        address const_addr = __ double_constant(c->as_jdouble());
        if (const_addr == NULL) {
          bailout("const section overflow");
          break;
        }
        RelocationHolder rspec = internal_word_Relocation::spec(const_addr);

        if (to_reg->is_double_fpu()) {
          AddressLiteral const_addrlit(const_addr, rspec);
          __ patchable_sethi(const_addrlit, O7);
          __ relocate(rspec);
          __ ldf(FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
        } else {
          assert(to_reg->is_double_cpu(), "Must be a long register.");
#ifdef _LP64
          __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
#else
          __ set(low(jlong_cast(c->as_jdouble())),  to_reg->as_register_lo());
          __ set(high(jlong_cast(c->as_jdouble())), to_reg->as_register_hi());
#endif
        }

      }
      break;

    default:
      ShouldNotReachHere();
  }
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Register reg = addr->base()->as_pointer_register();
  LIR_Opr index = addr->index();
  if (index->is_illegal()) {
    return Address(reg, addr->disp());
  } else {
    assert(addr->disp() == 0, "unsupported address mode");
    return Address(reg, index->as_pointer_register());
  }
}

void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  switch (type) {
    case T_INT:
    case T_FLOAT: {
      Register tmp = O7;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ lduw(from.base(), from.disp(), tmp);
      __ stw(tmp, to.base(), to.disp());
      break;
    }
    case T_OBJECT: {
      Register tmp = O7;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ ld_ptr(from.base(), from.disp(), tmp);
      __ st_ptr(tmp, to.base(), to.disp());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Register tmp = O7;
      Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
      Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ lduw(from.base(), from.disp(), tmp);
      __ stw(tmp, to.base(), to.disp());
      __ lduw(from.base(), from.disp() + 4, tmp);
      __ stw(tmp, to.base(), to.disp() + 4);
      break;
    }

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base.base(), base.disp() + hi_word_offset_in_bytes);
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base.base(), base.disp() + lo_word_offset_in_bytes);
}


void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {

  assert(type != T_METADATA, "load of metadata ptr not supported");
  LIR_Address* addr = src_opr->as_address_ptr();
  LIR_Opr to_reg = dest;

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!to_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
      if (needs_patching) {
        __ patchable_set(0, O7);
      } else {
        __ set(disp_value, O7);
      }
      disp_reg = O7;
    }
  } else if (unaligned || PatchALot) {
    __ add(src, addr->index()->as_pointer_register(), O7);
    src = O7;
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // remember the offset of the load.  The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset = code_offset();

  assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
  if (disp_reg == noreg) {
    offset = load(src, disp_value, to_reg, type, wide, unaligned);
  } else {
    assert(!unaligned, "can't handle this");
    offset = load(src, disp_reg, to_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }
  if (info != NULL) add_debug_info_for_null_check(offset, info);
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  Address addr;
  if (src->is_single_word()) {
    addr = frame_map()->address_for_slot(src->single_stack_ix());
  } else if (src->is_double_word()) {
    addr = frame_map()->address_for_double_slot(src->double_stack_ix());
  }

  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  Address addr;
  if (dest->is_single_word()) {
    addr = frame_map()->address_for_slot(dest->single_stack_ix());
  } else if (dest->is_double_word()) {
    addr = frame_map()->address_for_slot(dest->double_stack_ix());
  }
  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
  if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
    if (from_reg->is_double_fpu()) {
      // double to double moves
      assert(to_reg->is_double_fpu(), "should match");
      __ fmov(FloatRegisterImpl::D, from_reg->as_double_reg(), to_reg->as_double_reg());
    } else {
      // float to float moves
      assert(to_reg->is_single_fpu(), "should match");
      __ fmov(FloatRegisterImpl::S, from_reg->as_float_reg(), to_reg->as_float_reg());
    }
  } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
    if (from_reg->is_double_cpu()) {
#ifdef _LP64
      __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
#else
      assert(to_reg->is_double_cpu() &&
             from_reg->as_register_hi() != to_reg->as_register_lo() &&
             from_reg->as_register_lo() != to_reg->as_register_hi(),
             "should both be long and not overlap");
      // long to long moves
      __ mov(from_reg->as_register_hi(), to_reg->as_register_hi());
      __ mov(from_reg->as_register_lo(), to_reg->as_register_lo());
#endif
#ifdef _LP64
    } else if (to_reg->is_double_cpu()) {
      // int to int moves
      __ mov(from_reg->as_register(), to_reg->as_register_lo());
#endif
    } else {
      // int to int moves
      __ mov(from_reg->as_register(), to_reg->as_register());
    }
  } else {
    ShouldNotReachHere();
  }
  if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
    __ verify_oop(to_reg->as_register());
  }
}

void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
                            bool wide, bool unaligned) {
  assert(type != T_METADATA, "store of metadata ptr not supported");
  LIR_Address* addr = dest->as_address_ptr();

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->is_oop_register()) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!from_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
      if (needs_patching) {
        __ patchable_set(0, O7);
      } else {
        __ set(disp_value, O7);
      }
      disp_reg = O7;
    }
  } else if (unaligned || PatchALot) {
    __ add(src, addr->index()->as_pointer_register(), O7);
    src = O7;
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // remember the offset of the store.  The patching_epilog must be done
  // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;

  assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
  if (disp_reg == noreg) {
    offset = store(from_reg, src, disp_value, type, wide, unaligned);
  } else {
    assert(!unaligned, "can't handle this");
    offset = store(from_reg, src, disp_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }

  if (info != NULL) add_debug_info_for_null_check(offset, info);
}


void LIR_Assembler::return_op(LIR_Opr result) {
  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }
  // the poll may need a register so just pick one that isn't the return register
#if defined(TIERED) && !defined(_LP64)
  if (result->type_field() == LIR_OprDesc::long_type) {
    // Must move the result to G1
    // Must leave proper result in O0,O1 and G1 (TIERED only)
    __ sllx(I0, 32, G1);  // Shift bits into high G1
    __ srl (I1, 0, I1);   // Zero extend O1 (harmless?)
    __ or3 (I1, G1, G1);  // OR 64 bits into G1
#ifdef ASSERT
    // mangle it so any problems will show up
    __ set(0xdeadbeef, I0);
    __ set(0xdeadbeef, I1);
#endif
  }
#endif // TIERED
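  // Safepoint poll: a load from the VM's polling page.  When a safepoint
  // is pending the VM protects the page, so the load traps and the signal
  // handler brings this thread to a safepoint stop.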
  __ set((intptr_t)os::get_polling_page(), L0);
  __ relocate(relocInfo::poll_return_type);
  __ ld_ptr(L0, 0, G0);
  __ ret();
  __ delayed()->restore();
}


int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  __ set((intptr_t)os::get_polling_page(), tmp->as_register());
  if (info != NULL) {
    add_debug_info_for_branch(info);
  }
  int offset = __ offset();
  __ relocate(relocInfo::poll_type);
  __ ld_ptr(tmp->as_register(), 0, G0);
  return offset;
}


void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();
  __ relocate(static_stub_Relocation::spec(call_pc));
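  // Both values below are placeholders: the NULL metadata and the -1
  // jump destination are rewritten when the call site is resolved.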
  __ set_metadata(NULL, G5);
  // must be set to -1 at code generation time
  AddressLiteral addrlit(-1);
  __ jump_to(addrlit, G3);
  __ delayed()->nop();

  assert(__ offset() - start <= call_stub_size(), "stub too big");
  __ end_a_stub();
}


void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_single_fpu()) {
    __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, opr1->as_float_reg(), opr2->as_float_reg());
  } else if (opr1->is_double_fpu()) {
    __ fcmp(FloatRegisterImpl::D, Assembler::fcc0, opr1->as_double_reg(), opr2->as_double_reg());
  } else if (opr1->is_single_cpu()) {
    if (opr2->is_constant()) {
      switch (opr2->as_constant_ptr()->type()) {
        case T_INT:
          { jint con = opr2->as_constant_ptr()->as_jint();
            if (Assembler::is_simm13(con)) {
              __ cmp(opr1->as_register(), con);
            } else {
              __ set(con, O7);
              __ cmp(opr1->as_register(), O7);
            }
          }
          break;

        case T_OBJECT:
          // there are only equal/notEqual comparisons on objects
          { jobject con = opr2->as_constant_ptr()->as_jobject();
            if (con == NULL) {
              __ cmp(opr1->as_register(), 0);
            } else {
              jobject2reg(con, O7);
              __ cmp(opr1->as_register(), O7);
            }
          }
          break;

        default:
          ShouldNotReachHere();
          break;
      }
    } else {
      if (opr2->is_address()) {
        LIR_Address* addr = opr2->as_address_ptr();
        BasicType type = addr->type();
        if (type == T_OBJECT) __ ld_ptr(as_Address(addr), O7);
        else                  __ ld(as_Address(addr), O7);
        __ cmp(opr1->as_register(), O7);
      } else {
        __ cmp(opr1->as_register(), opr2->as_register());
      }
    }
  } else if (opr1->is_double_cpu()) {
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_constant() && opr2->as_jlong() == 0) {
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases");
#ifdef _LP64
      __ orcc(xhi, G0, G0);
#else
      __ orcc(xhi, xlo, G0);
#endif
    } else if (opr2->is_register()) {
      Register ylo = opr2->as_register_lo();
      Register yhi = opr2->as_register_hi();
#ifdef _LP64
      __ cmp(xlo, ylo);
#else
      __ subcc(xlo, ylo, xlo);
      __ subccc(xhi, yhi, xhi);
      if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
        __ orcc(xhi, xlo, G0);
      }
#endif
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_address()) {
    LIR_Address* addr = opr1->as_address_ptr();
    BasicType type = addr->type();
    assert (opr2->is_constant(), "Checking");
    if (type == T_OBJECT) __ ld_ptr(as_Address(addr), O7);
    else                  __ ld(as_Address(addr), O7);
    __ cmp(O7, opr2->as_constant_ptr()->as_jint());
  } else {
    ShouldNotReachHere();
  }
}



void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
  Assembler::Condition acond;
  switch (condition) {
    case lir_cond_equal:        acond = Assembler::equal;                break;
    case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
    case lir_cond_less:         acond = Assembler::less;                 break;
    case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
    case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
    case lir_cond_greater:      acond = Assembler::greater;              break;
    case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
    case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
    default:                    ShouldNotReachHere();
  };

  if (opr1->is_constant() && opr1->type() == T_INT) {
    Register dest = result->as_register();
    // load up first part of constant before branch
    // and do the rest in the delay slot.
    if (!Assembler::is_simm13(opr1->as_jint())) {
      __ sethi(opr1->as_jint(), dest);
    }
  } else if (opr1->is_constant()) {
    const2reg(opr1, result, lir_patch_none, NULL);
  } else if (opr1->is_register()) {
    reg2reg(opr1, result);
  } else if (opr1->is_stack()) {
    stack2reg(opr1, result, result->type());
  } else {
    ShouldNotReachHere();
  }
  Label skip;
#ifdef _LP64
  if (type == T_INT) {
    __ br(acond, false, Assembler::pt, skip);
  } else
#endif
    __ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit
  if (opr1->is_constant() && opr1->type() == T_INT) {
    Register dest = result->as_register();
    if (Assembler::is_simm13(opr1->as_jint())) {
      __ delayed()->or3(G0, opr1->as_jint(), dest);
    } else {
      // the sethi has been done above, so just put in the low 10 bits
      __ delayed()->or3(dest, opr1->as_jint() & 0x3ff, dest);
    }
  } else {
    // can't do anything useful in the delay slot
    __ delayed()->nop();
  }
  if (opr2->is_constant()) {
    const2reg(opr2, result, lir_patch_none, NULL);
  } else if (opr2->is_register()) {
    reg2reg(opr2, result);
  } else if (opr2->is_stack()) {
    stack2reg(opr2, result, result->type());
  } else {
    ShouldNotReachHere();
  }
  __ bind(skip);
}
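
// The sethi/or3 split above is the usual SPARC recipe for a 32-bit constant
// that does not fit a 13-bit immediate: sethi writes bits 31..10 before the
// branch, or3 fills bits 9..0 in the delay slot. For an illustrative
// (hypothetical) constant 0x12345678:
//   sethi %hi(0x12345678), dest   ! dest = 0x12345400
//   or    dest, 0x278, dest       ! dest = 0x12345678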


void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "unused on this code path");
  assert(left->is_register(), "wrong items state");
  assert(dest->is_register(), "wrong items state");

  if (right->is_register()) {
    if (dest->is_float_kind()) {

      FloatRegister lreg, rreg, res;
      FloatRegisterImpl::Width w;
      if (right->is_single_fpu()) {
        w = FloatRegisterImpl::S;
        lreg = left->as_float_reg();
        rreg = right->as_float_reg();
        res  = dest->as_float_reg();
      } else {
        w = FloatRegisterImpl::D;
        lreg = left->as_double_reg();
        rreg = right->as_double_reg();
        res  = dest->as_double_reg();
      }

      switch (code) {
        case lir_add: __ fadd(w, lreg, rreg, res); break;
        case lir_sub: __ fsub(w, lreg, rreg, res); break;
        case lir_mul: // fall through
        case lir_mul_strictfp: __ fmul(w, lreg, rreg, res); break;
        case lir_div: // fall through
        case lir_div_strictfp: __ fdiv(w, lreg, rreg, res); break;
        default: ShouldNotReachHere();
      }

    } else if (dest->is_double_cpu()) {
#ifdef _LP64
      Register dst_lo = dest->as_register_lo();
      Register op1_lo = left->as_pointer_register();
      Register op2_lo = right->as_pointer_register();

      switch (code) {
        case lir_add:
          __ add(op1_lo, op2_lo, dst_lo);
          break;

        case lir_sub:
          __ sub(op1_lo, op2_lo, dst_lo);
          break;

        default: ShouldNotReachHere();
      }
#else
      Register op1_lo = left->as_register_lo();
      Register op1_hi = left->as_register_hi();
      Register op2_lo = right->as_register_lo();
      Register op2_hi = right->as_register_hi();
      Register dst_lo = dest->as_register_lo();
      Register dst_hi = dest->as_register_hi();

      switch (code) {
        case lir_add:
          __ addcc(op1_lo, op2_lo, dst_lo);
          __ addc (op1_hi, op2_hi, dst_hi);
          break;

        case lir_sub:
          __ subcc(op1_lo, op2_lo, dst_lo);
          __ subc (op1_hi, op2_hi, dst_hi);
          break;

        default: ShouldNotReachHere();
      }
#endif
    } else {
      assert(right->is_single_cpu(), "Just Checking");

      Register lreg = left->as_register();
      Register res  = dest->as_register();
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ add (lreg, rreg, res); break;
        case lir_sub: __ sub (lreg, rreg, res); break;
        case lir_mul: __ mulx(lreg, rreg, res); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    assert(right->is_constant(), "must be constant");

    if (dest->is_single_cpu()) {
      Register lreg = left->as_register();
      Register res  = dest->as_register();
      int simm13 = right->as_constant_ptr()->as_jint();

      switch (code) {
        case lir_add: __ add (lreg, simm13, res); break;
        case lir_sub: __ sub (lreg, simm13, res); break;
        case lir_mul: __ mulx(lreg, simm13, res); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register lreg = left->as_pointer_register();
      Register res  = dest->as_register_lo();
      long con = right->as_constant_ptr()->as_jlong();
      assert(Assembler::is_simm13(con), "must be simm13");

      switch (code) {
        case lir_add: __ add (lreg, (int)con, res); break;
        case lir_sub: __ sub (lreg, (int)con, res); break;
        case lir_mul: __ mulx(lreg, (int)con, res); break;
        default: ShouldNotReachHere();
      }
    }
  }
}


void LIR_Assembler::fpop() {
  // do nothing
}


void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
  switch (code) {
    case lir_tan: {
      assert(thread->is_valid(), "preserve the thread object for performance reasons");
      assert(dest->as_double_reg() == F0, "the result will be in f0/f1");
      break;
    }
    case lir_sqrt: {
      assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt");
      FloatRegister src_reg = value->as_double_reg();
      FloatRegister dst_reg = dest->as_double_reg();
      __ fsqrt(FloatRegisterImpl::D, src_reg, dst_reg);
      break;
    }
    case lir_abs: {
      assert(!thread->is_valid(), "there is no need for a thread_reg for fabs");
      FloatRegister src_reg = value->as_double_reg();
      FloatRegister dst_reg = dest->as_double_reg();
      __ fabs(FloatRegisterImpl::D, src_reg, dst_reg);
      break;
    }
    default: {
      ShouldNotReachHere();
      break;
    }
  }
}
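
// A recurring constraint in this file: SPARC arithmetic and logical
// instructions take a 13-bit signed immediate (-4096..4095), hence the
// Assembler::is_simm13 guards and the O7 scratch-register spills whenever
// a constant falls outside that range.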


void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
  if (right->is_constant()) {
    if (dest->is_single_cpu()) {
      int simm13 = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_logic_and: __ and3(left->as_register(), simm13, dest->as_register()); break;
        case lir_logic_or:  __ or3 (left->as_register(), simm13, dest->as_register()); break;
        case lir_logic_xor: __ xor3(left->as_register(), simm13, dest->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else {
      long c = right->as_constant_ptr()->as_jlong();
      assert(c == (int)c && Assembler::is_simm13(c), "out of range");
      int simm13 = (int)c;
      switch (code) {
        case lir_logic_and:
#ifndef _LP64
          __ and3(left->as_register_hi(), 0, dest->as_register_hi());
#endif
          __ and3(left->as_register_lo(), simm13, dest->as_register_lo());
          break;

        case lir_logic_or:
#ifndef _LP64
          __ or3(left->as_register_hi(), 0, dest->as_register_hi());
#endif
          __ or3(left->as_register_lo(), simm13, dest->as_register_lo());
          break;

        case lir_logic_xor:
#ifndef _LP64
          __ xor3(left->as_register_hi(), 0, dest->as_register_hi());
#endif
          __ xor3(left->as_register_lo(), simm13, dest->as_register_lo());
          break;

        default: ShouldNotReachHere();
      }
    }
  } else {
    assert(right->is_register(), "right should be in register");

    if (dest->is_single_cpu()) {
      switch (code) {
        case lir_logic_and: __ and3(left->as_register(), right->as_register(), dest->as_register()); break;
        case lir_logic_or:  __ or3 (left->as_register(), right->as_register(), dest->as_register()); break;
        case lir_logic_xor: __ xor3(left->as_register(), right->as_register(), dest->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else {
#ifdef _LP64
      Register l = (left->is_single_cpu() && left->is_oop_register())   ? left->as_register()  : left->as_register_lo();
      Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() : right->as_register_lo();

      switch (code) {
        case lir_logic_and: __ and3(l, r, dest->as_register_lo()); break;
        case lir_logic_or:  __ or3 (l, r, dest->as_register_lo()); break;
        case lir_logic_xor: __ xor3(l, r, dest->as_register_lo()); break;
        default: ShouldNotReachHere();
      }
#else
      switch (code) {
        case lir_logic_and:
          __ and3(left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
          __ and3(left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
          break;

        case lir_logic_or:
          __ or3(left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
          __ or3(left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
          break;

        case lir_logic_xor:
          __ xor3(left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
          __ xor3(left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
          break;

        default: ShouldNotReachHere();
      }
#endif
    }
  }
}


int LIR_Assembler::shift_amount(BasicType t) {
  int elem_size = type2aelembytes(t);
  switch (elem_size) {
    case 1 : return 0;
    case 2 : return 1;
    case 4 : return 2;
    case 8 : return 3;
  }
  ShouldNotReachHere();
  return -1;
}


void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == Oexception, "should match");
  assert(exceptionPC->as_register() == Oissuing_pc, "should match");

  info->add_register_oop(exceptionOop);

  // reuse the debug info from the safepoint poll for the throw op itself
  address pc_for_athrow = __ pc();
  int pc_for_athrow_offset = __ offset();
  RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
  __ set(pc_for_athrow, Oissuing_pc, rspec);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
}


void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == Oexception, "should match");

  __ br(Assembler::always, false, Assembler::pt, _unwind_handler_entry);
  __ delayed()->nop();
}
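
// emit_arraycopy below is organized as: (1) call the generic stub when no
// static type information is available; (2) otherwise emit null, position,
// length and range checks, each branching to the slow-path stub; (3) an
// optional type check, with a per-element checkcast stub for object arrays;
// (4) the bulk copy through a stub selected by element size, alignment and
// disjointness.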

void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  Register src     = op->src()->as_register();
  Register dst     = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length  = op->length()->as_register();
  Register tmp     = op->tmp()->as_register();
  Register tmp2    = O7;

  int flags = op->flags();
  ciArrayKlass* default_type = op->expected_type();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

#ifdef _LP64
  // the upper 32 bits must be zero
  __ sra(dst_pos, 0, dst_pos);
  __ sra(src_pos, 0, src_pos);
  __ sra(length, 0, length);
#endif

  // set up the arraycopy stub information
  ArrayCopyStub* stub = op->stub();

  // Always go through the stub if no type information is available. It's OK
  // if the known type isn't loaded, since the code sanity-checks in debug
  // mode and the type isn't required when we know the exact type.
  // Also check that the type is an array type.
  if (op->expected_type() == NULL) {
    __ mov(src,     O0);
    __ mov(src_pos, O1);
    __ mov(dst,     O2);
    __ mov(dst_pos, O3);
    __ mov(length,  O4);
    address copyfunc_addr = StubRoutines::generic_arraycopy();

    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
      __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy));
    } else {
#ifndef PRODUCT
      if (PrintC1Statistics) {
        address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
        __ inc_counter(counter, G1, G3);
      }
#endif
      __ call_VM_leaf(tmp, copyfunc_addr);
    }

    if (copyfunc_addr != NULL) {
      __ xor3(O0, -1, tmp);
      __ sub(length, tmp, length);
      __ add(src_pos, tmp, src_pos);
      __ cmp_zero_and_br(Assembler::less, O0, *stub->entry());
      __ delayed()->add(dst_pos, tmp, dst_pos);
    } else {
      __ cmp_zero_and_br(Assembler::less, O0, *stub->entry());
      __ delayed()->nop();
    }
    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");

  // make sure src and dst are non-null and load array length
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ tst(src);
    __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ tst(dst);
    __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }
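
  // The generic arraycopy stub returns 0 on success, or the bitwise
  // complement of the number of elements already copied on a partial copy;
  // the xor3(O0, -1, tmp) above recovers that count so src_pos, dst_pos and
  // length can be advanced before falling into the slow path.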

  // If the compiler was not able to prove that the exact type of the source
  // or the destination of the arraycopy is an array type, check at runtime
  // whether the source or the destination is an instance type.
  if (flags & LIR_OpArrayCopy::type_check) {
    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      __ load_klass(dst, tmp);
      __ lduw(tmp, in_bytes(Klass::layout_helper_offset()), tmp2);
      __ cmp(tmp2, Klass::_lh_neutral_value);
      __ br(Assembler::greaterEqual, false, Assembler::pn, *stub->entry());
      __ delayed()->nop();
    }

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(src, tmp);
      __ lduw(tmp, in_bytes(Klass::layout_helper_offset()), tmp2);
      __ cmp(tmp2, Klass::_lh_neutral_value);
      __ br(Assembler::greaterEqual, false, Assembler::pn, *stub->entry());
      __ delayed()->nop();
    }
  }
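
  // The layout_helper tests above rely on Klass::layout_helper() being
  // negative for array klasses and >= Klass::_lh_neutral_value for instance
  // klasses, so a single signed compare rejects non-arrays.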

  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    // test src_pos register
    __ cmp_zero_and_br(Assembler::less, src_pos, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    // test dst_pos register
    __ cmp_zero_and_br(Assembler::less, dst_pos, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    // make sure length isn't negative
    __ cmp_zero_and_br(Assembler::less, length, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ ld(src, arrayOopDesc::length_offset_in_bytes(), tmp2);
    __ add(length, src_pos, tmp);
    __ cmp(tmp2, tmp);
    __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ ld(dst, arrayOopDesc::length_offset_in_bytes(), tmp2);
    __ add(length, dst_pos, tmp);
    __ cmp(tmp2, tmp);
    __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  int shift = shift_amount(basic_type);

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays
      if (UseCompressedClassPointers) {
        // We don't need to decode because we just need to compare
        __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
        __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
        __ cmp(tmp, tmp2);
        __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
      } else {
        __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
        __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
        __ cmp(tmp, tmp2);
        __ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry());
      }
      __ delayed()->nop();
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      address copyfunc_addr = StubRoutines::checkcast_arraycopy();

      Label cont, slow;
      assert_different_registers(tmp, tmp2, G3, G1);

      __ load_klass(src, G3);
      __ load_klass(dst, G1);

      __ check_klass_subtype_fast_path(G3, G1, tmp, tmp2, &cont, copyfunc_addr == NULL ? stub->entry() : &slow, NULL);

      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
      __ delayed()->nop();

      __ cmp(G3, 0);
      if (copyfunc_addr != NULL) { // use stub if available
        // src is not a sub class of dst so we have to do a
        // per-element check.
        __ br(Assembler::notEqual, false, Assembler::pt, cont);
        __ delayed()->nop();

        __ bind(slow);

        int mask = LIR_OpArrayCopy::src_objarray | LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // Check that both of them are object arrays; one of the two is
          // already known statically.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(src, tmp);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(dst, tmp);
          }
          int lh_offset = in_bytes(Klass::layout_helper_offset());

          __ lduw(tmp, lh_offset, tmp2);

          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ set(objArray_lh, tmp);
          __ cmp(tmp, tmp2);
          __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
          __ delayed()->nop();
        }

        Register src_ptr = O0;
        Register dst_ptr = O1;
        Register len     = O2;
        Register chk_off = O3;
        Register super_k = O4;

        __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
        if (shift == 0) {
          __ add(src_ptr, src_pos, src_ptr);
        } else {
          __ sll(src_pos, shift, tmp);
          __ add(src_ptr, tmp, src_ptr);
        }

        __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
        if (shift == 0) {
          __ add(dst_ptr, dst_pos, dst_ptr);
        } else {
          __ sll(dst_pos, shift, tmp);
          __ add(dst_ptr, tmp, dst_ptr);
        }
        __ mov(length, len);
        __ load_klass(dst, tmp);

        int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
        __ ld_ptr(tmp, ek_offset, super_k);

        int sco_offset = in_bytes(Klass::super_check_offset_offset());
        __ lduw(super_k, sco_offset, chk_off);

        __ call_VM_leaf(tmp, copyfunc_addr);

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ br_notnull_short(O0, Assembler::pn, failed);
          __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, G1, G3);
          __ bind(failed);
        }
#endif

        __ br_null(O0, false, Assembler::pt, *stub->continuation());
        __ delayed()->xor3(O0, -1, tmp);

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, G1, G3);
        }
#endif

        __ sub(length, tmp, length);
        __ add(src_pos, tmp, src_pos);
        __ br(Assembler::always, false, Assembler::pt, *stub->entry());
        __ delayed()->add(dst_pos, tmp, dst_pos);

        __ bind(cont);
      } else {
        __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
        __ delayed()->nop();
        __ bind(cont);
      }
    }
  }
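
  // The checkcast_arraycopy stub called above takes src_ptr, dst_ptr, len,
  // chk_off and super_k in O0..O4 and, like the generic stub, returns 0 on
  // success or ~(elements copied) on a partial copy, which is why O0 is
  // tested against null and complemented before re-entering the slow path.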

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    metadata2reg(op->expected_type()->constant_encoding(), tmp);
    if (UseCompressedClassPointers) {
      // tmp holds the default type. It currently comes uncompressed after the
      // load of a constant, so encode it.
      __ encode_klass_not_null(tmp);
      // load the raw value of the dst klass, since we will be comparing
      // uncompressed values directly.
      __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
      if (basic_type != T_OBJECT) {
        __ cmp(tmp, tmp2);
        __ br(Assembler::notEqual, false, Assembler::pn, halt);
        // load the raw value of the src klass.
        __ delayed()->lduw(src, oopDesc::klass_offset_in_bytes(), tmp2);
        __ cmp_and_br_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
      } else {
        __ cmp(tmp, tmp2);
        __ br(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->cmp(src, dst);
        __ brx(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->nop();
      }
    } else {
      __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
      if (basic_type != T_OBJECT) {
        __ cmp(tmp, tmp2);
        __ brx(Assembler::notEqual, false, Assembler::pn, halt);
        __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
        __ cmp_and_brx_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
      } else {
        __ cmp(tmp, tmp2);
        __ brx(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->cmp(src, dst);
        __ brx(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->nop();
      }
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    address counter = Runtime1::arraycopy_count_address(basic_type);
    __ inc_counter(counter, G1, G3);
  }
#endif

  Register src_ptr = O0;
  Register dst_ptr = O1;
  Register len     = O2;

  __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
  if (shift == 0) {
    __ add(src_ptr, src_pos, src_ptr);
  } else {
    __ sll(src_pos, shift, tmp);
    __ add(src_ptr, tmp, src_ptr);
  }

  __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
  if (shift == 0) {
    __ add(dst_ptr, dst_pos, dst_ptr);
  } else {
    __ sll(dst_pos, shift, tmp);
    __ add(dst_ptr, tmp, dst_ptr);
  }

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);

  // arraycopy stubs take a length in number of elements, so don't scale it.
  __ mov(length, len);
  __ call_VM_leaf(tmp, entry);

  __ bind(*stub->continuation());
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  if (dest->is_single_cpu()) {
#ifdef _LP64
    if (left->type() == T_OBJECT) {
      switch (code) {
        case lir_shl:  __ sllx(left->as_register(), count->as_register(), dest->as_register()); break;
        case lir_shr:  __ srax(left->as_register(), count->as_register(), dest->as_register()); break;
        case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else
#endif
      switch (code) {
        case lir_shl:  __ sll(left->as_register(), count->as_register(), dest->as_register()); break;
        case lir_shr:  __ sra(left->as_register(), count->as_register(), dest->as_register()); break;
        case lir_ushr: __ srl(left->as_register(), count->as_register(), dest->as_register()); break;
        default: ShouldNotReachHere();
      }
  } else {
#ifdef _LP64
    switch (code) {
      case lir_shl:  __ sllx(left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
      case lir_shr:  __ srax(left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
      case lir_ushr: __ srlx(left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
      default: ShouldNotReachHere();
    }
#else
    switch (code) {
      case lir_shl:  __ lshl (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
      case lir_shr:  __ lshr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
      case lir_ushr: __ lushr(left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
      default: ShouldNotReachHere();
    }
#endif
  }
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
#ifdef _LP64
  if (left->type() == T_OBJECT) {
    count = count & 63;  // shouldn't shift by more than sizeof(intptr_t)
    Register l = left->as_register();
    Register d = dest->as_register_lo();
    switch (code) {
      case lir_shl:  __ sllx(l, count, d); break;
      case lir_shr:  __ srax(l, count, d); break;
      case lir_ushr: __ srlx(l, count, d); break;
      default: ShouldNotReachHere();
    }
    return;
  }
#endif

  if (dest->is_single_cpu()) {
    count = count & 0x1F; // Java spec
    switch (code) {
      case lir_shl:  __ sll(left->as_register(), count, dest->as_register()); break;
      case lir_shr:  __ sra(left->as_register(), count, dest->as_register()); break;
      case lir_ushr: __ srl(left->as_register(), count, dest->as_register()); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_cpu()) {
    count = count & 63; // Java spec
    switch (code) {
      case lir_shl:  __ sllx(left->as_pointer_register(), count, dest->as_pointer_register()); break;
      case lir_shr:  __ srax(left->as_pointer_register(), count, dest->as_pointer_register()); break;
      case lir_ushr: __ srlx(left->as_pointer_register(), count, dest->as_pointer_register()); break;
      default: ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
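
// The count masking above (count & 0x1F for 32-bit shifts, count & 63 for
// 64-bit shifts) mirrors the Java rule that only the low five or six bits
// of the shift distance are significant (JLS 15.19).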


void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  assert(op->tmp1()->as_register()  == G1 &&
         op->tmp2()->as_register()  == G3 &&
         op->tmp3()->as_register()  == G4 &&
         op->obj()->as_register()   == O0 &&
         op->klass()->as_register() == G5, "must be");
  if (op->init_check()) {
    __ ldub(op->klass()->as_register(),
            in_bytes(InstanceKlass::init_state_offset()),
            op->tmp1()->as_register());
    add_debug_info_for_null_check_here(op->stub()->info());
    __ cmp(op->tmp1()->as_register(), InstanceKlass::fully_initialized);
    __ br(Assembler::notEqual, false, Assembler::pn, *op->stub()->entry());
    __ delayed()->nop();
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->tmp3()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
  __ verify_oop(op->obj()->as_register());
}


void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  assert(op->tmp1()->as_register()  == G1 &&
         op->tmp2()->as_register()  == G3 &&
         op->tmp3()->as_register()  == G4 &&
         op->tmp4()->as_register()  == O1 &&
         op->klass()->as_register() == G5, "must be");

  LP64_ONLY( __ signx(op->len()->as_register()); )
  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
    __ delayed()->nop();
  } else {
    __ allocate_array(op->obj()->as_register(),
                      op->len()->as_register(),
                      op->tmp1()->as_register(),
                      op->tmp2()->as_register(),
                      op->tmp3()->as_register(),
                      arrayOopDesc::header_size(op->type()),
                      type2aelembytes(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}
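
// type_profile_helper makes two passes over the ReceiverTypeData rows: the
// first pass increments the counter of a row that already records recv, the
// second claims the first empty row for it. If both passes fail, control
// falls through to the caller's polymorphic case.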


void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Register tmp1, Label* update_done) {
  uint i;
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                          mdo_offset_bias);
    __ ld_ptr(receiver_addr, tmp1);
    __ verify_klass_ptr(tmp1);
    __ cmp_and_brx_short(recv, tmp1, Assembler::notEqual, Assembler::pt, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                      mdo_offset_bias);
    __ ld_ptr(data_addr, tmp1);
    __ add(tmp1, DataLayout::counter_increment, tmp1);
    __ st_ptr(tmp1, data_addr);
    __ ba(*update_done);
    __ delayed()->nop();
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                      mdo_offset_bias);
    __ ld_ptr(recv_addr, tmp1);
    __ br_notnull_short(tmp1, Assembler::pt, next_test);
    __ st_ptr(recv, recv_addr);
    __ set(DataLayout::counter_increment, tmp1);
    __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
              mdo_offset_bias);
    __ ba(*update_done);
    __ delayed()->nop();
    __ bind(next_test);
  }
}


void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
                                    ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
  md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  data = md->bci_to_data(bci);
  assert(data != NULL, "need data for checkcast");
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ld can use simm13s to reference the slots of the data
    mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
  }
}
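
// Callers apply mdo_offset_bias by materializing it once and adding it to
// the MDO base register; every subsequent Address into the MethodData then
// subtracts the bias from its displacement so the result still fits the
// 13-bit signed immediate field.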

void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  Register Rtmp1 = op->tmp3()->as_register();
  ciKlass* k = op->klass();


  if (obj == k_RInfo) {
    k_RInfo = klass_RInfo;
    klass_RInfo = obj;
  }

  ciMethodData* md;
  ciProfileData* data;
  int mdo_offset_bias = 0;
  if (op->should_profile()) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);

    Label not_null;
    __ br_notnull_short(obj, Assembler::pn, not_null);
    Register mdo      = k_RInfo;
    Register data_val = Rtmp1;
    metadata2reg(md->constant_encoding(), mdo);
    if (mdo_offset_bias > 0) {
      __ set(mdo_offset_bias, data_val);
      __ add(mdo, data_val, mdo);
    }
    Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
    __ ldub(flags_addr, data_val);
    __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
    __ stb(data_val, flags_addr);
    __ ba(*obj_is_null);
    __ delayed()->nop();
    __ bind(not_null);
  } else {
    __ br_null(obj, false, Assembler::pn, *obj_is_null);
    __ delayed()->nop();
  }

  Label profile_cast_failure, profile_cast_success;
  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
  Label *success_target = op->should_profile() ? &profile_cast_success : success;

  // patching may screw with our temporaries on sparc,
  // so let's do it before loading the class
  if (k->is_loaded()) {
    metadata2reg(k->constant_encoding(), k_RInfo);
  } else {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  }
  assert(obj != k_RInfo, "must be different");

  // get object class
  // not a safepoint as obj null check happens earlier
  __ load_klass(obj, klass_RInfo);
  if (op->fast_check()) {
    assert_different_registers(klass_RInfo, k_RInfo);
    __ cmp(k_RInfo, klass_RInfo);
    __ brx(Assembler::notEqual, false, Assembler::pt, *failure_target);
    __ delayed()->nop();
  } else {
    bool need_slow_path = true;
    if (k->is_loaded()) {
      if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset()))
        need_slow_path = false;
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
                                       (need_slow_path ? success_target : NULL),
                                       failure_target, NULL,
                                       RegisterOrConstant(k->super_check_offset()));
    } else {
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target,
                                       failure_target, NULL);
    }
    if (need_slow_path) {
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
      __ delayed()->nop();
      __ cmp(G3, 0);
      __ br(Assembler::equal, false, Assembler::pn, *failure_target);
      __ delayed()->nop();
      // Fall through to success case
    }
  }

  if (op->should_profile()) {
    Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
    assert_different_registers(obj, mdo, recv, tmp1);
    __ bind(profile_cast_success);
    metadata2reg(md->constant_encoding(), mdo);
    if (mdo_offset_bias > 0) {
      __ set(mdo_offset_bias, tmp1);
      __ add(mdo, tmp1, mdo);
    }
    __ load_klass(obj, recv);
    type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
    // Jump over the failure case
    __ ba(*success);
    __ delayed()->nop();
    // Cast failure case
    __ bind(profile_cast_failure);
    metadata2reg(md->constant_encoding(), mdo);
    if (mdo_offset_bias > 0) {
      __ set(mdo_offset_bias, tmp1);
      __ add(mdo, tmp1, mdo);
    }
    Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
    __ ld_ptr(data_addr, tmp1);
    __ sub(tmp1, DataLayout::counter_increment, tmp1);
    __ st_ptr(tmp1, data_addr);
    __ ba(*failure);
    __ delayed()->nop();
  }
  __ ba(*success);
  __ delayed()->nop();
}
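
// emit_typecheck_helper never falls through: it always branches to one of
// the three labels (success, failure, obj_is_null). The
// Runtime1::slow_subtype_check_id stub returns its result in G3, with zero
// meaning the subtype check failed.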

void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();

    __ verify_oop(value);
    CodeStub* stub = op->stub();
    // check if it needs to be profiled
    ciMethodData* md;
    ciProfileData* data;
    int mdo_offset_bias = 0;
    if (op->should_profile()) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
    }
    Label profile_cast_success, profile_cast_failure, done;
    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

    if (op->should_profile()) {
      Label not_null;
      __ br_notnull_short(value, Assembler::pn, not_null);
      Register mdo      = k_RInfo;
      Register data_val = Rtmp1;
      metadata2reg(md->constant_encoding(), mdo);
      if (mdo_offset_bias > 0) {
        __ set(mdo_offset_bias, data_val);
        __ add(mdo, data_val, mdo);
      }
      Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
      __ ldub(flags_addr, data_val);
      __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
      __ stb(data_val, flags_addr);
      __ ba_short(done);
      __ bind(not_null);
    } else {
      __ br_null_short(value, Assembler::pn, done);
    }
    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(array, k_RInfo);
    __ load_klass(value, klass_RInfo);

    // get instance klass
    __ ld_ptr(Address(k_RInfo, ObjArrayKlass::element_klass_offset()), k_RInfo);
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL);

    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
    assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
    __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
    __ delayed()->nop();
    __ cmp(G3, 0);
    __ br(Assembler::equal, false, Assembler::pn, *failure_target);
    __ delayed()->nop();
    // fall through to the success case

    if (op->should_profile()) {
      Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
      assert_different_registers(value, mdo, recv, tmp1);
      __ bind(profile_cast_success);
      metadata2reg(md->constant_encoding(), mdo);
      if (mdo_offset_bias > 0) {
        __ set(mdo_offset_bias, tmp1);
        __ add(mdo, tmp1, mdo);
      }
      __ load_klass(value, recv);
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
      __ ba_short(done);
      // Cast failure case
      __ bind(profile_cast_failure);
      metadata2reg(md->constant_encoding(), mdo);
      if (mdo_offset_bias > 0) {
        __ set(mdo_offset_bias, tmp1);
        __ add(mdo, tmp1, mdo);
      }
      Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
      __ ld_ptr(data_addr, tmp1);
      __ sub(tmp1, DataLayout::counter_increment, tmp1);
      __ st_ptr(tmp1, data_addr);
      __ ba(*stub->entry());
      __ delayed()->nop();
    }
    __ bind(done);
  } else if (code == lir_checkcast) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success;
    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
    __ bind(success);
    __ mov(obj, dst);
  } else if (code == lir_instanceof) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    emit_typecheck_helper(op, &success, &failure, &failure);
    __ bind(failure);
    __ set(0, dst);
    __ ba_short(done);
    __ bind(success);
    __ set(1, dst);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}
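
// SPARC compare-and-swap notes: cas/casx compare the value at addr with t1
// and, if equal, store t2, always leaving the old memory value in t2.
// Success is therefore detected afterwards by comparing t1 with t2.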

void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  if (op->code() == lir_cas_long) {
    assert(VM_Version::supports_cx8(), "wrong machine");
    Register addr = op->addr()->as_pointer_register();
    Register cmp_value_lo = op->cmp_value()->as_register_lo();
    Register cmp_value_hi = op->cmp_value()->as_register_hi();
    Register new_value_lo = op->new_value()->as_register_lo();
    Register new_value_hi = op->new_value()->as_register_hi();
    Register t1 = op->tmp1()->as_register();
    Register t2 = op->tmp2()->as_register();
#ifdef _LP64
    __ mov(cmp_value_lo, t1);
    __ mov(new_value_lo, t2);
    // perform the compare and swap operation
    __ casx(addr, t1, t2);
    // generate condition code - if the swap succeeded, t2 ("new value" reg) was
    // overwritten with the original value in "addr" and will be equal to t1.
    __ cmp(t1, t2);
#else
    // move high and low halves of long values into single registers
    __ sllx(cmp_value_hi, 32, t1);         // shift high half into temp reg
    __ srl(cmp_value_lo, 0, cmp_value_lo); // clear upper 32 bits of low half
    __ or3(t1, cmp_value_lo, t1);          // t1 holds 64-bit compare value
    __ sllx(new_value_hi, 32, t2);
    __ srl(new_value_lo, 0, new_value_lo);
    __ or3(t2, new_value_lo, t2);          // t2 holds 64-bit value to swap
    // perform the compare and swap operation
    __ casx(addr, t1, t2);
    // generate condition code - if the swap succeeded, t2 ("new value" reg) was
    // overwritten with the original value in "addr" and will be equal to t1.
    // Produce icc flag for 32bit.
    __ sub(t1, t2, t2);
    __ srlx(t2, 32, t1);
    __ orcc(t2, t1, G0);
#endif
  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    Register addr = op->addr()->as_pointer_register();
    Register cmp_value = op->cmp_value()->as_register();
    Register new_value = op->new_value()->as_register();
    Register t1 = op->tmp1()->as_register();
    Register t2 = op->tmp2()->as_register();
    __ mov(cmp_value, t1);
    __ mov(new_value, t2);
    if (op->code() == lir_cas_obj) {
      if (UseCompressedOops) {
        __ encode_heap_oop(t1);
        __ encode_heap_oop(t2);
        __ cas(addr, t1, t2);
      } else {
        __ cas_ptr(addr, t1, t2);
      }
    } else {
      __ cas(addr, t1, t2);
    }
    __ cmp(t1, t2);
  } else {
    Unimplemented();
  }
}

void LIR_Assembler::set_24bit_FPU() {
  Unimplemented();
}


void LIR_Assembler::reset_FPU() {
  Unimplemented();
}


void LIR_Assembler::breakpoint() {
  __ breakpoint_trap();
}


void LIR_Assembler::push(LIR_Opr opr) {
  Unimplemented();
}


void LIR_Assembler::pop(LIR_Opr opr) {
  Unimplemented();
}


void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
  Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  Register dst = dst_opr->as_register();
  Register reg = mon_addr.base();
  int offset = mon_addr.disp();
  // compute pointer to BasicLock
  if (mon_addr.is_simm13()) {
    __ add(reg, offset, dst);
  } else {
    __ set(offset, dst);
    __ add(dst, reg, dst);
  }
}
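
// emit_updatecrc32 below performs the standard table-driven, reflected
// CRC-32 byte step (roughly crc = table[(crc ^ val) & 0xff] ^ (crc >>> 8),
// done inside update_byte_crc32); the surrounding not1 calls supply the
// pre- and post-inversion that CRC-32 requires.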

void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register table = op->result_opr()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, table);

  __ set(ExternalAddress(StubRoutines::crc_table_addr()), table);
  __ not1(crc);
  __ clruwu(crc);
  __ update_byte_crc32(crc, val, table);
  __ not1(crc);

  __ mov(crc, res);
}

void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();

  // obj may not be an oop
  if (op->code() == lir_lock) {
    MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
    if (UseFastLocking) {
      assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
      // add debug info for NullPointerException only if one is possible
      if (op->info() != NULL) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
    } else {
      // always do slow locking
      // note: the slow locking code could be inlined here, however if we use
      //       slow locking, speed doesn't matter anyway and this solution is
      //       simpler and requires less duplicated code - additionally, the
      //       slow locking code is the same in either case which simplifies
      //       debugging
      __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
      __ delayed()->nop();
    }
  } else {
    assert(op->code() == lir_unlock, "Invalid code, expected lir_unlock");
    if (UseFastLocking) {
      assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
      __ unlock_object(hdr, obj, lock, *op->stub()->entry());
    } else {
      // always do slow unlocking
      // note: the slow unlocking code could be inlined here, however if we use
      //       slow unlocking, speed doesn't matter anyway and this solution is
      //       simpler and requires less duplicated code - additionally, the
      //       slow unlocking code is the same in either case which simplifies
      //       debugging
      __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
      __ delayed()->nop();
    }
  }
  __ bind(*op->stub()->continuation());
}
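
// emit_profile_call below bumps the MethodData call counter for every call
// and, for invokevirtual/invokeinterface under C1ProfileVirtualCalls, also
// records receiver types: statically when the holder is known, otherwise
// through type_profile_helper at run time.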


void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci          = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
#ifdef _LP64
  assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register_lo();
#else
  assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register();
#endif
  metadata2reg(md->constant_encoding(), mdo);
  int mdo_offset_bias = 0;
  if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
                            data->size_in_bytes())) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ld can use simm13s to reference the slots of the data
    mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
    __ set(mdo_offset_bias, O7);
    __ add(mdo, O7, mdo);
  }

  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
  Bytecodes::Code bc = method->java_code_at_bci(bci);
  const bool callee_is_static = callee->is_loaded() && callee->is_static();
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
      !callee_is_static &&  // required for optimized MH invokes
      C1ProfileVirtualCalls) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, tmp1, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data,
                                                         VirtualCallData::receiver_count_offset(i)) -
                            mdo_offset_bias);
          __ ld_ptr(data_addr, tmp1);
          __ add(tmp1, DataLayout::counter_increment, tmp1);
          __ st_ptr(tmp1, data_addr);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
                            mdo_offset_bias);
          metadata2reg(known_klass->constant_encoding(), tmp1);
          __ st_ptr(tmp1, recv_addr);
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
                            mdo_offset_bias);
          __ ld_ptr(data_addr, tmp1);
          __ add(tmp1, DataLayout::counter_increment, tmp1);
          __ st_ptr(tmp1, data_addr);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ ld_ptr(counter_addr, tmp1);
      __ add(tmp1, DataLayout::counter_increment, tmp1);
      __ st_ptr(tmp1, counter_addr);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ ld_ptr(counter_addr, tmp1);
    __ add(tmp1, DataLayout::counter_increment, tmp1);
    __ st_ptr(tmp1, counter_addr);
  }
}
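
// emit_profile_type below updates a TypeEntries cell: a klass pointer with
// low flag bits (null_seen, type_unknown) folded in. That is why klasses
// are compared with xor3 under type_klass_mask and flag bits are set with
// or3 instead of storing a raw klass pointer.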

void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp1 = op->tmp()->as_pointer_register();
  Register tmp2 = G1;
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj);

  if (tmp1 != obj) {
    __ mov(obj, tmp1);
  }
  if (do_null) {
    __ br_notnull_short(tmp1, Assembler::pt, update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ ld_ptr(mdo_addr, tmp1);
      __ or3(tmp1, TypeEntries::null_seen, tmp1);
      __ st_ptr(tmp1, mdo_addr);
    }
    if (do_update) {
      __ ba(next);
      __ delayed()->nop();
    }
#ifdef ASSERT
  } else {
    __ br_notnull_short(tmp1, Assembler::pt, update);
    __ stop("unexpected null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != NULL) {
      Label ok;
      __ load_klass(tmp1, tmp1);
      metadata2reg(exact_klass->constant_encoding(), tmp2);
      __ cmp_and_br_short(tmp1, tmp2, Assembler::equal, Assembler::pt, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
    }
#endif

    Label do_update;
    __ ld_ptr(mdo_addr, tmp2);

    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        if (exact_klass != NULL) {
          metadata2reg(exact_klass->constant_encoding(), tmp1);
        } else {
          __ load_klass(tmp1, tmp1);
        }

        __ xor3(tmp1, tmp2, tmp1);
        __ btst(TypeEntries::type_klass_mask, tmp1);
        // klass seen before, nothing to do. The unknown bit may have been
        // set already but no need to check.
        __ brx(Assembler::zero, false, Assembler::pt, next);
        __ delayed()->btst(TypeEntries::type_unknown, tmp1);
        // already unknown. Nothing to do anymore.
        __ brx(Assembler::notZero, false, Assembler::pt, next);

        if (TypeEntries::is_type_none(current_klass)) {
          __ delayed()->btst(TypeEntries::type_mask, tmp2);
          __ brx(Assembler::zero, true, Assembler::pt, do_update);
          // first time here. Set profile type.
          __ delayed()->or3(tmp2, tmp1, tmp2);
        } else {
          __ delayed()->nop();
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ btst(TypeEntries::type_unknown, tmp2);
        // already unknown. Nothing to do anymore.
        __ brx(Assembler::notZero, false, Assembler::pt, next);
        __ delayed()->nop();
      }

      // different than before. Cannot keep accurate profile.
      __ or3(tmp2, TypeEntries::type_unknown, tmp2);
    } else {
      // There's a single possible klass at this profile point
      assert(exact_klass != NULL, "should be");
      if (TypeEntries::is_type_none(current_klass)) {
        metadata2reg(exact_klass->constant_encoding(), tmp1);
        __ xor3(tmp1, tmp2, tmp1);
        __ btst(TypeEntries::type_klass_mask, tmp1);
        __ brx(Assembler::zero, false, Assembler::pt, next);
#ifdef ASSERT
        {
          Label ok;
          __ delayed()->btst(TypeEntries::type_mask, tmp2);
          __ brx(Assembler::zero, true, Assembler::pt, ok);
          __ delayed()->nop();

          __ stop("unexpected profiling mismatch");
          __ bind(ok);
        }
        // first time here. Set profile type.
        __ or3(tmp2, tmp1, tmp2);
#else
        // first time here. Set profile type.
        __ delayed()->or3(tmp2, tmp1, tmp2);
#endif
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        // already unknown. Nothing to do anymore.
        __ btst(TypeEntries::type_unknown, tmp2);
        __ brx(Assembler::notZero, false, Assembler::pt, next);
        __ delayed()->or3(tmp2, TypeEntries::type_unknown, tmp2);
      }
    }

    __ bind(do_update);
    __ st_ptr(tmp2, mdo_addr);

    __ bind(next);
  }
}

void LIR_Assembler::align_backward_branch_target() {
  __ align(OptoLoopAlignment);
}


void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
  // make sure we are expecting a delay
  // this has the side effect of clearing the delay state
  // so we can use _masm instead of _masm->delayed() to do the
  // code generation.
  __ delayed();

  // make sure we only emit one instruction
  int offset = code_offset();
  op->delay_op()->emit_code(this);
#ifdef ASSERT
  if (code_offset() - offset != NativeInstruction::nop_instruction_size) {
    op->delay_op()->print();
  }
  assert(code_offset() - offset == NativeInstruction::nop_instruction_size,
         "only one instruction can go in a delay slot");
#endif

  // we may also be emitting the call info for the instruction
  // which we are the delay slot of.
  CodeEmitInfo* call_info = op->call_info();
  if (call_info) {
    add_call_info(code_offset(), call_info);
  }

  if (VerifyStackAtCalls) {
    _masm->sub(FP, SP, O7);
    _masm->cmp(O7, initial_frame_size_in_bytes());
    _masm->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2);
  }
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  assert(left->is_register(), "can only handle registers");

  if (left->is_single_cpu()) {
    __ neg(left->as_register(), dest->as_register());
  } else if (left->is_single_fpu()) {
    __ fneg(FloatRegisterImpl::S, left->as_float_reg(), dest->as_float_reg());
  } else if (left->is_double_fpu()) {
    __ fneg(FloatRegisterImpl::D, left->as_double_reg(), dest->as_double_reg());
  } else {
    assert(left->is_double_cpu(), "Must be a long");
    Register Rlow = left->as_register_lo();
    Register Rhi = left->as_register_hi();
#ifdef _LP64
    __ sub(G0, Rlow, dest->as_register_lo());
#else
    __ subcc(G0, Rlow, dest->as_register_lo());
    __ subc (G0, Rhi,  dest->as_register_hi());
#endif
  }
}


void LIR_Assembler::fxch(int i) {
  Unimplemented();
}

void LIR_Assembler::fld(int i) {
  Unimplemented();
}

void LIR_Assembler::ffree(int i) {
  Unimplemented();
}

void LIR_Assembler::rt_call(LIR_Opr result, address dest,
                            const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {

  // if tmp is invalid, then the function being called doesn't destroy the thread
  if (tmp->is_valid()) {
    __ save_thread(tmp->as_pointer_register());
  }
  __ call(dest, relocInfo::runtime_call_type);
  __ delayed()->nop();
  if (info != NULL) {
    add_call_info_here(info);
  }
  if (tmp->is_valid()) {
    __ restore_thread(tmp->as_pointer_register());
  }

#ifdef ASSERT
  __ verify_thread();
#endif // ASSERT
}
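
// On 32-bit SPARC a volatile Java long must be accessed with a single
// 64-bit ldx/stx to stay atomic, so volatile_move_op below packs the two
// 32-bit register halves into G4 around the access (using G5 as scratch);
// the LP64 build never takes this path (ShouldNotReachHere).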
    LIR_Address* mem_addr = dest->is_address() ? dest->as_address_ptr() : src->as_address_ptr();

    // (extended to allow indexed as well as constant displaced for JSR-166)
    Register idx = noreg; // contains either constant offset or index

    int disp = mem_addr->disp();
    if (mem_addr->index() == LIR_OprFact::illegalOpr) {
      if (!Assembler::is_simm13(disp)) {
        idx = O7;
        __ set(disp, idx);
      }
    } else {
      assert(disp == 0, "not both indexed and disp");
      idx = mem_addr->index()->as_register();
    }

    int null_check_offset = -1;

    Register base = mem_addr->base()->as_register();
    if (src->is_register() && dest->is_address()) {
      // G4 is high half, G5 is low half
      // clear the top bits of G5, and scale up G4
      __ srl (src->as_register_lo(), 0, G5);
      __ sllx(src->as_register_hi(), 32, G4);
      // combine the two halves into the 64 bits of G4
      __ or3(G4, G5, G4);
      null_check_offset = __ offset();
      if (idx == noreg) {
        __ stx(G4, base, disp);
      } else {
        __ stx(G4, base, idx);
      }
    } else if (src->is_address() && dest->is_register()) {
      null_check_offset = __ offset();
      if (idx == noreg) {
        __ ldx(base, disp, G5);
      } else {
        __ ldx(base, idx, G5);
      }
      __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
      __ mov (G5, dest->as_register_lo());     // copy low half into lo
    } else {
      Unimplemented();
    }
    if (info != NULL) {
      add_debug_info_for_null_check(null_check_offset, info);
    }

  } else {
    // use normal move for all other volatiles since they don't need
    // special handling to remain atomic.
    move_op(src, dest, type, lir_patch_none, info, false, false, false);
  }
}

void LIR_Assembler::membar() {
  // only StoreLoad membars are ever explicitly needed on sparcs in TSO mode
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

void LIR_Assembler::membar_acquire() {
  // no-op on TSO
}

void LIR_Assembler::membar_release() {
  // no-op on TSO
}

void LIR_Assembler::membar_loadload() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
}

void LIR_Assembler::membar_storestore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
}

void LIR_Assembler::membar_loadstore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
}

void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

void LIR_Assembler::on_spin_wait() {
  Unimplemented();
}

// Pack two sequential registers containing 32 bit values
// into a single 64 bit register.
// src and src->successor() are packed into dst
// src and dst may be the same register.
// Note: src is destroyed
void LIR_Assembler::pack64(LIR_Opr src, LIR_Opr dst) {
  Register rs = src->as_register();
  Register rd = dst->as_register_lo();
  __ sllx(rs, 32, rs);
  __ srl(rs->successor(), 0, rs->successor());
  __ or3(rs, rs->successor(), rd);
}
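
// For example (a sketch, with hypothetical register contents): if src is the
// O0/O1 pair with O0 (the high half) = 0x00000001 and O1 (the low half) =
// 0x00000002, pack64 leaves dst = 0x0000000100000002, i.e.
// dst = (hi << 32) | lo, with the low half zero-extended.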

// Unpack a 64 bit value in a register into
// two sequential registers.
// src is unpacked into dst and dst->successor()
void LIR_Assembler::unpack64(LIR_Opr src, LIR_Opr dst) {
  Register rs = src->as_register_lo();
  Register rd = dst->as_register_hi();
  assert_different_registers(rs, rd, rd->successor());
  __ srlx(rs, 32, rd);
  __ srl (rs,  0, rd->successor());
}


void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
  LIR_Address* addr = addr_opr->as_address_ptr();
  assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1, "can't handle complex addresses yet");

  if (Assembler::is_simm13(addr->disp())) {
    __ add(addr->base()->as_pointer_register(), addr->disp(), dest->as_pointer_register());
  } else {
    __ set(addr->disp(), G3_scratch);
    __ add(addr->base()->as_pointer_register(), G3_scratch, dest->as_pointer_register());
  }
}


void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
  __ mov(G2_thread, result_reg->as_register());
}

#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    Assembler::Condition acond;
    switch (op->condition()) {
      case lir_cond_equal:        acond = Assembler::equal;                break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
      case lir_cond_less:         acond = Assembler::less;                 break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
      case lir_cond_greater:      acond = Assembler::greater;              break;
      case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
      case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
      default:                    ShouldNotReachHere();
    }
    __ br(acond, false, Assembler::pt, ok);
    __ delayed()->nop();
  }
  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif

void LIR_Assembler::peephole(LIR_List* lir) {
  LIR_OpList* inst = lir->instructions_list();
  for (int i = 0; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);
    switch (op->code()) {
      case lir_cond_float_branch:
      case lir_branch: {
        LIR_OpBranch* branch = op->as_OpBranch();
        assert(branch->info() == NULL, "shouldn't be state on branches anymore");
        LIR_Op* delay_op = NULL;
        // We'd like to be able to pull following instructions into this
        // slot, but we don't know enough to do it safely yet, so we only
        // optimize block-to-block control flow.
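        //
        // The rewrite below, sketched in LIR pseudo-ops (the operand names
        // are illustrative only):
        //   ..., move src -> dst, branch B2
        // becomes
        //   ..., branch B2, delay(move src -> dst)
        // i.e. the single-instruction predecessor is swapped into the
        // branch's delay slot instead of padding the slot with a nop.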
        if (LIRFillDelaySlots && branch->block()) {
          LIR_Op* prev = inst->at(i - 1);
          if (prev && LIR_Assembler::is_single_instruction(prev) && prev->info() == NULL) {
            // swap previous instruction into delay slot
            inst->at_put(i - 1, op);
            inst->at_put(i, new LIR_OpDelay(prev, op->info()));
#ifndef PRODUCT
            if (LIRTracePeephole) {
              tty->print_cr("delayed");
              inst->at(i - 1)->print();
              inst->at(i)->print();
              tty->cr();
            }
#endif
            continue;
          }
        }

        if (!delay_op) {
          delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), NULL);
        }
        inst->insert_before(i + 1, delay_op);
        break;
      }
      case lir_static_call:
      case lir_virtual_call:
      case lir_icvirtual_call:
      case lir_optvirtual_call:
      case lir_dynamic_call: {
        LIR_Op* prev = inst->at(i - 1);
        if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL &&
            (op->code() != lir_virtual_call ||
             !prev->result_opr()->is_single_cpu() ||
             prev->result_opr()->as_register() != O0) &&
            LIR_Assembler::is_single_instruction(prev)) {
          // Only moves without info can be put into the delay slot.
          // Also don't allow the setup of the receiver in the delay
          // slot for vtable calls.
          inst->at_put(i - 1, op);
          inst->at_put(i, new LIR_OpDelay(prev, op->info()));
#ifndef PRODUCT
          if (LIRTracePeephole) {
            tty->print_cr("delayed");
            inst->at(i - 1)->print();
            inst->at(i)->print();
            tty->cr();
          }
#endif
        } else {
          LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
          inst->insert_before(i + 1, delay_op);
          i++;
        }

#if defined(TIERED) && !defined(_LP64)
        // Fix up the return value from G1 to O0/O1 for long returns.
        // It's done here instead of in LIRGenerator because there's such a
        // mismatch between the single-reg and double-reg calling conventions.
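        //
        // Roughly (a sketch of the rewrite, not additional behavior): a call
        // that was to return its long result in the O0/O1 pair is redirected
        // to return it in the single 64-bit G1 register, and a lir_unpack64
        // op is inserted after the call to split G1 back into the expected
        // register pair.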
        LIR_OpJavaCall* callop = op->as_OpJavaCall();
        if (callop->result_opr() == FrameMap::out_long_opr) {
          LIR_OpJavaCall* call;
          LIR_OprList* arguments = new LIR_OprList(callop->arguments()->length());
          // copy the argument operands into the new list
          for (int a = 0; a < callop->arguments()->length(); a++) {
            arguments->append(callop->arguments()->at(a));
          }
          if (op->code() == lir_virtual_call) {
            call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
                                      callop->vtable_offset(), arguments, callop->info());
          } else {
            call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
                                      callop->addr(), arguments, callop->info());
          }
          inst->at_put(i - 1, call);
          inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(),
                                                 T_LONG, lir_patch_none, NULL));
        }
#endif
        break;
      }
    }
  }
}

void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  LIR_Address* addr = src->as_address_ptr();

  assert(data == dest, "swap uses only 2 operands");
  assert(code == lir_xchg, "no xadd on sparc");

  if (data->type() == T_INT) {
    __ swap(as_Address(addr), data->as_register());
  } else if (data->is_oop()) {
    Register obj = data->as_register();
    Register narrow = tmp->as_register();
#ifdef _LP64
    assert(UseCompressedOops, "swap is 32bit only");
    __ encode_heap_oop(obj, narrow);
    __ swap(as_Address(addr), narrow);
    __ decode_heap_oop(narrow, obj);
#else
    __ swap(as_Address(addr), obj);
#endif
  } else {
    ShouldNotReachHere();
  }
}

#undef __