/*
 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/barrierSet.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"

#define __ _masm->


//------------------------------------------------------------


bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  if (opr->is_constant()) {
    LIR_Const* constant = opr->as_constant_ptr();
    switch (constant->type()) {
      case T_INT: {
        jint value = constant->as_jint();
        return Assembler::is_simm13(value);
      }

      default:
        return false;
    }
  }
  return false;
}


bool LIR_Assembler::is_single_instruction(LIR_Op* op) {
  switch (op->code()) {
    case lir_null_check:
      return true;


    case lir_add:
    case lir_ushr:
    case lir_shr:
    case lir_shl:
      // integer shifts and adds are always one instruction
      return op->result_opr()->is_single_cpu();


    case lir_move: {
      LIR_Op1* op1 = op->as_Op1();
      LIR_Opr src = op1->in_opr();
      LIR_Opr dst = op1->result_opr();

      if (src == dst) {
        NEEDS_CLEANUP;
        // This works around a problem where moves with the same src and dst
        // end up in the delay slot and then the assembler swallows the mov
        // since it has no effect and then it complains because the delay slot
        // is empty. Returning false stops the optimizer from putting this in
        // the delay slot.
        return false;
      }

      // don't put moves involving oops into the delay slot since the VerifyOops code
      // will make it much larger than a single instruction
      if (VerifyOops) {
        return false;
      }

      if (src->is_double_cpu() || dst->is_double_cpu() || op1->patch_code() != lir_patch_none ||
          ((src->is_double_fpu() || dst->is_double_fpu()) && op1->move_kind() != lir_move_normal)) {
        return false;
      }

      if (UseCompressedOops) {
        if (dst->is_address() && !dst->is_stack() && (dst->type() == T_OBJECT || dst->type() == T_ARRAY)) return false;
        if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
      }

      if (dst->is_register()) {
        if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
          return !PatchALot;
        } else if (src->is_single_stack()) {
          return true;
        }
      }

      if (src->is_register()) {
        if (dst->is_address() && Assembler::is_simm13(dst->as_address_ptr()->disp())) {
          return !PatchALot;
        } else if (dst->is_single_stack()) {
          return true;
        }
      }

      if (dst->is_register() &&
          ((src->is_register() && src->is_single_word() && src->is_same_type(dst)) ||
           (src->is_constant() && LIR_Assembler::is_small_constant(op->as_Op1()->in_opr())))) {
        return true;
      }

      return false;
    }

    default:
      return false;
  }
  ShouldNotReachHere();
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::O0_oop_opr;
}


LIR_Opr LIR_Assembler::incomingReceiverOpr() {
  return FrameMap::I0_oop_opr;
}


LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::I0_opr;
}


int LIR_Assembler::initial_frame_size_in_bytes() {
  return in_bytes(frame_map()->framesize_in_bytes());
}


// Inline cache check: the inline cached class is in G5_inline_cache_reg (G5);
// we fetch the class of the receiver (O0) and compare it with the cached class.
// If they do not match we jump to the slow case.
int LIR_Assembler::check_icache() {
  int offset = __ offset();
  __ inline_cache_check(O0, G5_inline_cache_reg);
  return offset;
}


void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp):
  //
  //   1. Create a new compiled activation.
  //   2. Initialize local variables in the compiled activation. The expression stack must be empty
  //      at the osr_bci; it is not initialized.
  //   3. Jump to the continuation address in compiled code to resume execution.

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
  __ build_frame(initial_frame_size_in_bytes());

  // The OSR buffer is
  //
  //   locals[nlocals-1..0]
  //   monitors[number_of_locks-1..0]
  //
  // The locals are a direct copy of the interpreter frame, so in the OSR buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (the receiver) from the interpreter.
  //
  // Similarly with locks: the first lock slot in the OSR buffer is the nth lock
  // from the interpreter frame, and the nth lock slot in the OSR buffer is the
  // 0th lock of the interpreter frame (the method lock if this is a synchronized method).

  // Initialize monitors in the compiled activation.
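  //
  // Illustrative example (an assumption for exposition, not from the original
  // source): on 64-bit with BytesPerWord == 8, max_locals == 2 and
  // number_of_locks == 2, the locals occupy buffer bytes 0..15;
  // monitor_offset below is 2*8 + 16*(2-1) == 32, so lock 0's two-word entry
  // occupies bytes 32..47 and lock 1's occupies bytes 16..31 (lock word
  // first, then the object oop).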
  // I0: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  {
    assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
        __ cmp_and_br(G0, O7, Assembler::notEqual, false, Assembler::pt, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif // ASSERT
      // Copy the lock field into the compiled activation.
      __ ld_ptr(OSR_buf, slot_offset + 0, O7);
      __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
      __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
      __ st_ptr(O7, frame_map()->address_for_monitor_object(i));
    }
  }
}


// Optimized Library calls
// This is the fast version of java.lang.String.compare; it has no
// OSR-entry and therefore we generate a slow version for OSRs.
void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst, CodeEmitInfo* info) {
  Register str0 = left->as_register();
  Register str1 = right->as_register();

  Label Ldone;

  Register result = dst->as_register();
  {
    // Get a pointer to the first character of string0 in tmp0 and get string0.count in str0.
    // Get a pointer to the first character of string1 in tmp1 and get string1.count in str1.
    // Also, get string0.count - string1.count in O7 and get the condition code set.
    // Note: some instructions have been hoisted for better instruction scheduling.

    Register tmp0 = L0;
    Register tmp1 = L1;
    Register tmp2 = L2;

    int value_offset  = java_lang_String::value_offset_in_bytes();  // char array
    int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
    int count_offset  = java_lang_String::count_offset_in_bytes();

    __ load_heap_oop(str0, value_offset, tmp0);
    __ ld(str0, offset_offset, tmp2);
    __ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
    __ ld(str0, count_offset, str0);
    __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);

    // str1 may be null
    add_debug_info_for_null_check_here(info);

    __ load_heap_oop(str1, value_offset, tmp1);
    __ add(tmp0, tmp2, tmp0);

    __ ld(str1, offset_offset, tmp2);
    __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
    __ ld(str1, count_offset, str1);
    __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
    __ subcc(str0, str1, O7);
    __ add(tmp1, tmp2, tmp1);
  }

  {
    // Compute the minimum of the string lengths, scale it and store it in limit
    Register count0 = I0;
    Register count1 = I1;
    Register limit  = L3;

    Label Lskip;
    __ sll(count0, exact_log2(sizeof(jchar)), limit);            // string0 is shorter
    __ br(Assembler::greater, true, Assembler::pt, Lskip);
    __ delayed()->sll(count1, exact_log2(sizeof(jchar)), limit); // string1 is shorter
    __ bind(Lskip);

    // If either string is empty (or both of them) the result is the difference in lengths
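    // (At this point limit holds the scaled minimum of the two counts and
    // O7 still holds string0.count - string1.count from the subcc above.)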
    __ cmp(limit, 0);
    __ br(Assembler::equal, true, Assembler::pn, Ldone);
    __ delayed()->mov(O7, result); // result is difference in lengths
  }

  {
    // Neither string is empty
    Label Lloop;

    Register base0 = L0;
    Register base1 = L1;
    Register chr0  = I0;
    Register chr1  = I1;
    Register limit = L3;

    // Shift base0 and base1 to the end of the arrays, negate limit
    __ add(base0, limit, base0);
    __ add(base1, limit, base1);
    __ neg(limit); // limit = -min{string0.count, string1.count}

    __ lduh(base0, limit, chr0);
    __ bind(Lloop);
    __ lduh(base1, limit, chr1);
    __ subcc(chr0, chr1, chr0);
    __ br(Assembler::notZero, false, Assembler::pn, Ldone);
    assert(chr0 == result, "result must be pre-placed");
    __ delayed()->inccc(limit, sizeof(jchar));
    __ br(Assembler::notZero, true, Assembler::pt, Lloop);
    __ delayed()->lduh(base0, limit, chr0);
  }

  // If strings are equal up to min length, return the length difference.
  __ mov(O7, result);

  // Otherwise, return the difference between the first mismatched chars.
  __ bind(Ldone);
}


// --------------------------------------------------------------------------------------------

void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no) {
  if (!GenerateSynchronizationCode) return;

  Register obj_reg = obj_opr->as_register();
  Register lock_reg = lock_opr->as_register();

  Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  Register reg = mon_addr.base();
  int offset = mon_addr.disp();
  // compute pointer to BasicLock
  if (mon_addr.is_simm13()) {
    __ add(reg, offset, lock_reg);
  } else {
    __ set(offset, lock_reg);
    __ add(reg, lock_reg, lock_reg);
  }
  // unlock object
  MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, UseFastLocking, monitor_no);
  // _slow_case_stubs->append(slow_case);
  // temporary fix: must be created after the exception handler, therefore as a call stub
  _slow_case_stubs->append(slow_case);
  if (UseFastLocking) {
    // try inlined fast unlocking first, revert to slow locking if it fails
    // note: lock_reg points to the displaced header since the displaced header offset is 0!
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
  } else {
    // always do slow unlocking
    // note: the slow unlocking code could be inlined here; however, if we use
    // slow unlocking, speed doesn't matter anyway and this solution is
    // simpler and requires less duplicated code - additionally, the
    // slow unlocking code is the same in either case which simplifies
    // debugging
    __ br(Assembler::always, false, Assembler::pt, *slow_case->entry());
    __ delayed()->nop();
  }
  // done
  __ bind(*slow_case->continuation());
}


int LIR_Assembler::emit_exception_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri).
  __ nop();

  // generate code for exception handler
  ciMethod* method = compilation()->method();

  address handler_base = __ start_a_stub(exception_handler_size);

  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  __ should_not_reach_here();
  assert(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
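// In outline: fetch the pending exception oop from thread-local storage and
// clear the thread's exception state; release the method lock if the method
// is synchronized (slow path emitted as a stub at the end); fire the dtrace
// method-exit probe if enabled; then jump to the shared unwind_exception
// runtime stub.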
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception-related thread state
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0);
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(O0);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(O0, I0); // Preserve the exception
  }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::I1_opr);
    stub = new MonitorExitStub(FrameMap::I1_opr, true, 0);
    __ unlock_object(I3, I2, I1, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ mov(G2_thread, O0);
    jobject2reg(method()->constant_encoding(), O1);
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type);
    __ delayed()->nop();
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(I0, O0); // Restore the exception
  }

  // dispatch to the unwind logic
  __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri).
  __ nop();

  // generate code for deopt handler
  ciMethod* method = compilation()->method();
  address handler_base = __ start_a_stub(deopt_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
  __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
  __ delayed()->nop();
  assert(code_offset() - offset <= deopt_handler_size, "overflow");
  debug_only(__ stop("should have gone to the caller");)
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ set(NULL_WORD, reg);
  } else {
    int oop_index = __ oop_recorder()->find_index(o);
    RelocationHolder rspec = oop_Relocation::spec(oop_index);
    __ set(NULL_WORD, reg, rspec); // Will be set when the nmethod is created
  }
}


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in the oop table to hold the oop once it's been patched
  int oop_index = __ oop_recorder()->allocate_index((jobject)NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, oop_index);

  AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
  assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
  // NULL will be dynamically patched later and the patched value may be large. We must
  // therefore generate the sethi/add as a placeholder.
  __ patchable_set(addrlit, reg);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  Register Rdividend = op->in_opr1()->as_register();
  Register Rdivisor  = noreg;
  Register Rscratch  = op->in_opr3()->as_register();
  Register Rresult   = op->result_opr()->as_register();
  int divisor = -1;

  if (op->in_opr2()->is_register()) {
    Rdivisor = op->in_opr2()->as_register();
  } else {
    divisor = op->in_opr2()->as_constant_ptr()->as_jint();
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
  }

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor != Rscratch, "");
  assert(op->code() == lir_idiv || op->code() == lir_irem, "Must be irem or idiv");

  if (Rdivisor == noreg && is_power_of_2(divisor)) {
    // convert division by a power of two into some shifts and logical operations
    if (op->code() == lir_idiv) {
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ sra(Rscratch, log2_intptr(divisor), Rresult);
      return;
    } else {
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ andn(Rscratch, divisor - 1, Rscratch);
      __ sub(Rdividend, Rscratch, Rresult);
      return;
    }
  }

  __ sra(Rdividend, 31, Rscratch);
  __ wry(Rscratch);
  if (!VM_Version::v9_instructions_work()) {
    // v9 doesn't require these nops
    __ nop();
    __ nop();
    __ nop();
    __ nop();
  }

  add_debug_info_for_div0_here(op->info());

  if (Rdivisor != noreg) {
    __ sdivcc(Rdividend, Rdivisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  } else {
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
    __ sdivcc(Rdividend, divisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  }

  Label skip;
  __ br(Assembler::overflowSet, true, Assembler::pn, skip);
  __ delayed()->Assembler::sethi(0x80000000, (op->code() == lir_idiv ? Rresult : Rscratch));
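  // (The annulled delay-slot sethi above executes only when the branch is
  // taken, i.e. only when the divide overflowed. The sole overflowing case
  // is min_jint / -1, and sethi 0x80000000 materializes exactly min_jint,
  // the correctly wrapped Java result.)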
  __ bind(skip);

  if (op->code() == lir_irem) {
    if (Rdivisor != noreg) {
      __ smul(Rscratch, Rdivisor, Rscratch);
    } else {
      __ smul(Rscratch, divisor, Rscratch);
    }
    __ sub(Rdividend, Rscratch, Rresult);
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif
  assert(op->info() == NULL, "shouldn't have CodeEmitInfo");

  if (op->cond() == lir_cond_always) {
    __ br(Assembler::always, false, Assembler::pt, *(op->label()));
  } else if (op->code() == lir_cond_float_branch) {
    assert(op->ublock() != NULL, "must have unordered successor");
    bool is_unordered = (op->ublock() == op->block());
    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::f_equal;    break;
      case lir_cond_notEqual:     acond = Assembler::f_notEqual; break;
      case lir_cond_less:         acond = (is_unordered ? Assembler::f_unorderedOrLess           : Assembler::f_less);           break;
      case lir_cond_greater:      acond = (is_unordered ? Assembler::f_unorderedOrGreater        : Assembler::f_greater);        break;
      case lir_cond_lessEqual:    acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual    : Assembler::f_lessOrEqual);    break;
      case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual : Assembler::f_greaterOrEqual); break;
      default:                    ShouldNotReachHere();
    };

    if (!VM_Version::v9_instructions_work()) {
      __ nop();
    }
    __ fb(acond, false, Assembler::pn, *(op->label()));
  } else {
    assert(op->code() == lir_branch, "just checking");

    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::equal;                break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
      case lir_cond_less:         acond = Assembler::less;                 break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
      case lir_cond_greater:      acond = Assembler::greater;              break;
      case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
      case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
      default:                    ShouldNotReachHere();
    };

    // SPARC has different condition codes for testing 32-bit
    // vs. 64-bit values. We could always test xcc if we could
    // guarantee that 32-bit loads are always sign-extended, but that isn't
    // true, and since sign extension isn't free it would impose a
    // slight cost.
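    // (br below tests the 32-bit condition codes (icc); brx tests xcc on
    // 64-bit builds and reduces to an icc branch on 32-bit ones.)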
#ifdef _LP64
    if (op->type() == T_INT) {
      __ br(acond, false, Assembler::pn, *(op->label()));
    } else
#endif
      __ brx(acond, false, Assembler::pn, *(op->label()));
  }
  // The peephole pass fills the delay slot
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  Bytecodes::Code code = op->bytecode();
  LIR_Opr dst = op->result_opr();

  switch (code) {
    case Bytecodes::_i2l: {
      Register rlo  = dst->as_register_lo();
      Register rhi  = dst->as_register_hi();
      Register rval = op->in_opr()->as_register();
#ifdef _LP64
      __ sra(rval, 0, rlo);
#else
      __ mov(rval, rlo);
      __ sra(rval, BitsPerInt-1, rhi);
#endif
      break;
    }
    case Bytecodes::_i2d:
    case Bytecodes::_i2f: {
      bool is_double = (code == Bytecodes::_i2d);
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width w = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      if (rsrc != rdst) {
        __ fmov(FloatRegisterImpl::S, rsrc, rdst);
      }
      __ fitof(w, rdst, rdst);
      break;
    }
    case Bytecodes::_f2i: {
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      Address addr = frame_map()->address_for_slot(dst->single_stack_ix());
      Label L;
      // result must be 0 if value is NaN; test by comparing value to itself
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
      if (!VM_Version::v9_instructions_work()) {
        __ nop();
      }
      __ fb(Assembler::f_unordered, true, Assembler::pn, L);
      __ delayed()->st(G0, addr); // annulled if rsrc is not NaN
      __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
      // move integer result from float register to int register
      __ stf(FloatRegisterImpl::S, rsrc, addr.base(), addr.disp());
      __ bind(L);
      break;
    }
    case Bytecodes::_l2i: {
      Register rlo  = op->in_opr()->as_register_lo();
      Register rhi  = op->in_opr()->as_register_hi();
      Register rdst = dst->as_register();
#ifdef _LP64
      __ sra(rlo, 0, rdst);
#else
      __ mov(rlo, rdst);
#endif
      break;
    }
    case Bytecodes::_d2f:
    case Bytecodes::_f2d: {
      bool is_double = (code == Bytecodes::_f2d);
      assert((!is_double && dst->is_single_fpu()) || (is_double && dst->is_double_fpu()), "check");
      LIR_Opr val = op->in_opr();
      FloatRegister rval = (code == Bytecodes::_d2f) ? val->as_double_reg() : val->as_float_reg();
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width vw = is_double ? FloatRegisterImpl::S : FloatRegisterImpl::D;
      FloatRegisterImpl::Width dw = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      __ ftof(vw, dw, rval, rdst);
      break;
    }
    case Bytecodes::_i2s:
    case Bytecodes::_i2b: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
      int shift = (code == Bytecodes::_i2b) ? (BitsPerInt - T_BYTE_aelem_bytes * BitsPerByte) : (BitsPerInt - BitsPerShort);
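      // The sll/sra pair below sign-extends from the narrow type. For i2b,
      // shift == 24; e.g. 0x00000180 << 24 == 0x80000000, and an arithmetic
      // shift right by 24 gives 0xFFFFFF80 == -128 == (int)(byte)384.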
      __ sll(rval, shift, rdst);
      __ sra(rdst, shift, rdst);
      break;
    }
    case Bytecodes::_i2c: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
      int shift = BitsPerInt - T_CHAR_aelem_bytes * BitsPerByte;
      __ sll(rval, shift, rdst);
      __ srl(rdst, shift, rdst);
      break;
    }

    default: ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code) {
  // do nothing since all instructions are word aligned on sparc
}


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  __ call(op->addr(), rtype);
  // The peephole pass fills the delay slot, add_call_info is done in
  // LIR_Assembler::emit_delay.
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  RelocationHolder rspec = virtual_call_Relocation::spec(pc());
  __ set_oop((jobject)Universe::non_oop_word(), G5_inline_cache_reg);
  __ relocate(rspec);
  __ call(op->addr(), relocInfo::none);
  // The peephole pass fills the delay slot, add_call_info is done in
  // LIR_Assembler::emit_delay.
}


void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  add_debug_info_for_null_check_here(op->info());
  __ load_klass(O0, G3_scratch);
  if (__ is_simm13(op->vtable_offset())) {
    __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
  } else {
    // This will generate 2 instructions
    __ set(op->vtable_offset(), G5_method);
    // ld_ptr, set_hi, set
    __ ld_ptr(G3_scratch, G5_method, G5_method);
  }
  __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3_scratch);
  __ callr(G3_scratch, G0);
  // the peephole pass fills the delay slot
}

int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
  int store_offset;
  // Note: the parentheses around the conditional are required; without them
  // "offset + cond ? a : b" parses as "(offset + cond) ? a : b".
  if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
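    // simm13 is SPARC's 13-bit signed immediate, covering -4096..4095.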
    assert(!unaligned, "can't handle this");
    // for offsets larger than a simm13 we set up the offset in O7
    __ set(offset, O7);
    store_offset = store(from_reg, base, O7, type, wide);
  } else {
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(from_reg->as_register());
    }
    store_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ stb(from_reg->as_register(), base, offset); break;
      case T_CHAR  : __ sth(from_reg->as_register(), base, offset); break;
      case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
      case T_INT   : __ stw(from_reg->as_register(), base, offset); break;
      case T_LONG  :
#ifdef _LP64
        if (unaligned || PatchALot) {
          __ srax(from_reg->as_register_lo(), 32, O7);
          __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
          __ stw(O7,                         base, offset + hi_word_offset_in_bytes);
        } else {
          __ stx(from_reg->as_register_lo(), base, offset);
        }
#else
        assert(Assembler::is_simm13(offset + 4), "must be");
        __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
        __ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
#endif
        break;
      case T_ADDRESS:
        __ st_ptr(from_reg->as_register(), base, offset);
        break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(from_reg->as_register(), G3_scratch);
            store_offset = code_offset();
            __ stw(G3_scratch, base, offset);
          } else {
            __ st_ptr(from_reg->as_register(), base, offset);
          }
          break;
        }

      case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break;
      case T_DOUBLE:
        {
          FloatRegister reg = from_reg->as_double_reg();
          // split unaligned stores
          if (unaligned || PatchALot) {
            assert(Assembler::is_simm13(offset + 4), "must be");
            __ stf(FloatRegisterImpl::S, reg->successor(), base, offset + 4);
            __ stf(FloatRegisterImpl::S, reg,              base, offset);
          } else {
            __ stf(FloatRegisterImpl::D, reg, base, offset);
          }
          break;
        }
      default: ShouldNotReachHere();
    }
  }
  return store_offset;
}


int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(from_reg->as_register());
  }
  int store_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ stb(from_reg->as_register(), base, disp); break;
    case T_CHAR  : __ sth(from_reg->as_register(), base, disp); break;
    case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
    case T_INT   : __ stw(from_reg->as_register(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ stx(from_reg->as_register_lo(), base, disp);
#else
      assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match");
      __ std(from_reg->as_register_hi(), base, disp);
#endif
      break;
    case T_ADDRESS:
      __ st_ptr(from_reg->as_register(), base, disp);
      break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ encode_heap_oop(from_reg->as_register(), G3_scratch);
          store_offset = code_offset();
          __ stw(G3_scratch, base, disp);
        } else {
          __ st_ptr(from_reg->as_register(), base, disp);
        }
        break;
      }
    case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, disp); break;
    case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break;
    default: ShouldNotReachHere();
  }
  return store_offset;
}


int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
  int load_offset;
  // (Parenthesized like the store above so the conditional, not the sum, selects the extra word.)
  if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
    assert(base != O7, "destroying register");
    assert(!unaligned, "can't handle this");
    // for offsets larger than a simm13 we set up the offset in O7
    __ set(offset, O7);
    load_offset = load(base, O7, to_reg, type, wide);
  } else {
    load_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ ldsb(base, offset, to_reg->as_register()); break;
      case T_CHAR  : __ lduh(base, offset, to_reg->as_register()); break;
      case T_SHORT : __ ldsh(base, offset, to_reg->as_register()); break;
      case T_INT   : __ ld(base, offset, to_reg->as_register()); break;
      case T_LONG  :
        if (!unaligned) {
#ifdef _LP64
          __ ldx(base, offset, to_reg->as_register_lo());
#else
          assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
                 "must be sequential");
          __ ldd(base, offset, to_reg->as_register_hi());
#endif
        } else {
#ifdef _LP64
          assert(base != to_reg->as_register_lo(), "can't handle this");
          assert(O7 != to_reg->as_register_lo(), "can't handle this");
          __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
          __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
          __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
          __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
#else
          if (base == to_reg->as_register_lo()) {
            __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
            __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
          } else {
            __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
            __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
          }
#endif
        }
        break;
      case T_ADDRESS: __ ld_ptr(base, offset, to_reg->as_register()); break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            __ lduw(base, offset, to_reg->as_register());
            __ decode_heap_oop(to_reg->as_register());
          } else {
            __ ld_ptr(base, offset, to_reg->as_register());
          }
          break;
        }
      case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break;
      case T_DOUBLE:
        {
          FloatRegister reg = to_reg->as_double_reg();
          // split unaligned loads
          if (unaligned || PatchALot) {
            __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
            __ ldf(FloatRegisterImpl::S, base, offset,     reg);
          } else {
            __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
          }
          break;
        }
      default: ShouldNotReachHere();
    }
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(to_reg->as_register());
    }
  }
  return load_offset;
}


int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
  int load_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ ldsb(base, disp, to_reg->as_register()); break;
    case T_CHAR  : __ lduh(base, disp, to_reg->as_register()); break;
    case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break;
    case T_INT   : __ ld(base, disp, to_reg->as_register()); break;
    case T_ADDRESS: __ ld_ptr(base, disp, to_reg->as_register()); break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ lduw(base, disp, to_reg->as_register());
          __ decode_heap_oop(to_reg->as_register());
        } else {
          __ ld_ptr(base, disp, to_reg->as_register());
        }
        break;
      }
    case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
    case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
    case T_LONG  :
#ifdef _LP64
      __ ldx(base, disp, to_reg->as_register_lo());
#else
      assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
             "must be sequential");
      __ ldd(base, disp, to_reg->as_register_hi());
#endif
      break;
    default: ShouldNotReachHere();
  }
  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(to_reg->as_register());
  }
  return load_offset;
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
    case T_INT:
    case T_FLOAT: {
      Register src_reg = O7;
      int value = c->as_jint_bits();
      if (value == 0) {
        src_reg = G0;
      } else {
        __ set(value, O7);
      }
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ stw(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_ADDRESS: {
      Register src_reg = O7;
      int value = c->as_jint_bits();
      if (value == 0) {
        src_reg = G0;
      } else {
        __ set(value, O7);
      }
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ st_ptr(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_OBJECT: {
      Register src_reg = O7;
      jobject2reg(c->as_jobject(), src_reg);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ st_ptr(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());

      Register tmp = O7;
      int value_lo = c->as_jint_lo_bits();
      if (value_lo == 0) {
        tmp = G0;
      } else {
        __ set(value_lo, O7);
      }
      __ stw(tmp, addr.base(), addr.disp() + lo_word_offset_in_bytes);
      int value_hi = c->as_jint_hi_bits();
      if (value_hi == 0) {
        tmp = G0;
      } else {
        __ set(value_hi, O7);
      }
      __ stw(tmp, addr.base(), addr.disp() + hi_word_offset_in_bytes);
      break;
    }
    default:
      Unimplemented();
  }
}


void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();
  Register base = addr->base()->as_pointer_register();
  int offset = -1;

  switch (c->type()) {
    case T_INT:
    case T_FLOAT:
    case T_ADDRESS: {
      LIR_Opr tmp = FrameMap::O7_opr;
      int value = c->as_jint_bits();
      if (value == 0) {
        tmp = FrameMap::G0_opr;
      } else if (Assembler::is_simm13(value)) {
        __ set(value, O7);
      }
      if (addr->index()->is_valid()) {
        assert(addr->disp() == 0, "must be zero");
        offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
      } else {
        assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
        offset = store(tmp, base, addr->disp(), type, wide, false);
      }
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      assert(!addr->index()->is_valid(), "can't handle reg reg address here");
      assert(Assembler::is_simm13(addr->disp()) &&
             Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses");

      LIR_Opr tmp = FrameMap::O7_opr;
      int value_lo = c->as_jint_lo_bits();
      if (value_lo == 0) {
        tmp = FrameMap::G0_opr;
      } else {
        __ set(value_lo, O7);
      }
      offset = store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT, wide, false);
      int value_hi = c->as_jint_hi_bits();
      if (value_hi == 0) {
        tmp = FrameMap::G0_opr;
      } else {
        __ set(value_hi, O7);
      }
      offset = store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT, wide, false);
      break;
    }
    case T_OBJECT: {
      jobject obj = c->as_jobject();
      LIR_Opr tmp;
      if (obj == NULL) {
        tmp = FrameMap::G0_opr;
      } else {
        tmp = FrameMap::O7_opr;
        jobject2reg(c->as_jobject(), O7);
      }
      // handle either reg+reg or reg+disp address
      if (addr->index()->is_valid()) {
        assert(addr->disp() == 0, "must be zero");
        offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
      } else {
        assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
        offset = store(tmp, base, addr->disp(), type, wide, false);
      }

      break;
    }
    default:
      Unimplemented();
  }
  if (info != NULL) {
    assert(offset != -1, "offset should've been set");
    add_debug_info_for_null_check(offset, info);
  }
}


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Opr to_reg = dest;

  switch (c->type()) {
    case T_INT:
    case T_ADDRESS:
      {
        jint con = c->as_jint();
        if (to_reg->is_single_cpu()) {
          assert(patch_code == lir_patch_none, "no patching handled here");
          __ set(con, to_reg->as_register());
        } else {
          ShouldNotReachHere();
          assert(to_reg->is_single_fpu(), "wrong register kind");

          __ set(con, O7);
          Address temp_slot(SP, (frame::register_save_words * wordSize) + STACK_BIAS);
          __ st(O7, temp_slot);
          __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg());
        }
      }
      break;

    case T_LONG:
      {
        jlong con = c->as_jlong();

        if (to_reg->is_double_cpu()) {
#ifdef _LP64
          __ set(con, to_reg->as_register_lo());
#else
          __ set(low(con),  to_reg->as_register_lo());
          __ set(high(con), to_reg->as_register_hi());
#endif
#ifdef _LP64
        } else if (to_reg->is_single_cpu()) {
          __ set(con, to_reg->as_register());
#endif
        } else {
          ShouldNotReachHere();
          assert(to_reg->is_double_fpu(), "wrong register kind");
          Address temp_slot_lo(SP, (frame::register_save_words * wordSize) + STACK_BIAS);
          Address temp_slot_hi(SP, (frame::register_save_words * wordSize) + (longSize/2) + STACK_BIAS);
          __ set(low(con),  O7);
          __ st(O7, temp_slot_lo);
          __ set(high(con), O7);
          __ st(O7, temp_slot_hi);
          __ ldf(FloatRegisterImpl::D, temp_slot_lo, to_reg->as_double_reg());
        }
      }
      break;

    case T_OBJECT:
      {
        if (patch_code == lir_patch_none) {
          jobject2reg(c->as_jobject(), to_reg->as_register());
        } else {
          jobject2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;

    case T_FLOAT:
      {
        address const_addr = __ float_constant(c->as_jfloat());
        if (const_addr == NULL) {
          bailout("const section overflow");
          break;
        }
        RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
        AddressLiteral const_addrlit(const_addr, rspec);
        if (to_reg->is_single_fpu()) {
          __ patchable_sethi(const_addrlit, O7);
          __ relocate(rspec);
          __ ldf(FloatRegisterImpl::S, O7, const_addrlit.low10(), to_reg->as_float_reg());

        } else {
          assert(to_reg->is_single_cpu(), "Must be a cpu register.");

          __ set(const_addrlit, O7);
          __ ld(O7, 0, to_reg->as_register());
        }
      }
      break;

    case T_DOUBLE:
      {
        address const_addr = __ double_constant(c->as_jdouble());
        if (const_addr == NULL) {
          bailout("const section overflow");
          break;
        }
        RelocationHolder rspec = internal_word_Relocation::spec(const_addr);

        if (to_reg->is_double_fpu()) {
          AddressLiteral const_addrlit(const_addr, rspec);
          __ patchable_sethi(const_addrlit, O7);
          __ relocate(rspec);
          __ ldf(FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
        } else {
          assert(to_reg->is_double_cpu(), "Must be a long register.");
#ifdef _LP64
          __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
#else
          __ set(low(jlong_cast(c->as_jdouble())),  to_reg->as_register_lo());
          __ set(high(jlong_cast(c->as_jdouble())), to_reg->as_register_hi());
#endif
        }

      }
      break;

    default:
      ShouldNotReachHere();
  }
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Register reg = addr->base()->as_register();
  return Address(reg, addr->disp());
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  switch (type) {
    case T_INT:
    case T_FLOAT: {
      Register tmp = O7;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ lduw(from.base(), from.disp(), tmp);
      __ stw(tmp, to.base(), to.disp());
      break;
    }
    case T_OBJECT: {
      Register tmp = O7;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ ld_ptr(from.base(), from.disp(), tmp);
      __ st_ptr(tmp, to.base(), to.disp());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Register tmp = O7;
      Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
      Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ lduw(from.base(), from.disp(), tmp);
      __ stw(tmp, to.base(), to.disp());
      __ lduw(from.base(), from.disp() + 4, tmp);
      __ stw(tmp, to.base(), to.disp() + 4);
      break;
    }

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base.base(), base.disp() + hi_word_offset_in_bytes);
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base.base(), base.disp() + lo_word_offset_in_bytes);
}
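
// Note on the hi/lo word offsets used above and below: SPARC is big-endian,
// so the most significant word of a two-word long sits at the lower address;
// hi_word_offset_in_bytes and lo_word_offset_in_bytes encode that layout.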


void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {

  LIR_Address* addr = src_opr->as_address_ptr();
  LIR_Opr to_reg = dest;

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!to_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
      if (needs_patching) {
        __ patchable_set(0, O7);
      } else {
        __ set(disp_value, O7);
      }
      disp_reg = O7;
    }
  } else if (unaligned || PatchALot) {
    __ add(src, addr->index()->as_register(), O7);
    src = O7;
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // Remember the offset of the load. The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset = code_offset();

  assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
  if (disp_reg == noreg) {
    offset = load(src, disp_value, to_reg, type, wide, unaligned);
  } else {
    assert(!unaligned, "can't handle this");
    offset = load(src, disp_reg, to_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }
  if (info != NULL) add_debug_info_for_null_check(offset, info);
}


void LIR_Assembler::prefetchr(LIR_Opr src) {
  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (VM_Version::has_v9()) {
    __ prefetch(from_addr, Assembler::severalReads);
  }
}


void LIR_Assembler::prefetchw(LIR_Opr src) {
  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (VM_Version::has_v9()) {
    __ prefetch(from_addr, Assembler::severalWritesAndPossiblyReads);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  Address addr;
  if (src->is_single_word()) {
    addr = frame_map()->address_for_slot(src->single_stack_ix());
  } else if (src->is_double_word()) {
    addr = frame_map()->address_for_double_slot(src->double_stack_ix());
  }

  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  Address addr;
  if (dest->is_single_word()) {
    addr = frame_map()->address_for_slot(dest->single_stack_ix());
  } else if (dest->is_double_word()) {
    addr = frame_map()->address_for_slot(dest->double_stack_ix());
  }
  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
  if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
    if (from_reg->is_double_fpu()) {
      // double to double moves
      assert(to_reg->is_double_fpu(), "should match");
      __ fmov(FloatRegisterImpl::D, from_reg->as_double_reg(), to_reg->as_double_reg());
    } else {
      // float to float moves
      assert(to_reg->is_single_fpu(), "should match");
      __ fmov(FloatRegisterImpl::S, from_reg->as_float_reg(), to_reg->as_float_reg());
    }
  } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
    if (from_reg->is_double_cpu()) {
#ifdef _LP64
      __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
#else
      assert(to_reg->is_double_cpu() &&
             from_reg->as_register_hi() != to_reg->as_register_lo() &&
             from_reg->as_register_lo() != to_reg->as_register_hi(),
             "should both be long and not overlap");
      // long to long moves
      __ mov(from_reg->as_register_hi(), to_reg->as_register_hi());
      __ mov(from_reg->as_register_lo(), to_reg->as_register_lo());
#endif
#ifdef _LP64
    } else if (to_reg->is_double_cpu()) {
      // int to int moves
      __ mov(from_reg->as_register(), to_reg->as_register_lo());
#endif
    } else {
      // int to int moves
      __ mov(from_reg->as_register(), to_reg->as_register());
    }
  } else {
    ShouldNotReachHere();
  }
  if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
    __ verify_oop(to_reg->as_register());
  }
}


void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
                            bool wide, bool unaligned) {
  LIR_Address* addr = dest->as_address_ptr();

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->is_oop_register()) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!from_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
      if (needs_patching) {
        __ patchable_set(0, O7);
      } else {
        __ set(disp_value, O7);
      }
      disp_reg = O7;
    }
  } else if (unaligned || PatchALot) {
    __ add(src, addr->index()->as_register(), O7);
    src = O7;
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // Remember the offset of the store. The patching_epilog must be done
  // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;

  assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
  if (disp_reg == noreg) {
    offset = store(from_reg, src, disp_value, type, wide, unaligned);
  } else {
    assert(!unaligned, "can't handle this");
    offset = store(from_reg, src, disp_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }

  if (info != NULL) add_debug_info_for_null_check(offset, info);
}


void LIR_Assembler::return_op(LIR_Opr result) {
  // the poll may need a register so just pick one that isn't the return register
#if defined(TIERED) && !defined(_LP64)
  if (result->type_field() == LIR_OprDesc::long_type) {
    // Must move the result to G1
    // Must leave proper result in O0,O1 and G1 (TIERED only)
    __ sllx(I0, 32, G1); // Shift bits into high G1
    __ srl (I1, 0, I1);  // Zero extend O1 (harmless?)
    __ or3 (I1, G1, G1); // OR 64 bits into G1
#ifdef ASSERT
    // mangle it so any problems will show up
    __ set(0xdeadbeef, I0);
    __ set(0xdeadbeef, I1);
#endif
  }
#endif // TIERED
  __ set((intptr_t)os::get_polling_page(), L0);
  __ relocate(relocInfo::poll_return_type);
  __ ld_ptr(L0, 0, G0);
  __ ret();
  __ delayed()->restore();
}


int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  __ set((intptr_t)os::get_polling_page(), tmp->as_register());
  if (info != NULL) {
    add_debug_info_for_branch(info);
  } else {
    __ relocate(relocInfo::poll_type);
  }

  int offset = __ offset();
  __ ld_ptr(tmp->as_register(), 0, G0);

  return offset;
}


void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size);
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();
  __ relocate(static_stub_Relocation::spec(call_pc));

  __ set_oop(NULL, G5);
  // must be set to -1 at code generation time
  AddressLiteral addrlit(-1);
  __ jump_to(addrlit, G3);
  __ delayed()->nop();

  assert(__ offset() - start <= call_stub_size, "stub too big");
  __ end_a_stub();
}


void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_single_fpu()) {
    __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, opr1->as_float_reg(), opr2->as_float_reg());
  } else if (opr1->is_double_fpu()) {
    __ fcmp(FloatRegisterImpl::D, Assembler::fcc0, opr1->as_double_reg(), opr2->as_double_reg());
  } else if (opr1->is_single_cpu()) {
    if (opr2->is_constant()) {
      switch (opr2->as_constant_ptr()->type()) {
        case T_INT:
          {
            jint con = opr2->as_constant_ptr()->as_jint();
            if (Assembler::is_simm13(con)) {
              __ cmp(opr1->as_register(), con);
            } else {
              __ set(con, O7);
              __ cmp(opr1->as_register(), O7);
            }
          }
          break;

        case T_OBJECT:
          // there are only equal/notEqual comparisons on objects
          {
            jobject con = opr2->as_constant_ptr()->as_jobject();
            if (con == NULL) {
              __ cmp(opr1->as_register(), 0);
            } else {
              jobject2reg(con, O7);
              __ cmp(opr1->as_register(), O7);
            }
          }
          break;

        default:
          ShouldNotReachHere();
          break;
      }
    } else {
      if (opr2->is_address()) {
        LIR_Address* addr = opr2->as_address_ptr();
        BasicType type = addr->type();
        if (type == T_OBJECT) {
          __ ld_ptr(as_Address(addr), O7);
        } else {
          __ ld(as_Address(addr), O7);
        }
        __ cmp(opr1->as_register(), O7);
      } else {
        __ cmp(opr1->as_register(), opr2->as_register());
      }
    }
  } else if (opr1->is_double_cpu()) {
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_constant() && opr2->as_jlong() == 0) {
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases");
#ifdef _LP64
      __ orcc(xhi, G0, G0);
#else
      __ orcc(xhi, xlo, G0);
#endif
    } else if (opr2->is_register()) {
      Register ylo = opr2->as_register_lo();
      Register yhi = opr2->as_register_hi();
#ifdef _LP64
      __ cmp(xlo, ylo);
#else
      __ subcc(xlo, ylo, xlo);
      __ subccc(xhi, yhi, xhi);
      if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
        __ orcc(xhi, xlo, G0);
      }
#endif
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_address()) {
    LIR_Address* addr = opr1->as_address_ptr();
    BasicType type = addr->type();
    assert(opr2->is_constant(), "Checking");
    if (type == T_OBJECT) {
      __ ld_ptr(as_Address(addr), O7);
    } else {
      __ ld(as_Address(addr), O7);
    }
    __ cmp(O7, opr2->as_constant_ptr()->as_jint());
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
    } else if (left->is_double_fpu()) {
      __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
    } else {
      ShouldNotReachHere();
    }
  } else if (code == lir_cmp_l2i) {
#ifdef _LP64
    __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
#else
    __ lcmp(left->as_register_hi(),  left->as_register_lo(),
            right->as_register_hi(), right->as_register_lo(),
            dst->as_register());
#endif
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
  Assembler::Condition acond;
  switch (condition) {
    case lir_cond_equal:        acond = Assembler::equal;                break;
    case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
    case lir_cond_less:         acond = Assembler::less;                 break;
    case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
    case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
    case lir_cond_greater:      acond = Assembler::greater;              break;
    case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
    case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
    default:                    ShouldNotReachHere();
  };

  if (opr1->is_constant() && opr1->type() == T_INT) {
    Register dest = result->as_register();
    // load up the first part of the constant before the branch
    // and do the rest in the delay slot.
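    // (A 32-bit constant is materialized as sethi, which sets the upper
    // 22 bits, plus an or of the low 10 bits; e.g. for 0x12345678 the
    // or3 in the delay slot supplies 0x12345678 & 0x3ff == 0x278.)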
    // load up first part of constant before branch
    // and do the rest in the delay slot.
    if (!Assembler::is_simm13(opr1->as_jint())) {
      __ sethi(opr1->as_jint(), dest);
    }
  } else if (opr1->is_constant()) {
    const2reg(opr1, result, lir_patch_none, NULL);
  } else if (opr1->is_register()) {
    reg2reg(opr1, result);
  } else if (opr1->is_stack()) {
    stack2reg(opr1, result, result->type());
  } else {
    ShouldNotReachHere();
  }
  Label skip;
#ifdef _LP64
  if (type == T_INT) {
    __ br(acond, false, Assembler::pt, skip);
  } else
#endif
    __ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit
  if (opr1->is_constant() && opr1->type() == T_INT) {
    Register dest = result->as_register();
    if (Assembler::is_simm13(opr1->as_jint())) {
      __ delayed()->or3(G0, opr1->as_jint(), dest);
    } else {
      // the sethi has been done above, so just put in the low 10 bits
      __ delayed()->or3(dest, opr1->as_jint() & 0x3ff, dest);
    }
  } else {
    // can't do anything useful in the delay slot
    __ delayed()->nop();
  }
  if (opr2->is_constant()) {
    const2reg(opr2, result, lir_patch_none, NULL);
  } else if (opr2->is_register()) {
    reg2reg(opr2, result);
  } else if (opr2->is_stack()) {
    stack2reg(opr2, result, result->type());
  } else {
    ShouldNotReachHere();
  }
  __ bind(skip);
}


void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "unused on this code path");
  assert(left->is_register(), "wrong items state");
  assert(dest->is_register(), "wrong items state");

  if (right->is_register()) {
    if (dest->is_float_kind()) {

      FloatRegister lreg, rreg, res;
      FloatRegisterImpl::Width w;
      if (right->is_single_fpu()) {
        w = FloatRegisterImpl::S;
        lreg = left->as_float_reg();
        rreg = right->as_float_reg();
        res  = dest->as_float_reg();
      } else {
        w = FloatRegisterImpl::D;
        lreg = left->as_double_reg();
        rreg = right->as_double_reg();
        res  = dest->as_double_reg();
      }

      switch (code) {
        case lir_add: __ fadd(w, lreg, rreg, res); break;
        case lir_sub: __ fsub(w, lreg, rreg, res); break;
        case lir_mul: // fall through
        case lir_mul_strictfp: __ fmul(w, lreg, rreg, res); break;
        case lir_div: // fall through
        case lir_div_strictfp: __ fdiv(w, lreg, rreg, res); break;
        default: ShouldNotReachHere();
      }

    } else if (dest->is_double_cpu()) {
#ifdef _LP64
      Register dst_lo = dest->as_register_lo();
      Register op1_lo = left->as_pointer_register();
      Register op2_lo = right->as_pointer_register();

      switch (code) {
        case lir_add:
          __ add(op1_lo, op2_lo, dst_lo);
          break;

        case lir_sub:
          __ sub(op1_lo, op2_lo, dst_lo);
          break;

        default: ShouldNotReachHere();
      }
#else
      Register op1_lo = left->as_register_lo();
      Register op1_hi = left->as_register_hi();
      Register op2_lo = right->as_register_lo();
      Register op2_hi = right->as_register_hi();
      Register dst_lo = dest->as_register_lo();
      Register dst_hi = dest->as_register_hi();

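      // On 32-bit SPARC a 64-bit add/sub is a two-instruction carry chain:
      // addcc/subcc sets the carry from the low halves and addc/subc folds
      // it into the high halves.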
      switch (code) {
        case lir_add:
          __ addcc(op1_lo, op2_lo, dst_lo);
          __ addc (op1_hi, op2_hi, dst_hi);
          break;

        case lir_sub:
          __ subcc(op1_lo, op2_lo, dst_lo);
          __ subc (op1_hi, op2_hi, dst_hi);
          break;

        default: ShouldNotReachHere();
      }
#endif
    } else {
      assert(right->is_single_cpu(), "Just Checking");

      Register lreg = left->as_register();
      Register res  = dest->as_register();
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ add (lreg, rreg, res); break;
        case lir_sub: __ sub (lreg, rreg, res); break;
        case lir_mul: __ mult(lreg, rreg, res); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    assert(right->is_constant(), "must be constant");

    if (dest->is_single_cpu()) {
      Register lreg = left->as_register();
      Register res  = dest->as_register();
      int simm13 = right->as_constant_ptr()->as_jint();

      switch (code) {
        case lir_add: __ add (lreg, simm13, res); break;
        case lir_sub: __ sub (lreg, simm13, res); break;
        case lir_mul: __ mult(lreg, simm13, res); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register lreg = left->as_pointer_register();
      Register res  = dest->as_register_lo();
      long con = right->as_constant_ptr()->as_jlong();
      assert(Assembler::is_simm13(con), "must be simm13");

      switch (code) {
        case lir_add: __ add (lreg, (int)con, res); break;
        case lir_sub: __ sub (lreg, (int)con, res); break;
        case lir_mul: __ mult(lreg, (int)con, res); break;
        default: ShouldNotReachHere();
      }
    }
  }
}


void LIR_Assembler::fpop() {
  // do nothing
}


void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
  switch (code) {
    case lir_sin:
    case lir_tan:
    case lir_cos: {
      assert(thread->is_valid(), "preserve the thread object for performance reasons");
      assert(dest->as_double_reg() == F0, "the result will be in f0/f1");
      break;
    }
    case lir_sqrt: {
      assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt");
      FloatRegister src_reg = value->as_double_reg();
      FloatRegister dst_reg = dest->as_double_reg();
      __ fsqrt(FloatRegisterImpl::D, src_reg, dst_reg);
      break;
    }
    case lir_abs: {
      assert(!thread->is_valid(), "there is no need for a thread_reg for fabs");
      FloatRegister src_reg = value->as_double_reg();
      FloatRegister dst_reg = dest->as_double_reg();
      __ fabs(FloatRegisterImpl::D, src_reg, dst_reg);
      break;
    }
    default: {
      ShouldNotReachHere();
      break;
    }
  }
}

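// Logical ops with a constant right-hand side rely on the LIR generator
// having restricted the constant to the simm13 immediate range. Note that
// the 32-bit path below treats the high word of the constant as zero, which
// presumes the constant is non-negative.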
void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
  if (right->is_constant()) {
    if (dest->is_single_cpu()) {
      int simm13 = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_logic_and: __ and3(left->as_register(), simm13, dest->as_register()); break;
        case lir_logic_or:  __ or3 (left->as_register(), simm13, dest->as_register()); break;
        case lir_logic_xor: __ xor3(left->as_register(), simm13, dest->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else {
      long c = right->as_constant_ptr()->as_jlong();
      assert(c == (int)c && Assembler::is_simm13(c), "out of range");
      int simm13 = (int)c;
      switch (code) {
        case lir_logic_and:
#ifndef _LP64
          __ and3(left->as_register_hi(), 0, dest->as_register_hi());
#endif
          __ and3(left->as_register_lo(), simm13, dest->as_register_lo());
          break;

        case lir_logic_or:
#ifndef _LP64
          __ or3(left->as_register_hi(), 0, dest->as_register_hi());
#endif
          __ or3(left->as_register_lo(), simm13, dest->as_register_lo());
          break;

        case lir_logic_xor:
#ifndef _LP64
          __ xor3(left->as_register_hi(), 0, dest->as_register_hi());
#endif
          __ xor3(left->as_register_lo(), simm13, dest->as_register_lo());
          break;

        default: ShouldNotReachHere();
      }
    }
  } else {
    assert(right->is_register(), "right should be in register");

    if (dest->is_single_cpu()) {
      switch (code) {
        case lir_logic_and: __ and3(left->as_register(), right->as_register(), dest->as_register()); break;
        case lir_logic_or:  __ or3 (left->as_register(), right->as_register(), dest->as_register()); break;
        case lir_logic_xor: __ xor3(left->as_register(), right->as_register(), dest->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else {
#ifdef _LP64
      Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
                                                                        left->as_register_lo();
      Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
                                                                          right->as_register_lo();

      switch (code) {
        case lir_logic_and: __ and3(l, r, dest->as_register_lo()); break;
        case lir_logic_or:  __ or3 (l, r, dest->as_register_lo()); break;
        case lir_logic_xor: __ xor3(l, r, dest->as_register_lo()); break;
        default: ShouldNotReachHere();
      }
#else
      switch (code) {
        case lir_logic_and:
          __ and3(left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
          __ and3(left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
          break;

        case lir_logic_or:
          __ or3(left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
          __ or3(left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
          break;

        case lir_logic_xor:
          __ xor3(left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
          __ xor3(left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
          break;

        default: ShouldNotReachHere();
      }
#endif
    }
  }
}


int LIR_Assembler::shift_amount(BasicType t) {
  int elem_size = type2aelembytes(t);
  switch (elem_size) {
    case 1 : return 0;
    case 2 : return 1;
    case 4 : return 2;
    case 8 : return 3;
  }
  ShouldNotReachHere();
  return -1;
}


void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == Oexception, "should match");
  assert(exceptionPC->as_register() == Oissuing_pc, "should match");

  info->add_register_oop(exceptionOop);

  // reuse the debug info from the safepoint poll for the throw op itself
  address pc_for_athrow = __ pc();
  int pc_for_athrow_offset = __ offset();
  RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
  __ set(pc_for_athrow, Oissuing_pc, rspec);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
}


void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == Oexception, "should match");

  __ br(Assembler::always, false, Assembler::pt, _unwind_handler_entry);
  __ delayed()->nop();
}

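// emit_arraycopy chooses between the generic runtime arraycopy, a
// type-checked per-element copy, and the optimized bulk-copy stubs,
// depending on how much is statically known about the array types.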
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = op->tmp()->as_register();
  Register tmp2 = O7;

  int flags = op->flags();
  ciArrayKlass* default_type = op->expected_type();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

#ifdef _LP64
  // the upper 32 bits must be zero
  __ sra(dst_pos, 0, dst_pos);
  __ sra(src_pos, 0, src_pos);
  __ sra(length, 0, length);
#endif

  // set up the arraycopy stub information
  ArrayCopyStub* stub = op->stub();

  // Always take the stub path if no type information is available. It's ok
  // if the known type isn't loaded, since the code sanity-checks in debug
  // mode and the type isn't required when we know the exact type; the stub
  // also checks that the type is an array type.
  if (op->expected_type() == NULL) {
    __ mov(src,     O0);
    __ mov(src_pos, O1);
    __ mov(dst,     O2);
    __ mov(dst_pos, O3);
    __ mov(length,  O4);
    address copyfunc_addr = StubRoutines::generic_arraycopy();

    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
      __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy));
    } else {
#ifndef PRODUCT
      if (PrintC1Statistics) {
        address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
        __ inc_counter(counter, G1, G3);
      }
#endif
      __ call_VM_leaf(tmp, copyfunc_addr);
    }

    if (copyfunc_addr != NULL) {
      __ xor3(O0, -1, tmp);
      __ sub(length, tmp, length);
      __ add(src_pos, tmp, src_pos);
      __ tst(O0);
      __ br(Assembler::less, false, Assembler::pn, *stub->entry());
      __ delayed()->add(dst_pos, tmp, dst_pos);
    } else {
      __ tst(O0);
      __ br(Assembler::less, false, Assembler::pn, *stub->entry());
      __ delayed()->nop();
    }
    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");

  // make sure src and dst are non-null and load array length
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ tst(src);
    __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ tst(dst);
    __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    // test src_pos register
    __ tst(src_pos);
    __ br(Assembler::less, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    // test dst_pos register
    __ tst(dst_pos);
    __ br(Assembler::less, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    // make sure length isn't negative
    __ tst(length);
    __ br(Assembler::less, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

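  // Range checks: compare pos + length against the array's length field,
  // using an unsigned comparison (branch on carrySet) so a single test
  // covers the out-of-bounds case.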
  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ ld(src, arrayOopDesc::length_offset_in_bytes(), tmp2);
    __ add(length, src_pos, tmp);
    __ cmp(tmp2, tmp);
    __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ ld(dst, arrayOopDesc::length_offset_in_bytes(), tmp2);
    __ add(length, dst_pos, tmp);
    __ cmp(tmp2, tmp);
    __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  int shift = shift_amount(basic_type);

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays
      if (UseCompressedOops) {
        // We don't need decode because we just need to compare
        __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
        __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
        __ cmp(tmp, tmp2);
        __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
      } else {
        __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
        __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
        __ cmp(tmp, tmp2);
        __ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry());
      }
      __ delayed()->nop();
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      address copyfunc_addr = StubRoutines::checkcast_arraycopy();

      Label cont, slow;
      assert_different_registers(tmp, tmp2, G3, G1);

      __ load_klass(src, G3);
      __ load_klass(dst, G1);

      __ check_klass_subtype_fast_path(G3, G1, tmp, tmp2, &cont, copyfunc_addr == NULL ? stub->entry() : &slow, NULL);

      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
      __ delayed()->nop();

      __ cmp(G3, 0);
      if (copyfunc_addr != NULL) { // use stub if available
        // src is not a sub class of dst so we have to do a
        // per-element check.
        __ br(Assembler::notEqual, false, Assembler::pt, cont);
        __ delayed()->nop();

        __ bind(slow);

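        // Only one of src/dst may be statically known to be an object array;
        // in that case the other operand's layout helper is checked at
        // runtime before attempting the element-wise copy below.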
        int mask = LIR_OpArrayCopy::src_objarray | LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // Check that the operand not known to be an object array
          // actually is one.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(src, tmp);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(dst, tmp);
          }
          int lh_offset = klassOopDesc::header_size() * HeapWordSize +
            Klass::layout_helper_offset_in_bytes();

          __ lduw(tmp, lh_offset, tmp2);

          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ set(objArray_lh, tmp);
          __ cmp(tmp, tmp2);
          __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
          __ delayed()->nop();
        }

        Register src_ptr = O0;
        Register dst_ptr = O1;
        Register len     = O2;
        Register chk_off = O3;
        Register super_k = O4;

        __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
        if (shift == 0) {
          __ add(src_ptr, src_pos, src_ptr);
        } else {
          __ sll(src_pos, shift, tmp);
          __ add(src_ptr, tmp, src_ptr);
        }

        __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
        if (shift == 0) {
          __ add(dst_ptr, dst_pos, dst_ptr);
        } else {
          __ sll(dst_pos, shift, tmp);
          __ add(dst_ptr, tmp, dst_ptr);
        }
        __ mov(length, len);
        __ load_klass(dst, tmp);

        int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
                         objArrayKlass::element_klass_offset_in_bytes());
        __ ld_ptr(tmp, ek_offset, super_k);

        int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
                          Klass::super_check_offset_offset_in_bytes());
        __ lduw(super_k, sco_offset, chk_off);

        __ call_VM_leaf(tmp, copyfunc_addr);

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ br_notnull(O0, false, Assembler::pn, failed);
          __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, G1, G3);
          __ bind(failed);
        }
#endif

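        // As used here, the checkcast stub returns 0 in O0 on success and
        // otherwise the bitwise complement of the number of elements copied,
        // so the xor3 with -1 in the delay slot recovers that count; the
        // positions and length are then advanced before taking the slow stub
        // to finish (or throw on) the remainder.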
        __ br_null(O0, false, Assembler::pt, *stub->continuation(), false);
        __ delayed()->xor3(O0, -1, tmp);

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, G1, G3);
        }
#endif

        __ sub(length, tmp, length);
        __ add(src_pos, tmp, src_pos);
        __ br(Assembler::always, false, Assembler::pt, *stub->entry());
        __ delayed()->add(dst_pos, tmp, dst_pos);

        __ bind(cont);
      } else {
        __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
        __ delayed()->nop();
        __ bind(cont);
      }
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    jobject2reg(op->expected_type()->constant_encoding(), tmp);
    if (UseCompressedOops) {
      // tmp holds the default type. It currently comes uncompressed after the
      // load of a constant, so encode it.
      __ encode_heap_oop(tmp);
      // load the raw value of the dst klass, since we will be comparing
      // uncompressed values directly.
      __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
      if (basic_type != T_OBJECT) {
        __ cmp(tmp, tmp2);
        __ br(Assembler::notEqual, false, Assembler::pn, halt);
        // load the raw value of the src klass.
        __ delayed()->lduw(src, oopDesc::klass_offset_in_bytes(), tmp2);
        __ cmp_and_br(tmp, tmp2, Assembler::equal, false, Assembler::pn, known_ok);
      } else {
        __ cmp(tmp, tmp2);
        __ br(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->cmp(src, dst);
        __ brx(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->nop();
      }
    } else {
      __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
      if (basic_type != T_OBJECT) {
        __ cmp(tmp, tmp2);
        __ brx(Assembler::notEqual, false, Assembler::pn, halt);
        __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
        __ cmp_and_brx(tmp, tmp2, Assembler::equal, false, Assembler::pn, known_ok);
      } else {
        __ cmp(tmp, tmp2);
        __ brx(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->cmp(src, dst);
        __ brx(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->nop();
      }
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    address counter = Runtime1::arraycopy_count_address(basic_type);
    __ inc_counter(counter, G1, G3);
  }
#endif

  Register src_ptr = O0;
  Register dst_ptr = O1;
  Register len     = O2;

  __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
  if (shift == 0) {
    __ add(src_ptr, src_pos, src_ptr);
  } else {
    __ sll(src_pos, shift, tmp);
    __ add(src_ptr, tmp, src_ptr);
  }

  __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
  if (shift == 0) {
    __ add(dst_ptr, dst_pos, dst_ptr);
  } else {
    __ sll(dst_pos, shift, tmp);
    __ add(dst_ptr, tmp, dst_ptr);
  }

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);

  // arraycopy stubs take a length in number of elements, so don't scale it.
  __ mov(length, len);
  __ call_VM_leaf(tmp, entry);

  __ bind(*stub->continuation());
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  if (dest->is_single_cpu()) {
#ifdef _LP64
    if (left->type() == T_OBJECT) {
      switch (code) {
        case lir_shl:  __ sllx (left->as_register(), count->as_register(), dest->as_register()); break;
        case lir_shr:  __ srax (left->as_register(), count->as_register(), dest->as_register()); break;
        case lir_ushr: __ srl  (left->as_register(), count->as_register(), dest->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else
#endif
      switch (code) {
        case lir_shl:  __ sll (left->as_register(), count->as_register(), dest->as_register()); break;
        case lir_shr:  __ sra (left->as_register(), count->as_register(), dest->as_register()); break;
        case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break;
        default: ShouldNotReachHere();
      }
  } else {
#ifdef _LP64
    switch (code) {
      case lir_shl:  __ sllx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
      case lir_shr:  __ srax (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
      case lir_ushr: __ srlx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
      default: ShouldNotReachHere();
    }
#else
    switch (code) {
      case lir_shl:  __ lshl  (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
      case lir_shr:  __ lshr  (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
      case lir_ushr: __ lushr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
      default: ShouldNotReachHere();
    }
#endif
  }
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
#ifdef _LP64
  if (left->type() == T_OBJECT) {
    count = count & 63;  // shouldn't shift by more than sizeof(intptr_t)
    Register l = left->as_register();
    Register d = dest->as_register_lo();
    switch (code) {
      case lir_shl:  __ sllx (l, count, d); break;
      case lir_shr:  __ srax (l, count, d); break;
      case lir_ushr: __ srlx (l, count, d); break;
      default: ShouldNotReachHere();
    }
    return;
  }
#endif

  if (dest->is_single_cpu()) {
    count = count & 0x1F; // Java spec
    switch (code) {
      case lir_shl:  __ sll (left->as_register(), count, dest->as_register()); break;
      case lir_shr:  __ sra (left->as_register(), count, dest->as_register()); break;
      case lir_ushr: __ srl (left->as_register(), count, dest->as_register()); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_cpu()) {
    count = count & 63; // Java spec
    switch (code) {
      case lir_shl:  __ sllx (left->as_pointer_register(), count, dest->as_pointer_register()); break;
      case lir_shr:  __ srax (left->as_pointer_register(), count, dest->as_pointer_register()); break;
      case lir_ushr: __ srlx (left->as_pointer_register(), count, dest->as_pointer_register()); break;
      default: ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}

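// Object and array allocation use a fixed register protocol shared with the
// runtime stubs: the klass arrives in G5, the new object comes back in O0,
// and G1/G3/G4 serve as temps, as the asserts below spell out.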
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  assert(op->tmp1()->as_register() == G1 &&
         op->tmp2()->as_register() == G3 &&
         op->tmp3()->as_register() == G4 &&
         op->obj()->as_register() == O0 &&
         op->klass()->as_register() == G5, "must be");
  if (op->init_check()) {
    __ ld(op->klass()->as_register(),
          instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc),
          op->tmp1()->as_register());
    add_debug_info_for_null_check_here(op->stub()->info());
    __ cmp(op->tmp1()->as_register(), instanceKlass::fully_initialized);
    __ br(Assembler::notEqual, false, Assembler::pn, *op->stub()->entry());
    __ delayed()->nop();
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->tmp3()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
  __ verify_oop(op->obj()->as_register());
}


void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  assert(op->tmp1()->as_register() == G1 &&
         op->tmp2()->as_register() == G3 &&
         op->tmp3()->as_register() == G4 &&
         op->tmp4()->as_register() == O1 &&
         op->klass()->as_register() == G5, "must be");

  LP64_ONLY( __ signx(op->len()->as_register()); )
  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
    __ delayed()->nop();
  } else {
    __ allocate_array(op->obj()->as_register(),
                      op->len()->as_register(),
                      op->tmp1()->as_register(),
                      op->tmp2()->as_register(),
                      op->tmp3()->as_register(),
                      arrayOopDesc::header_size(op->type()),
                      type2aelembytes(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}


void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Register tmp1, Label* update_done) {
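  // Two passes over the ReceiverTypeData rows: first look for a row that
  // already records this receiver klass and bump its count; failing that,
  // claim the first empty row for it.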
  uint i;
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                          mdo_offset_bias);
    __ ld_ptr(receiver_addr, tmp1);
    __ verify_oop(tmp1);
    __ cmp_and_brx(recv, tmp1, Assembler::notEqual, false, Assembler::pt, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                      mdo_offset_bias);
    __ ld_ptr(data_addr, tmp1);
    __ add(tmp1, DataLayout::counter_increment, tmp1);
    __ st_ptr(tmp1, data_addr);
    __ ba(*update_done, false);
    __ delayed()->nop();
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                      mdo_offset_bias);
    __ ld_ptr(recv_addr, tmp1);
    __ br_notnull(tmp1, false, Assembler::pt, next_test);
    __ st_ptr(recv, recv_addr);
    __ set(DataLayout::counter_increment, tmp1);
    __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
              mdo_offset_bias);
    __ ba(*update_done, false);
    __ delayed()->nop();
    __ bind(next_test);
  }
}


void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
                                    ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
  md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  data = md->bci_to_data(bci);
  assert(data != NULL, "need data for checkcast");
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ld can use simm13s to reference the slots of the data
    mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
  }
}

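// Shared type-check skeleton for checkcast and instanceof: callers pass in
// the labels to branch to on success, on failure, and when the object is
// null; profiling updates are threaded through the same paths.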
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  Register Rtmp1 = op->tmp3()->as_register();
  ciKlass* k = op->klass();


  if (obj == k_RInfo) {
    k_RInfo = klass_RInfo;
    klass_RInfo = obj;
  }

  ciMethodData* md;
  ciProfileData* data;
  int mdo_offset_bias = 0;
  if (op->should_profile()) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);

    Label not_null;
    __ br_notnull(obj, false, Assembler::pn, not_null);
    Register mdo      = k_RInfo;
    Register data_val = Rtmp1;
    jobject2reg(md->constant_encoding(), mdo);
    if (mdo_offset_bias > 0) {
      __ set(mdo_offset_bias, data_val);
      __ add(mdo, data_val, mdo);
    }
    Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
    __ ldub(flags_addr, data_val);
    __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
    __ stb(data_val, flags_addr);
    __ ba(*obj_is_null, false);
    __ delayed()->nop();
    __ bind(not_null);
  } else {
    __ br_null(obj, false, Assembler::pn, *obj_is_null, false);
    __ delayed()->nop();
  }

  Label profile_cast_failure, profile_cast_success;
  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
  Label *success_target = op->should_profile() ? &profile_cast_success : success;

  // patching may screw with our temporaries on sparc,
  // so let's do it before loading the class
  if (k->is_loaded()) {
    jobject2reg(k->constant_encoding(), k_RInfo);
  } else {
    jobject2reg_with_patching(k_RInfo, op->info_for_patch());
  }
  assert(obj != k_RInfo, "must be different");

  // get object class
  // not a safepoint as obj null check happens earlier
  __ load_klass(obj, klass_RInfo);
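  // Fast path: a direct klass-pointer compare when the exact class is known;
  // otherwise the inline subtype probe, with the out-of-line
  // slow_subtype_check runtime stub as backup.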
  if (op->fast_check()) {
    assert_different_registers(klass_RInfo, k_RInfo);
    __ cmp(k_RInfo, klass_RInfo);
    __ brx(Assembler::notEqual, false, Assembler::pt, *failure_target);
    __ delayed()->nop();
  } else {
    bool need_slow_path = true;
    if (k->is_loaded()) {
      if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
        need_slow_path = false;
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
                                       (need_slow_path ? success_target : NULL),
                                       failure_target, NULL,
                                       RegisterOrConstant(k->super_check_offset()));
    } else {
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target,
                                       failure_target, NULL);
    }
    if (need_slow_path) {
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
      __ delayed()->nop();
      __ cmp(G3, 0);
      __ br(Assembler::equal, false, Assembler::pn, *failure_target);
      __ delayed()->nop();
      // Fall through to success case
    }
  }

  if (op->should_profile()) {
    Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
    assert_different_registers(obj, mdo, recv, tmp1);
    __ bind(profile_cast_success);
    jobject2reg(md->constant_encoding(), mdo);
    if (mdo_offset_bias > 0) {
      __ set(mdo_offset_bias, tmp1);
      __ add(mdo, tmp1, mdo);
    }
    __ load_klass(obj, recv);
    type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
    // Jump over the failure case
    __ ba(*success, false);
    __ delayed()->nop();
    // Cast failure case
    __ bind(profile_cast_failure);
    jobject2reg(md->constant_encoding(), mdo);
    if (mdo_offset_bias > 0) {
      __ set(mdo_offset_bias, tmp1);
      __ add(mdo, tmp1, mdo);
    }
    Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
    __ ld_ptr(data_addr, tmp1);
    __ sub(tmp1, DataLayout::counter_increment, tmp1);
    __ st_ptr(tmp1, data_addr);
    __ ba(*failure, false);
    __ delayed()->nop();
  }
  __ ba(*success, false);
  __ delayed()->nop();
}

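// lir_store_check verifies that a value stored into an object array is
// assignable to the array's element klass; checkcast and instanceof are
// expressed in terms of emit_typecheck_helper above.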
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();

    __ verify_oop(value);
    CodeStub* stub = op->stub();
    // check if it needs to be profiled
    ciMethodData* md;
    ciProfileData* data;
    int mdo_offset_bias = 0;
    if (op->should_profile()) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
    }
    Label profile_cast_success, profile_cast_failure, done;
    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

    if (op->should_profile()) {
      Label not_null;
      __ br_notnull(value, false, Assembler::pn, not_null);
      Register mdo      = k_RInfo;
      Register data_val = Rtmp1;
      jobject2reg(md->constant_encoding(), mdo);
      if (mdo_offset_bias > 0) {
        __ set(mdo_offset_bias, data_val);
        __ add(mdo, data_val, mdo);
      }
      Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
      __ ldub(flags_addr, data_val);
      __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
      __ stb(data_val, flags_addr);
      __ ba(done);
      __ bind(not_null);
    } else {
      __ br_null(value, false, Assembler::pn, done);
    }
    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(array, k_RInfo);
    __ load_klass(value, klass_RInfo);

    // get instance klass
    __ ld_ptr(Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)), k_RInfo);
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL);

    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
    assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
    __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
    __ delayed()->nop();
    __ cmp(G3, 0);
    __ br(Assembler::equal, false, Assembler::pn, *failure_target);
    __ delayed()->nop();
    // fall through to the success case

    if (op->should_profile()) {
      Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
      assert_different_registers(value, mdo, recv, tmp1);
      __ bind(profile_cast_success);
      jobject2reg(md->constant_encoding(), mdo);
      if (mdo_offset_bias > 0) {
        __ set(mdo_offset_bias, tmp1);
        __ add(mdo, tmp1, mdo);
      }
      __ load_klass(value, recv);
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
      __ ba(done);
      // Cast failure case
      __ bind(profile_cast_failure);
      jobject2reg(md->constant_encoding(), mdo);
      if (mdo_offset_bias > 0) {
        __ set(mdo_offset_bias, tmp1);
        __ add(mdo, tmp1, mdo);
      }
      Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
      __ ld_ptr(data_addr, tmp1);
      __ sub(tmp1, DataLayout::counter_increment, tmp1);
      __ st_ptr(tmp1, data_addr);
      __ ba(*stub->entry(), false);
      __ delayed()->nop();
    }
    __ bind(done);
  } else if (code == lir_checkcast) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success;
    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
    __ bind(success);
    __ mov(obj, dst);
  } else if (code == lir_instanceof) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    emit_typecheck_helper(op, &success, &failure, &failure);
    __ bind(failure);
    __ set(0, dst);
    __ ba(done);
    __ bind(success);
    __ set(1, dst);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }

}

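// Compare-and-swap via the SPARC cas/casx instructions: casx atomically
// compares the value at [addr] with t1 and, on a match, stores t2; either
// way t2 ends up holding the value that was in memory, so comparing t1 and
// t2 afterwards tells whether the swap happened.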
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  if (op->code() == lir_cas_long) {
    assert(VM_Version::supports_cx8(), "wrong machine");
    Register addr = op->addr()->as_pointer_register();
    Register cmp_value_lo = op->cmp_value()->as_register_lo();
    Register cmp_value_hi = op->cmp_value()->as_register_hi();
    Register new_value_lo = op->new_value()->as_register_lo();
    Register new_value_hi = op->new_value()->as_register_hi();
    Register t1 = op->tmp1()->as_register();
    Register t2 = op->tmp2()->as_register();
#ifdef _LP64
    __ mov(cmp_value_lo, t1);
    __ mov(new_value_lo, t2);
    // perform the compare and swap operation
    __ casx(addr, t1, t2);
    // generate condition code - if the swap succeeded, t2 ("new value" reg) was
    // overwritten with the original value in "addr" and will be equal to t1.
    __ cmp(t1, t2);
#else
    // move high and low halves of long values into single registers
    __ sllx(cmp_value_hi, 32, t1);         // shift high half into temp reg
    __ srl(cmp_value_lo, 0, cmp_value_lo); // clear upper 32 bits of low half
    __ or3(t1, cmp_value_lo, t1);          // t1 holds 64-bit compare value
    __ sllx(new_value_hi, 32, t2);
    __ srl(new_value_lo, 0, new_value_lo);
    __ or3(t2, new_value_lo, t2);          // t2 holds 64-bit value to swap
    // perform the compare and swap operation
    __ casx(addr, t1, t2);
    // generate condition code - if the swap succeeded, t2 ("new value" reg) was
    // overwritten with the original value in "addr" and will be equal to t1.
    // Produce icc flag for 32bit.
    __ sub(t1, t2, t2);
    __ srlx(t2, 32, t1);
    __ orcc(t2, t1, G0);
#endif
  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    Register addr = op->addr()->as_pointer_register();
    Register cmp_value = op->cmp_value()->as_register();
    Register new_value = op->new_value()->as_register();
    Register t1 = op->tmp1()->as_register();
    Register t2 = op->tmp2()->as_register();
    __ mov(cmp_value, t1);
    __ mov(new_value, t2);
    if (op->code() == lir_cas_obj) {
      if (UseCompressedOops) {
        __ encode_heap_oop(t1);
        __ encode_heap_oop(t2);
        __ cas(addr, t1, t2);
      } else {
        __ cas_ptr(addr, t1, t2);
      }
    } else {
      __ cas(addr, t1, t2);
    }
    __ cmp(t1, t2);
  } else {
    Unimplemented();
  }
}

void LIR_Assembler::set_24bit_FPU() {
  Unimplemented();
}


void LIR_Assembler::reset_FPU() {
  Unimplemented();
}


void LIR_Assembler::breakpoint() {
  __ breakpoint_trap();
}


void LIR_Assembler::push(LIR_Opr opr) {
  Unimplemented();
}


void LIR_Assembler::pop(LIR_Opr opr) {
  Unimplemented();
}


void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
  Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  Register dst = dst_opr->as_register();
  Register reg = mon_addr.base();
  int offset = mon_addr.disp();
  // compute pointer to BasicLock
  if (mon_addr.is_simm13()) {
    __ add(reg, offset, dst);
  } else {
    __ set(offset, dst);
    __ add(dst, reg, dst);
  }
}

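// emit_lock/emit_unlock generate the inline monitor fast path; the stub
// entry is taken when fast locking fails or UseFastLocking is off.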
"lock_reg must point to the displaced header"); 2919 // add debug info for NullPointerException only if one is possible 2920 if (op->info() != NULL) { 2921 add_debug_info_for_null_check_here(op->info()); 2922 } 2923 __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry()); 2924 } else { 2925 // always do slow locking 2926 // note: the slow locking code could be inlined here, however if we use 2927 // slow locking, speed doesn't matter anyway and this solution is 2928 // simpler and requires less duplicated code - additionally, the 2929 // slow locking code is the same in either case which simplifies 2930 // debugging 2931 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry()); 2932 __ delayed()->nop(); 2933 } 2934 } else { 2935 assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock"); 2936 if (UseFastLocking) { 2937 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2938 __ unlock_object(hdr, obj, lock, *op->stub()->entry()); 2939 } else { 2940 // always do slow unlocking 2941 // note: the slow unlocking code could be inlined here, however if we use 2942 // slow unlocking, speed doesn't matter anyway and this solution is 2943 // simpler and requires less duplicated code - additionally, the 2944 // slow unlocking code is the same in either case which simplifies 2945 // debugging 2946 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry()); 2947 __ delayed()->nop(); 2948 } 2949 } 2950 __ bind(*op->stub()->continuation()); 2951 } 2952 2953 2954 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { 2955 ciMethod* method = op->profiled_method(); 2956 int bci = op->profiled_bci(); 2957 2958 // Update counter for all call types 2959 ciMethodData* md = method->method_data_or_null(); 2960 assert(md != NULL, "Sanity"); 2961 ciProfileData* data = md->bci_to_data(bci); 2962 assert(data->is_CounterData(), "need CounterData for calls"); 2963 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 2964 Register mdo = op->mdo()->as_register(); 2965 #ifdef _LP64 2966 assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated"); 2967 Register tmp1 = op->tmp1()->as_register_lo(); 2968 #else 2969 assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated"); 2970 Register tmp1 = op->tmp1()->as_register(); 2971 #endif 2972 jobject2reg(md->constant_encoding(), mdo); 2973 int mdo_offset_bias = 0; 2974 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) + 2975 data->size_in_bytes())) { 2976 // The offset is large so bias the mdo by the base of the slot so 2977 // that the ld can use simm13s to reference the slots of the data 2978 mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset()); 2979 __ set(mdo_offset_bias, O7); 2980 __ add(mdo, O7, mdo); 2981 } 2982 2983 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); 2984 Bytecodes::Code bc = method->java_code_at_bci(bci); 2985 // Perform additional virtual call profiling for invokevirtual and 2986 // invokeinterface bytecodes 2987 if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) && 2988 C1ProfileVirtualCalls) { 2989 assert(op->recv()->is_single_cpu(), "recv must be allocated"); 2990 Register recv = op->recv()->as_register(); 2991 assert_different_registers(mdo, tmp1, recv); 2992 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); 2993 ciKlass* known_klass = 
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
#ifdef _LP64
  assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register_lo();
#else
  assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register();
#endif
  jobject2reg(md->constant_encoding(), mdo);
  int mdo_offset_bias = 0;
  if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
                            data->size_in_bytes())) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ld can use simm13s to reference the slots of the data
    mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
    __ set(mdo_offset_bias, O7);
    __ add(mdo, O7, mdo);
  }

  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
  Bytecodes::Code bc = method->java_code_at_bci(bci);
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
      C1ProfileVirtualCalls) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, tmp1, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the methodDataOop rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data,
                                                         VirtualCallData::receiver_count_offset(i)) -
                            mdo_offset_bias);
          __ ld_ptr(data_addr, tmp1);
          __ add(tmp1, DataLayout::counter_increment, tmp1);
          __ st_ptr(tmp1, data_addr);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
                            mdo_offset_bias);
          jobject2reg(known_klass->constant_encoding(), tmp1);
          __ st_ptr(tmp1, recv_addr);
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
                            mdo_offset_bias);
          __ ld_ptr(data_addr, tmp1);
          __ add(tmp1, DataLayout::counter_increment, tmp1);
          __ st_ptr(tmp1, data_addr);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ ld_ptr(counter_addr, tmp1);
      __ add(tmp1, DataLayout::counter_increment, tmp1);
      __ st_ptr(tmp1, counter_addr);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ ld_ptr(counter_addr, tmp1);
    __ add(tmp1, DataLayout::counter_increment, tmp1);
    __ st_ptr(tmp1, counter_addr);
  }
}

void LIR_Assembler::align_backward_branch_target() {
  __ align(OptoLoopAlignment);
}

void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
  // make sure we are expecting a delay
  // this has the side effect of clearing the delay state
  // so we can use _masm instead of _masm->delayed() to do the
  // code generation.
  __ delayed();

  // make sure we only emit one instruction
  int offset = code_offset();
  op->delay_op()->emit_code(this);
#ifdef ASSERT
  if (code_offset() - offset != NativeInstruction::nop_instruction_size) {
    op->delay_op()->print();
  }
  assert(code_offset() - offset == NativeInstruction::nop_instruction_size,
         "only one instruction can go in a delay slot");
#endif

  // we may also be emitting the call info for the instruction
  // which we are the delay slot of.
  CodeEmitInfo* call_info = op->call_info();
  if (call_info) {
    add_call_info(code_offset(), call_info);
  }

  if (VerifyStackAtCalls) {
    _masm->sub(FP, SP, O7);
    _masm->cmp(O7, initial_frame_size_in_bytes());
    _masm->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2);
  }
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  assert(left->is_register(), "can only handle registers");

  if (left->is_single_cpu()) {
    __ neg(left->as_register(), dest->as_register());
  } else if (left->is_single_fpu()) {
    __ fneg(FloatRegisterImpl::S, left->as_float_reg(), dest->as_float_reg());
  } else if (left->is_double_fpu()) {
    __ fneg(FloatRegisterImpl::D, left->as_double_reg(), dest->as_double_reg());
  } else {
    assert(left->is_double_cpu(), "Must be a long");
    Register Rlow = left->as_register_lo();
    Register Rhi = left->as_register_hi();
#ifdef _LP64
    __ sub(G0, Rlow, dest->as_register_lo());
#else
    __ subcc(G0, Rlow, dest->as_register_lo());
    __ subc (G0, Rhi,  dest->as_register_hi());
#endif
  }
}


void LIR_Assembler::fxch(int i) {
  Unimplemented();
}

void LIR_Assembler::fld(int i) {
  Unimplemented();
}

void LIR_Assembler::ffree(int i) {
  Unimplemented();
}

void LIR_Assembler::rt_call(LIR_Opr result, address dest,
                            const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {

  // if tmp is invalid, then the function being called doesn't destroy the thread
  if (tmp->is_valid()) {
    __ save_thread(tmp->as_register());
  }
  __ call(dest, relocInfo::runtime_call_type);
  __ delayed()->nop();
  if (info != NULL) {
    add_call_info_here(info);
  }
  if (tmp->is_valid()) {
    __ restore_thread(tmp->as_register());
  }

#ifdef ASSERT
  __ verify_thread();
#endif // ASSERT
}

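// On 32-bit SPARC a volatile long must be read and written as a single
// 64-bit memory access to stay atomic, so the two register halves are
// packed into one 64-bit value and moved with ldx/stx (or ldd/std when V9
// instructions are unavailable); everything else goes through the normal
// move path.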
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
#ifdef _LP64
  ShouldNotReachHere();
#endif

  NEEDS_CLEANUP;
  if (type == T_LONG) {
    LIR_Address* mem_addr = dest->is_address() ? dest->as_address_ptr() : src->as_address_ptr();

    // (extended to allow indexed as well as constant displaced for JSR-166)
    Register idx = noreg; // contains either constant offset or index

    int disp = mem_addr->disp();
    if (mem_addr->index() == LIR_OprFact::illegalOpr) {
      if (!Assembler::is_simm13(disp)) {
        idx = O7;
        __ set(disp, idx);
      }
    } else {
      assert(disp == 0, "not both indexed and disp");
      idx = mem_addr->index()->as_register();
    }

    int null_check_offset = -1;

    Register base = mem_addr->base()->as_register();
    if (src->is_register() && dest->is_address()) {
      // G4 is high half, G5 is low half
      if (VM_Version::v9_instructions_work()) {
        // clear the top bits of G5, and scale up G4
        __ srl (src->as_register_lo(), 0, G5);
        __ sllx(src->as_register_hi(), 32, G4);
        // combine the two halves into the 64 bits of G4
        __ or3(G4, G5, G4);
        null_check_offset = __ offset();
        if (idx == noreg) {
          __ stx(G4, base, disp);
        } else {
          __ stx(G4, base, idx);
        }
      } else {
        __ mov (src->as_register_hi(), G4);
        __ mov (src->as_register_lo(), G5);
        null_check_offset = __ offset();
        if (idx == noreg) {
          __ std(G4, base, disp);
        } else {
          __ std(G4, base, idx);
        }
      }
    } else if (src->is_address() && dest->is_register()) {
      null_check_offset = __ offset();
      if (VM_Version::v9_instructions_work()) {
        if (idx == noreg) {
          __ ldx(base, disp, G5);
        } else {
          __ ldx(base, idx, G5);
        }
        __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
        __ mov (G5, dest->as_register_lo());     // copy low half into lo
      } else {
        if (idx == noreg) {
          __ ldd(base, disp, G4);
        } else {
          __ ldd(base, idx, G4);
        }
        // G4 is high half, G5 is low half
        __ mov (G4, dest->as_register_hi());
        __ mov (G5, dest->as_register_lo());
      }
    } else {
      Unimplemented();
    }
    if (info != NULL) {
      add_debug_info_for_null_check(null_check_offset, info);
    }

  } else {
    // use normal move for all other volatiles since they don't need
    // special handling to remain atomic.
    move_op(src, dest, type, lir_patch_none, info, false, false, false);
  }
}

void LIR_Assembler::membar() {
  // only StoreLoad membars are ever explicitly needed on sparcs in TSO mode
  __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) );
}

void LIR_Assembler::membar_acquire() {
  // no-op on TSO
}

void LIR_Assembler::membar_release() {
  // no-op on TSO
}

// Pack two sequential registers containing 32 bit values
// into a single 64 bit register.
// src and src->successor() are packed into dst
// src and dst may be the same register.
// Note: src is destroyed
void LIR_Assembler::pack64(LIR_Opr src, LIR_Opr dst) {
  Register rs = src->as_register();
  Register rd = dst->as_register_lo();
  __ sllx(rs, 32, rs);
  __ srl(rs->successor(), 0, rs->successor());
  __ or3(rs, rs->successor(), rd);
}

// Unpack a 64 bit value in a register into
// two sequential registers.
// src is unpacked into dst and dst->successor()
void LIR_Assembler::unpack64(LIR_Opr src, LIR_Opr dst) {
  Register rs = src->as_register_lo();
  Register rd = dst->as_register_hi();
  assert_different_registers(rs, rd, rd->successor());
  __ srlx(rs, 32, rd);
  __ srl (rs, 0, rd->successor());
}


void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
  LIR_Address* addr = addr_opr->as_address_ptr();
  assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1 && Assembler::is_simm13(addr->disp()), "can't handle complex addresses yet");

  __ add(addr->base()->as_pointer_register(), addr->disp(), dest->as_pointer_register());
}


void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
  __ mov(G2_thread, result_reg->as_register());
}


void LIR_Assembler::peephole(LIR_List* lir) {
  LIR_OpList* inst = lir->instructions_list();
  for (int i = 0; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);
    switch (op->code()) {
      case lir_cond_float_branch:
      case lir_branch: {
        LIR_OpBranch* branch = op->as_OpBranch();
        assert(branch->info() == NULL, "shouldn't be state on branches anymore");
        LIR_Op* delay_op = NULL;
        // we'd like to be able to pull following instructions into
        // this slot but we don't know enough to do it safely yet so
        // only optimize block to block control flow.
        if (LIRFillDelaySlots && branch->block()) {
          LIR_Op* prev = inst->at(i - 1);
          if (prev && LIR_Assembler::is_single_instruction(prev) && prev->info() == NULL) {
            // swap previous instruction into delay slot
            inst->at_put(i - 1, op);
            inst->at_put(i, new LIR_OpDelay(prev, op->info()));
#ifndef PRODUCT
            if (LIRTracePeephole) {
              tty->print_cr("delayed");
              inst->at(i - 1)->print();
              inst->at(i)->print();
              tty->cr();
            }
#endif
            continue;
          }
        }

        if (!delay_op) {
          delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), NULL);
        }
        inst->insert_before(i + 1, delay_op);
        break;
      }
      case lir_static_call:
      case lir_virtual_call:
      case lir_icvirtual_call:
      case lir_optvirtual_call:
      case lir_dynamic_call: {
        LIR_Op* prev = inst->at(i - 1);
        if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL &&
            (op->code() != lir_virtual_call ||
             !prev->result_opr()->is_single_cpu() ||
             prev->result_opr()->as_register() != O0) &&
            LIR_Assembler::is_single_instruction(prev)) {
          // Only moves without info can be put into the delay slot.
          // Also don't allow the setup of the receiver in the delay
          // slot for vtable calls.
          inst->at_put(i - 1, op);
          inst->at_put(i, new LIR_OpDelay(prev, op->info()));
#ifndef PRODUCT
          if (LIRTracePeephole) {
            tty->print_cr("delayed");
            inst->at(i - 1)->print();
            inst->at(i)->print();
            tty->cr();
          }
#endif
        } else {
          LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
          inst->insert_before(i + 1, delay_op);
          i++;
        }

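        // The TIERED 32-bit build returns longs in G1 from compiled code but
        // in O0/O1 elsewhere, so long-returning calls are rewritten here to
        // target G1 and an unpack64 is appended to split the result back
        // into the register pair.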
#if defined(TIERED) && !defined(_LP64)
        // fixup the return value from G1 to O0/O1 for long returns.
        // It's done here instead of in LIRGenerator because there's
        // such a mismatch between the single reg and double reg
        // calling convention.
        LIR_OpJavaCall* callop = op->as_OpJavaCall();
        if (callop->result_opr() == FrameMap::out_long_opr) {
          LIR_OpJavaCall* call;
          LIR_OprList* arguments = new LIR_OprList(callop->arguments()->length());
          for (int a = 0; a < callop->arguments()->length(); a++) {
            arguments->append(callop->arguments()->at(a));
          }
          if (op->code() == lir_virtual_call) {
            call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
                                      callop->vtable_offset(), arguments, callop->info());
          } else {
            call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
                                      callop->addr(), arguments, callop->info());
          }
          inst->at_put(i - 1, call);
          inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(),
                                                 T_LONG, lir_patch_none, NULL));
        }
#endif
        break;
      }
    }
  }
}




#undef __