/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/barrierSet.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"

#define __ _masm->


//------------------------------------------------------------


bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  if (opr->is_constant()) {
    LIR_Const* constant = opr->as_constant_ptr();
    switch (constant->type()) {
      case T_INT: {
        jint value = constant->as_jint();
        return Assembler::is_simm13(value);
      }

      default:
        return false;
    }
  }
  return false;
}
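// Note: SPARC arithmetic and memory instructions take a 13-bit signed
// immediate, so is_simm13 accepts values in [-4096, 4095]; e.g. 4095
// qualifies as a "small" constant above while 4096 must be materialized
// into a register first.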
bool LIR_Assembler::is_single_instruction(LIR_Op* op) {
  switch (op->code()) {
    case lir_null_check:
      return true;


    case lir_add:
    case lir_ushr:
    case lir_shr:
    case lir_shl:
      // integer shifts and adds are always one instruction
      return op->result_opr()->is_single_cpu();


    case lir_move: {
      LIR_Op1* op1 = op->as_Op1();
      LIR_Opr src = op1->in_opr();
      LIR_Opr dst = op1->result_opr();

      if (src == dst) {
        NEEDS_CLEANUP;
        // this works around a problem where moves with the same src and dst
        // end up in the delay slot and the assembler swallows the mov since
        // it has no effect; it then complains because the delay slot is
        // empty. Returning false stops the optimizer from putting this move
        // in the delay slot.
        return false;
      }

      // don't put moves involving oops into the delay slot since the VerifyOops code
      // will make it much larger than a single instruction.
      if (VerifyOops) {
        return false;
      }

      if (src->is_double_cpu() || dst->is_double_cpu() || op1->patch_code() != lir_patch_none ||
          ((src->is_double_fpu() || dst->is_double_fpu()) && op1->move_kind() != lir_move_normal)) {
        return false;
      }

      if (UseCompressedOops) {
        if (dst->is_address() && !dst->is_stack() && (dst->type() == T_OBJECT || dst->type() == T_ARRAY)) return false;
        if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
      }

      if (dst->is_register()) {
        if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
          return !PatchALot;
        } else if (src->is_single_stack()) {
          return true;
        }
      }

      if (src->is_register()) {
        if (dst->is_address() && Assembler::is_simm13(dst->as_address_ptr()->disp())) {
          return !PatchALot;
        } else if (dst->is_single_stack()) {
          return true;
        }
      }

      if (dst->is_register() &&
          ((src->is_register() && src->is_single_word() && src->is_same_type(dst)) ||
           (src->is_constant() && LIR_Assembler::is_small_constant(op->as_Op1()->in_opr())))) {
        return true;
      }

      return false;
    }

    default:
      return false;
  }
  ShouldNotReachHere();
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::O0_oop_opr;
}


LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::I0_opr;
}


int LIR_Assembler::initial_frame_size_in_bytes() {
  return in_bytes(frame_map()->framesize_in_bytes());
}


// inline cache check: the inline cached class is in G5_inline_cache_reg (G5);
// we fetch the class of the receiver (O0) and compare it with the cached class.
// If they do not match we jump to the slow case.
int LIR_Assembler::check_icache() {
  int offset = __ offset();
  __ inline_cache_check(O0, G5_inline_cache_reg);
  return offset;
}


void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp):
  //
  //   1. Create a new compiled activation.
  //   2. Initialize local variables in the compiled activation. The expression stack must be empty
  //      at the osr_bci; it is not initialized.
  //   3. Jump to the continuation address in compiled code to resume execution.

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
  __ build_frame(initial_frame_size_in_bytes());

  // The OSR buffer is
  //
  //   locals[nlocals-1..0]
  //   monitors[number_of_locks-1..0]
  //
  // The locals are a direct copy of the interpreter frame, so in the osr buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (the receiver) from the interpreter.
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, and the nth lock slot in the osr buffer is the
  // 0th lock in the interpreter frame (the method lock if it is a synchronized
  // method).

  // Initialize monitors in the compiled activation.
  //   I0: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.
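  // For illustration (hypothetical numbers): with max_locals() == 3 and
  // number_of_locks == 2, monitor_offset below is 3*wordSize + 2*wordSize*1;
  // monitor i then lives at monitor_offset - i*2*wordSize, with the lock word
  // at +0 and the object oop at +wordSize, matching the 2-word entries packed
  // by SharedRuntime::OSR_migration_begin().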
  Register OSR_buf = osrBufferPointer()->as_register();
  {
    assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
        __ cmp_and_br_short(O7, G0, Assembler::notEqual, Assembler::pt, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif // ASSERT
      // Copy the lock field into the compiled activation.
      __ ld_ptr(OSR_buf, slot_offset + 0, O7);
      __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
      __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
      __ st_ptr(O7, frame_map()->address_for_monitor_object(i));
    }
  }
}


// Optimized Library calls
// This is the fast version of java.lang.String.compare; it has no
// OSR entry, and therefore we generate a slow version for OSRs.
void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst, CodeEmitInfo* info) {
  Register str0 = left->as_register();
  Register str1 = right->as_register();

  Label Ldone;

  Register result = dst->as_register();
  {
    // Get a pointer to the first character of string0 in tmp0
    // and get string0.length() in str0.
    // Get a pointer to the first character of string1 in tmp1
    // and get string1.length() in str1.
    // Also, get string0.length()-string1.length() in
    // O7 and get the condition code set.
    // Note: some instructions have been hoisted for better instruction scheduling.

    Register tmp0 = L0;
    Register tmp1 = L1;
    Register tmp2 = L2;

    int value_offset = java_lang_String::value_offset_in_bytes(); // char array
    if (java_lang_String::has_offset_field()) {
      int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
      int count_offset = java_lang_String::count_offset_in_bytes();
      __ load_heap_oop(str0, value_offset, tmp0);
      __ ld(str0, offset_offset, tmp2);
      __ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
      __ ld(str0, count_offset, str0);
      __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
    } else {
      __ load_heap_oop(str0, value_offset, tmp1);
      __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
      __ ld(tmp1, arrayOopDesc::length_offset_in_bytes(), str0);
    }

    // str1 may be null
    add_debug_info_for_null_check_here(info);

    if (java_lang_String::has_offset_field()) {
      int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
      int count_offset = java_lang_String::count_offset_in_bytes();
      __ load_heap_oop(str1, value_offset, tmp1);
      __ add(tmp0, tmp2, tmp0);

      __ ld(str1, offset_offset, tmp2);
      __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
      __ ld(str1, count_offset, str1);
      __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
      __ add(tmp1, tmp2, tmp1);
    } else {
      __ load_heap_oop(str1, value_offset, tmp2);
      __ add(tmp2, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
      __ ld(tmp2, arrayOopDesc::length_offset_in_bytes(), str1);
    }
    __ subcc(str0, str1, O7);
  }
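  // Sketch of the comparison strategy used below: base0/base1 are advanced to
  // the end of the scaled common prefix and limit becomes its negated byte
  // length, so lduh(base, limit, chr) walks forward as limit counts up toward
  // zero; e.g. (hypothetical) a min length of 4 chars gives limit = -8, then
  // -6, -4, -2 on successive iterations.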
  {
    // Compute the minimum of the string lengths, scale it and store it in limit
    Register count0 = I0;
    Register count1 = I1;
    Register limit  = L3;

    Label Lskip;
    __ sll(count0, exact_log2(sizeof(jchar)), limit);             // string0 is shorter
    __ br(Assembler::greater, true, Assembler::pt, Lskip);
    __ delayed()->sll(count1, exact_log2(sizeof(jchar)), limit);  // string1 is shorter
    __ bind(Lskip);

    // If either string is empty (or both of them) the result is the difference in lengths
    __ cmp(limit, 0);
    __ br(Assembler::equal, true, Assembler::pn, Ldone);
    __ delayed()->mov(O7, result);  // result is difference in lengths
  }

  {
    // Neither string is empty
    Label Lloop;

    Register base0 = L0;
    Register base1 = L1;
    Register chr0  = I0;
    Register chr1  = I1;
    Register limit = L3;

    // Shift base0 and base1 to the end of the arrays, negate limit
    __ add(base0, limit, base0);
    __ add(base1, limit, base1);
    __ neg(limit);  // limit = -min{string0.length(), string1.length()}

    __ lduh(base0, limit, chr0);
    __ bind(Lloop);
    __ lduh(base1, limit, chr1);
    __ subcc(chr0, chr1, chr0);
    __ br(Assembler::notZero, false, Assembler::pn, Ldone);
    assert(chr0 == result, "result must be pre-placed");
    __ delayed()->inccc(limit, sizeof(jchar));
    __ br(Assembler::notZero, true, Assembler::pt, Lloop);
    __ delayed()->lduh(base0, limit, chr0);
  }

  // If strings are equal up to min length, return the length difference.
  __ mov(O7, result);

  // Otherwise, return the difference between the first mismatched chars.
  __ bind(Ldone);
}


// --------------------------------------------------------------------------------------------

void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no) {
  if (!GenerateSynchronizationCode) return;

  Register obj_reg  = obj_opr->as_register();
  Register lock_reg = lock_opr->as_register();

  Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  Register reg = mon_addr.base();
  int offset = mon_addr.disp();
  // compute pointer to BasicLock
  if (mon_addr.is_simm13()) {
    __ add(reg, offset, lock_reg);
  } else {
    __ set(offset, lock_reg);
    __ add(reg, lock_reg, lock_reg);
  }
  // unlock object
  MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, UseFastLocking, monitor_no);
  // _slow_case_stubs->append(slow_case);
  // temporary fix: must be created after the exception handler, therefore as a call stub
  _slow_case_stubs->append(slow_case);
  if (UseFastLocking) {
    // try inlined fast unlocking first, revert to slow locking if it fails
    // note: lock_reg points to the displaced header since the displaced header offset is 0!
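    // (Background, as a rough sketch: with stack locking, the BasicLock's
    // displaced-header field holds the object's original mark word, and since
    // that field is at offset 0, the BasicLock pointer in lock_reg can be used
    // directly as the address of the displaced header that unlock_object
    // compares and swaps back into the object.)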
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
  } else {
    // always do slow unlocking
    // note: the slow unlocking code could be inlined here, however if we use
    // slow unlocking, speed doesn't matter anyway and this solution is
    // simpler and requires less duplicated code - additionally, the
    // slow unlocking code is the same in either case which simplifies
    // debugging
    __ br(Assembler::always, false, Assembler::pt, *slow_case->entry());
    __ delayed()->nop();
  }
  // done
  __ bind(*slow_case->continuation());
}


int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  ciMethod* method = compilation()->method();

  address handler_base = __ start_a_stub(exception_handler_size);

  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
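// Roughly, the handler below: fetches the pending exception oop from
// thread-local storage and clears the exception fields, unlocks the receiver
// if the method is synchronized, fires the dtrace method-exit probe if
// requested, and finally jumps to Runtime1::unwind_exception_id to continue
// unwinding in the caller.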
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0);
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(O0);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(O0, I0);  // Preserve the exception
  }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::I1_opr);
    stub = new MonitorExitStub(FrameMap::I1_opr, true, 0);
    __ unlock_object(I3, I2, I1, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ mov(G2_thread, O0);
    jobject2reg(method()->constant_encoding(), O1);
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type);
    __ delayed()->nop();
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(I0, O0);  // Restore the exception
  }

  // dispatch to the unwind logic
  __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for deopt handler
  ciMethod* method = compilation()->method();
  address handler_base = __ start_a_stub(deopt_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
  __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
  __ delayed()->nop();
  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ set(NULL_WORD, reg);
  } else {
    int oop_index = __ oop_recorder()->find_index(o);
    RelocationHolder rspec = oop_Relocation::spec(oop_index);
    __ set(NULL_WORD, reg, rspec); // Will be set when the nmethod is created
  }
}


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in the oop table to hold the oop once it's been patched
  int oop_index = __ oop_recorder()->allocate_index((jobject)NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, oop_index);

  AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
  assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
  // NULL will be dynamically patched later and the patched value may be large. We must
  // therefore generate the sethi/add as placeholders.
  __ patchable_set(addrlit, reg);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  Register Rdividend = op->in_opr1()->as_register();
  Register Rdivisor  = noreg;
  Register Rscratch  = op->in_opr3()->as_register();
  Register Rresult   = op->result_opr()->as_register();
  int divisor = -1;

  if (op->in_opr2()->is_register()) {
    Rdivisor = op->in_opr2()->as_register();
  } else {
    divisor = op->in_opr2()->as_constant_ptr()->as_jint();
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
  }

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor  != Rscratch, "");
  assert(op->code() == lir_idiv || op->code() == lir_irem, "Must be irem or idiv");

  if (Rdivisor == noreg && is_power_of_2(divisor)) {
    // convert division by a power of two into some shifts and logical operations
    if (op->code() == lir_idiv) {
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ sra(Rscratch, log2_intptr(divisor), Rresult);
      return;
    } else {
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ andn(Rscratch, divisor - 1, Rscratch);
      __ sub(Rdividend, Rscratch, Rresult);
      return;
    }
  }
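  // Worked example of the shift sequence above (illustrative values): for
  // divisor 4 and Rdividend = -7, sra(-7, 31) = -1, and3(-1, 3) = 3,
  // add(-7, 3) = -4, sra(-4, 2) = -1, matching Java's truncated division
  // -7/4 == -1; the added bias is what rounds toward zero instead of toward
  // negative infinity.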
  __ sra(Rdividend, 31, Rscratch);
  __ wry(Rscratch);
  if (!VM_Version::v9_instructions_work()) {
    // v9 doesn't require these nops
    __ nop();
    __ nop();
    __ nop();
    __ nop();
  }

  add_debug_info_for_div0_here(op->info());

  if (Rdivisor != noreg) {
    __ sdivcc(Rdividend, Rdivisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  } else {
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
    __ sdivcc(Rdividend, divisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  }

  Label skip;
  __ br(Assembler::overflowSet, true, Assembler::pn, skip);
  __ delayed()->Assembler::sethi(0x80000000, (op->code() == lir_idiv ? Rresult : Rscratch));
  __ bind(skip);

  if (op->code() == lir_irem) {
    if (Rdivisor != noreg) {
      __ smul(Rscratch, Rdivisor, Rscratch);
    } else {
      __ smul(Rscratch, divisor, Rscratch);
    }
    __ sub(Rdividend, Rscratch, Rresult);
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif
  assert(op->info() == NULL, "shouldn't have CodeEmitInfo");

  if (op->cond() == lir_cond_always) {
    __ br(Assembler::always, false, Assembler::pt, *(op->label()));
  } else if (op->code() == lir_cond_float_branch) {
    assert(op->ublock() != NULL, "must have unordered successor");
    bool is_unordered = (op->ublock() == op->block());
    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::f_equal;    break;
      case lir_cond_notEqual:     acond = Assembler::f_notEqual; break;
      case lir_cond_less:         acond = (is_unordered ? Assembler::f_unorderedOrLess           : Assembler::f_less);           break;
      case lir_cond_greater:      acond = (is_unordered ? Assembler::f_unorderedOrGreater        : Assembler::f_greater);        break;
      case lir_cond_lessEqual:    acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual    : Assembler::f_lessOrEqual);    break;
      case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual : Assembler::f_greaterOrEqual); break;
      default:                    ShouldNotReachHere();
    };

    if (!VM_Version::v9_instructions_work()) {
      __ nop();
    }
    __ fb( acond, false, Assembler::pn, *(op->label()));
  } else {
    assert (op->code() == lir_branch, "just checking");

    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::equal;                break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
      case lir_cond_less:         acond = Assembler::less;                 break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
      case lir_cond_greater:      acond = Assembler::greater;              break;
      case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
      case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
      default:                    ShouldNotReachHere();
    };

    // sparc has different condition codes for testing 32-bit
    // vs. 64-bit values. We could always test xcc if we could
    // guarantee that 32-bit loads always sign extended, but that isn't
    // true and since sign extension isn't free, it would impose a
    // slight cost.
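    // Concretely (as an illustration): after a cmp of two ints whose upper
    // 32 bits are garbage, br tests icc, the 32-bit condition codes, while
    // brx tests xcc, the 64-bit ones; testing xcc here could misjudge, e.g.,
    // 0x100000000 vs 0 when only the low words are meaningful.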
#ifdef _LP64
    if (op->type() == T_INT) {
      __ br(acond, false, Assembler::pn, *(op->label()));
    } else
#endif
      __ brx(acond, false, Assembler::pn, *(op->label()));
  }
  // The peephole pass fills the delay slot
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  Bytecodes::Code code = op->bytecode();
  LIR_Opr dst = op->result_opr();

  switch (code) {
    case Bytecodes::_i2l: {
      Register rlo  = dst->as_register_lo();
      Register rhi  = dst->as_register_hi();
      Register rval = op->in_opr()->as_register();
#ifdef _LP64
      __ sra(rval, 0, rlo);
#else
      __ mov(rval, rlo);
      __ sra(rval, BitsPerInt-1, rhi);
#endif
      break;
    }
    case Bytecodes::_i2d:
    case Bytecodes::_i2f: {
      bool is_double = (code == Bytecodes::_i2d);
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width w = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      if (rsrc != rdst) {
        __ fmov(FloatRegisterImpl::S, rsrc, rdst);
      }
      __ fitof(w, rdst, rdst);
      break;
    }
    case Bytecodes::_f2i: {
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      Address addr = frame_map()->address_for_slot(dst->single_stack_ix());
      Label L;
      // result must be 0 if value is NaN; test by comparing value to itself
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
      if (!VM_Version::v9_instructions_work()) {
        __ nop();
      }
      __ fb(Assembler::f_unordered, true, Assembler::pn, L);
      __ delayed()->st(G0, addr); // annulled if contents of rsrc is not NaN
      __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
      // move integer result from float register to int register
      __ stf(FloatRegisterImpl::S, rsrc, addr.base(), addr.disp());
      __ bind(L);
      break;
    }
    case Bytecodes::_l2i: {
      Register rlo  = op->in_opr()->as_register_lo();
      Register rhi  = op->in_opr()->as_register_hi();
      Register rdst = dst->as_register();
#ifdef _LP64
      __ sra(rlo, 0, rdst);
#else
      __ mov(rlo, rdst);
#endif
      break;
    }
    case Bytecodes::_d2f:
    case Bytecodes::_f2d: {
      bool is_double = (code == Bytecodes::_f2d);
      assert((!is_double && dst->is_single_fpu()) || (is_double && dst->is_double_fpu()), "check");
      LIR_Opr val = op->in_opr();
      FloatRegister rval = (code == Bytecodes::_d2f) ? val->as_double_reg() : val->as_float_reg();
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width vw = is_double ? FloatRegisterImpl::S : FloatRegisterImpl::D;
      FloatRegisterImpl::Width dw = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      __ ftof(vw, dw, rval, rdst);
      break;
    }
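    // The narrowing conversions below work by a shift pair: sll moves the
    // kept low bits to the top, then an arithmetic (sra) or logical (srl)
    // right shift brings them back, sign- or zero-extending them. E.g. for
    // _i2b, shift = 24 and an input of 0x1FF yields 0xFF000000 after sll and
    // -1 after sra, matching Java's (byte)0x1FF == -1; _i2c uses srl so the
    // result is zero-extended, since char is unsigned.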
    case Bytecodes::_i2s:
    case Bytecodes::_i2b: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
      int shift = (code == Bytecodes::_i2b) ? (BitsPerInt - T_BYTE_aelem_bytes * BitsPerByte) : (BitsPerInt - BitsPerShort);
      __ sll(rval, shift, rdst);
      __ sra(rdst, shift, rdst);
      break;
    }
    case Bytecodes::_i2c: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
      int shift = BitsPerInt - T_CHAR_aelem_bytes * BitsPerByte;
      __ sll(rval, shift, rdst);
      __ srl(rdst, shift, rdst);
      break;
    }

    default: ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code) {
  // do nothing since all instructions are word aligned on sparc
}


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  __ call(op->addr(), rtype);
  // The peephole pass fills the delay slot, add_call_info is done in
  // LIR_Assembler::emit_delay.
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  RelocationHolder rspec = virtual_call_Relocation::spec(pc());
  __ set_oop((jobject)Universe::non_oop_word(), G5_inline_cache_reg);
  __ relocate(rspec);
  __ call(op->addr(), relocInfo::none);
  // The peephole pass fills the delay slot, add_call_info is done in
  // LIR_Assembler::emit_delay.
}


void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  add_debug_info_for_null_check_here(op->info());
  __ load_klass(O0, G3_scratch);
  if (Assembler::is_simm13(op->vtable_offset())) {
    __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
  } else {
    // This will generate 2 instructions
    __ set(op->vtable_offset(), G5_method);
    // ld_ptr, set_hi, set
    __ ld_ptr(G3_scratch, G5_method, G5_method);
  }
  __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3_scratch);
  __ callr(G3_scratch, G0);
  // the peephole pass fills the delay slot
}
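// A rough sketch of the dispatch sequence vtable_call emits for a small
// vtable_offset (illustrative operands, not the exact encoding):
//   ld_ptr [O0 + klass_offset], G3            ; receiver klass (via load_klass)
//   ld_ptr [G3 + vtable_offset], G5           ; methodOop from the vtable slot
//   ld_ptr [G5 + from_compiled_offset], G3    ; compiled entry point
//   jmpl   G3, %g0                            ; call it (callr)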
int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
  int store_offset;
  // note the parentheses: we want "offset + 4" range-checked for longs, not
  // the boolean (type == T_LONG) added to offset
  if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
    assert(!unaligned, "can't handle this");
    // for offsets larger than a simm13 we set up the offset in O7
    __ set(offset, O7);
    store_offset = store(from_reg, base, O7, type, wide);
  } else {
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(from_reg->as_register());
    }
    store_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ stb(from_reg->as_register(), base, offset); break;
      case T_CHAR  : __ sth(from_reg->as_register(), base, offset); break;
      case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
      case T_INT   : __ stw(from_reg->as_register(), base, offset); break;
      case T_LONG  :
#ifdef _LP64
        if (unaligned || PatchALot) {
          __ srax(from_reg->as_register_lo(), 32, O7);
          __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
          __ stw(O7,                         base, offset + hi_word_offset_in_bytes);
        } else {
          __ stx(from_reg->as_register_lo(), base, offset);
        }
#else
        assert(Assembler::is_simm13(offset + 4), "must be");
        __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
        __ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
#endif
        break;
      case T_ADDRESS:
        __ st_ptr(from_reg->as_register(), base, offset);
        break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(from_reg->as_register(), G3_scratch);
            store_offset = code_offset();
            __ stw(G3_scratch, base, offset);
          } else {
            __ st_ptr(from_reg->as_register(), base, offset);
          }
          break;
        }

      case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break;
      case T_DOUBLE:
        {
          FloatRegister reg = from_reg->as_double_reg();
          // split unaligned stores
          if (unaligned || PatchALot) {
            assert(Assembler::is_simm13(offset + 4), "must be");
            __ stf(FloatRegisterImpl::S, reg->successor(), base, offset + 4);
            __ stf(FloatRegisterImpl::S, reg,              base, offset);
          } else {
            __ stf(FloatRegisterImpl::D, reg, base, offset);
          }
          break;
        }
      default      : ShouldNotReachHere();
    }
  }
  return store_offset;
}
int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(from_reg->as_register());
  }
  int store_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ stb(from_reg->as_register(), base, disp); break;
    case T_CHAR  : __ sth(from_reg->as_register(), base, disp); break;
    case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
    case T_INT   : __ stw(from_reg->as_register(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ stx(from_reg->as_register_lo(), base, disp);
#else
      assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match");
      __ std(from_reg->as_register_hi(), base, disp);
#endif
      break;
    case T_ADDRESS:
      __ st_ptr(from_reg->as_register(), base, disp);
      break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ encode_heap_oop(from_reg->as_register(), G3_scratch);
          store_offset = code_offset();
          __ stw(G3_scratch, base, disp);
        } else {
          __ st_ptr(from_reg->as_register(), base, disp);
        }
        break;
      }
    case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(),  base, disp); break;
    case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break;
    default      : ShouldNotReachHere();
  }
  return store_offset;
}
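// Background on the UseCompressedOops paths above and below (a sketch, not
// tied to a particular heap layout): encode_heap_oop narrows a 64-bit oop to
// a 32-bit value, typically (oop - heap_base) >> 3, which is stored with stw;
// decode_heap_oop reverses this after the matching lduw. The "wide" flag
// forces a full-width store/load even when compressed oops are enabled.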
int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
  int load_offset;
  // parenthesized as in store() above: range-check "offset + 4" for longs
  if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
    assert(base != O7, "destroying register");
    assert(!unaligned, "can't handle this");
    // for offsets larger than a simm13 we set up the offset in O7
    __ set(offset, O7);
    load_offset = load(base, O7, to_reg, type, wide);
  } else {
    load_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ ldsb(base, offset, to_reg->as_register()); break;
      case T_CHAR  : __ lduh(base, offset, to_reg->as_register()); break;
      case T_SHORT : __ ldsh(base, offset, to_reg->as_register()); break;
      case T_INT   : __ ld(base, offset, to_reg->as_register()); break;
      case T_LONG  :
        if (!unaligned) {
#ifdef _LP64
          __ ldx(base, offset, to_reg->as_register_lo());
#else
          assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
                 "must be sequential");
          __ ldd(base, offset, to_reg->as_register_hi());
#endif
        } else {
#ifdef _LP64
          assert(base != to_reg->as_register_lo(), "can't handle this");
          assert(O7 != to_reg->as_register_lo(), "can't handle this");
          __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
          __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
          __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
          __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
#else
          if (base == to_reg->as_register_lo()) {
            __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
            __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
          } else {
            __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
            __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
          }
#endif
        }
        break;
      case T_ADDRESS: __ ld_ptr(base, offset, to_reg->as_register()); break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            __ lduw(base, offset, to_reg->as_register());
            __ decode_heap_oop(to_reg->as_register());
          } else {
            __ ld_ptr(base, offset, to_reg->as_register());
          }
          break;
        }
      case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break;
      case T_DOUBLE:
        {
          FloatRegister reg = to_reg->as_double_reg();
          // split unaligned loads
          if (unaligned || PatchALot) {
            __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
            __ ldf(FloatRegisterImpl::S, base, offset,     reg);
          } else {
            __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
          }
          break;
        }
      default      : ShouldNotReachHere();
    }
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(to_reg->as_register());
    }
  }
  return load_offset;
}


int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
  int load_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ ldsb(base, disp, to_reg->as_register()); break;
    case T_CHAR  : __ lduh(base, disp, to_reg->as_register()); break;
    case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break;
    case T_INT   : __ ld(base, disp, to_reg->as_register()); break;
    case T_ADDRESS: __ ld_ptr(base, disp, to_reg->as_register()); break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ lduw(base, disp, to_reg->as_register());
          __ decode_heap_oop(to_reg->as_register());
        } else {
          __ ld_ptr(base, disp, to_reg->as_register());
        }
        break;
      }
    case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
    case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
    case T_LONG  :
#ifdef _LP64
      __ ldx(base, disp, to_reg->as_register_lo());
#else
      assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
             "must be sequential");
      __ ldd(base, disp, to_reg->as_register_hi());
#endif
      break;
    default      : ShouldNotReachHere();
  }
  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(to_reg->as_register());
  }
  return load_offset;
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
    case T_INT:
    case T_FLOAT: {
      Register src_reg = O7;
      int value = c->as_jint_bits();
      if (value == 0) {
        src_reg = G0;
      } else {
        __ set(value, O7);
      }
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ stw(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_ADDRESS: {
      Register src_reg = O7;
      int value = c->as_jint_bits();
      if (value == 0) {
        src_reg = G0;
      } else {
        __ set(value, O7);
      }
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ st_ptr(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_OBJECT: {
      Register src_reg = O7;
      jobject2reg(c->as_jobject(), src_reg);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ st_ptr(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());

      Register tmp = O7;
      int value_lo = c->as_jint_lo_bits();
      if (value_lo == 0) {
        tmp = G0;
      } else {
        __ set(value_lo, O7);
      }
      __ stw(tmp, addr.base(), addr.disp() + lo_word_offset_in_bytes);
      int value_hi = c->as_jint_hi_bits();
      if (value_hi == 0) {
        tmp = G0;
      } else {
        __ set(value_hi, O7);
      }
      __ stw(tmp, addr.base(), addr.disp() + hi_word_offset_in_bytes);
      break;
    }
    default:
      Unimplemented();
  }
}
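// Note the pattern above (echoed in const2mem below): a zero word is stored
// straight from G0, the hard-wired zero register, avoiding a set(); longs and
// doubles are split into two 32-bit halves, e.g. (illustratively) the constant
// 0x0000000100000002 is stored as hi word 1 and lo word 2 at their respective
// word offsets.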
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();
  Register base = addr->base()->as_pointer_register();
  int offset = -1;

  switch (c->type()) {
    case T_INT:
    case T_FLOAT:
    case T_ADDRESS: {
      LIR_Opr tmp = FrameMap::O7_opr;
      int value = c->as_jint_bits();
      if (value == 0) {
        tmp = FrameMap::G0_opr;
      } else if (Assembler::is_simm13(value)) {
        __ set(value, O7);
      }
      if (addr->index()->is_valid()) {
        assert(addr->disp() == 0, "must be zero");
        offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
      } else {
        assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
        offset = store(tmp, base, addr->disp(), type, wide, false);
      }
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      assert(!addr->index()->is_valid(), "can't handle reg reg address here");
      assert(Assembler::is_simm13(addr->disp()) &&
             Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses");

      LIR_Opr tmp = FrameMap::O7_opr;
      int value_lo = c->as_jint_lo_bits();
      if (value_lo == 0) {
        tmp = FrameMap::G0_opr;
      } else {
        __ set(value_lo, O7);
      }
      offset = store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT, wide, false);
      int value_hi = c->as_jint_hi_bits();
      if (value_hi == 0) {
        tmp = FrameMap::G0_opr;
      } else {
        __ set(value_hi, O7);
      }
      store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT, wide, false);
      break;
    }
    case T_OBJECT: {
      jobject obj = c->as_jobject();
      LIR_Opr tmp;
      if (obj == NULL) {
        tmp = FrameMap::G0_opr;
      } else {
        tmp = FrameMap::O7_opr;
        jobject2reg(c->as_jobject(), O7);
      }
      // handle either reg+reg or reg+disp address
      if (addr->index()->is_valid()) {
        assert(addr->disp() == 0, "must be zero");
        offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
      } else {
        assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
        offset = store(tmp, base, addr->disp(), type, wide, false);
      }

      break;
    }
    default:
      Unimplemented();
  }
  if (info != NULL) {
    assert(offset != -1, "offset should've been set");
    add_debug_info_for_null_check(offset, info);
  }
}
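// const2reg below picks a materialization strategy per constant type: ints
// and longs via set() (sethi/or as needed), floats and doubles via a load
// from the nmethod's constant section using an internal_word relocation, and
// objects via jobject2reg, with or without patching.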
void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Opr to_reg = dest;

  switch (c->type()) {
    case T_INT:
    case T_ADDRESS:
      {
        jint con = c->as_jint();
        if (to_reg->is_single_cpu()) {
          assert(patch_code == lir_patch_none, "no patching handled here");
          __ set(con, to_reg->as_register());
        } else {
          ShouldNotReachHere();
          assert(to_reg->is_single_fpu(), "wrong register kind");

          __ set(con, O7);
          Address temp_slot(SP, (frame::register_save_words * wordSize) + STACK_BIAS);
          __ st(O7, temp_slot);
          __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg());
        }
      }
      break;

    case T_LONG:
      {
        jlong con = c->as_jlong();

        if (to_reg->is_double_cpu()) {
#ifdef _LP64
          __ set(con, to_reg->as_register_lo());
#else
          __ set(low(con),  to_reg->as_register_lo());
          __ set(high(con), to_reg->as_register_hi());
#endif
#ifdef _LP64
        } else if (to_reg->is_single_cpu()) {
          __ set(con, to_reg->as_register());
#endif
        } else {
          ShouldNotReachHere();
          assert(to_reg->is_double_fpu(), "wrong register kind");
          Address temp_slot_lo(SP, ((frame::register_save_words) * wordSize) + STACK_BIAS);
          Address temp_slot_hi(SP, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS);
          __ set(low(con),  O7);
          __ st(O7, temp_slot_lo);
          __ set(high(con), O7);
          __ st(O7, temp_slot_hi);
          __ ldf(FloatRegisterImpl::D, temp_slot_lo, to_reg->as_double_reg());
        }
      }
      break;

    case T_OBJECT:
      {
        if (patch_code == lir_patch_none) {
          jobject2reg(c->as_jobject(), to_reg->as_register());
        } else {
          jobject2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;

    case T_FLOAT:
      {
        address const_addr = __ float_constant(c->as_jfloat());
        if (const_addr == NULL) {
          bailout("const section overflow");
          break;
        }
        RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
        AddressLiteral const_addrlit(const_addr, rspec);
        if (to_reg->is_single_fpu()) {
          __ patchable_sethi(const_addrlit, O7);
          __ relocate(rspec);
          __ ldf(FloatRegisterImpl::S, O7, const_addrlit.low10(), to_reg->as_float_reg());

        } else {
          assert(to_reg->is_single_cpu(), "Must be a cpu register.");

          __ set(const_addrlit, O7);
          __ ld(O7, 0, to_reg->as_register());
        }
      }
      break;

    case T_DOUBLE:
      {
        address const_addr = __ double_constant(c->as_jdouble());
        if (const_addr == NULL) {
          bailout("const section overflow");
          break;
        }
        RelocationHolder rspec = internal_word_Relocation::spec(const_addr);

        if (to_reg->is_double_fpu()) {
          AddressLiteral const_addrlit(const_addr, rspec);
          __ patchable_sethi(const_addrlit, O7);
          __ relocate(rspec);
          __ ldf(FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
        } else {
          assert(to_reg->is_double_cpu(), "Must be a long register.");
#ifdef _LP64
          __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
#else
          __ set(low(jlong_cast(c->as_jdouble())),  to_reg->as_register_lo());
          __ set(high(jlong_cast(c->as_jdouble())), to_reg->as_register_hi());
#endif
        }

      }
      break;

    default:
      ShouldNotReachHere();
  }
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Register reg = addr->base()->as_register();
  return Address(reg, addr->disp());
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  switch (type) {
    case T_INT:
    case T_FLOAT: {
      Register tmp = O7;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ lduw(from.base(), from.disp(), tmp);
      __ stw(tmp, to.base(), to.disp());
      break;
    }
    case T_OBJECT: {
      Register tmp = O7;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ ld_ptr(from.base(), from.disp(), tmp);
      __ st_ptr(tmp, to.base(), to.disp());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Register tmp = O7;
      Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
      Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ lduw(from.base(), from.disp(), tmp);
      __ stw(tmp, to.base(), to.disp());
      __ lduw(from.base(), from.disp() + 4, tmp);
      __ stw(tmp, to.base(), to.disp() + 4);
      break;
    }

    default:
      ShouldNotReachHere();
  }
}
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base.base(), base.disp() + hi_word_offset_in_bytes);
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base.base(), base.disp() + lo_word_offset_in_bytes);
}


void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {

  LIR_Address* addr = src_opr->as_address_ptr();
  LIR_Opr to_reg = dest;

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!to_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
      if (needs_patching) {
        __ patchable_set(0, O7);
      } else {
        __ set(disp_value, O7);
      }
      disp_reg = O7;
    }
  } else if (unaligned || PatchALot) {
    __ add(src, addr->index()->as_register(), O7);
    src = O7;
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // remember the offset of the load. The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset = code_offset();

  assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
  if (disp_reg == noreg) {
    offset = load(src, disp_value, to_reg, type, wide, unaligned);
  } else {
    assert(!unaligned, "can't handle this");
    offset = load(src, disp_reg, to_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }
  if (info != NULL) add_debug_info_for_null_check(offset, info);
}


void LIR_Assembler::prefetchr(LIR_Opr src) {
  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (VM_Version::has_v9()) {
    __ prefetch(from_addr, Assembler::severalReads);
  }
}


void LIR_Assembler::prefetchw(LIR_Opr src) {
  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (VM_Version::has_v9()) {
    __ prefetch(from_addr, Assembler::severalWritesAndPossiblyReads);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  Address addr;
  if (src->is_single_word()) {
    addr = frame_map()->address_for_slot(src->single_stack_ix());
  } else if (src->is_double_word()) {
    addr = frame_map()->address_for_double_slot(src->double_stack_ix());
  }

  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
}
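// About the "unaligned" test above and in reg2stack below: frame offsets are
// biased by STACK_BIAS (2047 on 64-bit SPARC, 0 on 32-bit), so subtracting it
// recovers the real address offset; a slot whose real offset is not 0 mod 8
// cannot be accessed with a doubleword ldx/stx and must be split into word
// accesses.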
void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  Address addr;
  if (dest->is_single_word()) {
    addr = frame_map()->address_for_slot(dest->single_stack_ix());
  } else if (dest->is_double_word()) {
    addr = frame_map()->address_for_slot(dest->double_stack_ix());
  }
  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
  if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
    if (from_reg->is_double_fpu()) {
      // double to double moves
      assert(to_reg->is_double_fpu(), "should match");
      __ fmov(FloatRegisterImpl::D, from_reg->as_double_reg(), to_reg->as_double_reg());
    } else {
      // float to float moves
      assert(to_reg->is_single_fpu(), "should match");
      __ fmov(FloatRegisterImpl::S, from_reg->as_float_reg(), to_reg->as_float_reg());
    }
  } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
    if (from_reg->is_double_cpu()) {
#ifdef _LP64
      __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
#else
      assert(to_reg->is_double_cpu() &&
             from_reg->as_register_hi() != to_reg->as_register_lo() &&
             from_reg->as_register_lo() != to_reg->as_register_hi(),
             "should both be long and not overlap");
      // long to long moves
      __ mov(from_reg->as_register_hi(), to_reg->as_register_hi());
      __ mov(from_reg->as_register_lo(), to_reg->as_register_lo());
#endif
#ifdef _LP64
    } else if (to_reg->is_double_cpu()) {
      // int to int moves
      __ mov(from_reg->as_register(), to_reg->as_register_lo());
#endif
    } else {
      // int to int moves
      __ mov(from_reg->as_register(), to_reg->as_register());
    }
  } else {
    ShouldNotReachHere();
  }
  if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
    __ verify_oop(to_reg->as_register());
  }
}


void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
                            bool wide, bool unaligned) {
  LIR_Address* addr = dest->as_address_ptr();

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->is_oop_register()) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!from_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
      if (needs_patching) {
        __ patchable_set(0, O7);
      } else {
        __ set(disp_value, O7);
      }
      disp_reg = O7;
    }
  } else if (unaligned || PatchALot) {
    __ add(src, addr->index()->as_register(), O7);
    src = O7;
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // remember the offset of the store. The patching_epilog must be done
  // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;

  assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
  if (disp_reg == noreg) {
    offset = store(from_reg, src, disp_value, type, wide, unaligned);
  } else {
    assert(!unaligned, "can't handle this");
    offset = store(from_reg, src, disp_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }

  if (info != NULL) add_debug_info_for_null_check(offset, info);
}


void LIR_Assembler::return_op(LIR_Opr result) {
  // the poll may need a register so just pick one that isn't the return register
#if defined(TIERED) && !defined(_LP64)
  if (result->type_field() == LIR_OprDesc::long_type) {
    // Must move the result to G1
    // Must leave proper result in O0,O1 and G1 (TIERED only)
    __ sllx(I0, 32, G1);  // Shift bits into high G1
    __ srl (I1, 0, I1);   // Zero extend O1 (harmless?)
    __ or3 (I1, G1, G1);  // OR 64 bits into G1
#ifdef ASSERT
    // mangle it so any problems will show up
    __ set(0xdeadbeef, I0);
    __ set(0xdeadbeef, I1);
#endif
  }
#endif // TIERED
  __ set((intptr_t)os::get_polling_page(), L0);
  __ relocate(relocInfo::poll_return_type);
  __ ld_ptr(L0, 0, G0);
  __ ret();
  __ delayed()->restore();
}


int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  __ set((intptr_t)os::get_polling_page(), tmp->as_register());
  if (info != NULL) {
    add_debug_info_for_branch(info);
  } else {
    __ relocate(relocInfo::poll_type);
  }

  int offset = __ offset();
  __ ld_ptr(tmp->as_register(), 0, G0);

  return offset;
}


void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size);
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();
  __ relocate(static_stub_Relocation::spec(call_pc));

  __ set_oop(NULL, G5);
  // must be set to -1 at code generation time
  AddressLiteral addrlit(-1);
  __ jump_to(addrlit, G3);
  __ delayed()->nop();

  assert(__ offset() - start <= call_stub_size, "stub too big");
  __ end_a_stub();
}
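// Aside on the polling loads above: the ld_ptr from the polling page into G0
// is a discarded read whose only purpose is to fault when the VM protects the
// page, diverting the thread into the safepoint handler; the relocations
// (poll_type / poll_return_type) let the signal handler recognize the site.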
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_single_fpu()) {
    __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, opr1->as_float_reg(), opr2->as_float_reg());
  } else if (opr1->is_double_fpu()) {
    __ fcmp(FloatRegisterImpl::D, Assembler::fcc0, opr1->as_double_reg(), opr2->as_double_reg());
  } else if (opr1->is_single_cpu()) {
    if (opr2->is_constant()) {
      switch (opr2->as_constant_ptr()->type()) {
        case T_INT:
          {
            jint con = opr2->as_constant_ptr()->as_jint();
            if (Assembler::is_simm13(con)) {
              __ cmp(opr1->as_register(), con);
            } else {
              __ set(con, O7);
              __ cmp(opr1->as_register(), O7);
            }
          }
          break;

        case T_OBJECT:
          // there are only equal/notEqual comparisons on objects
          {
            jobject con = opr2->as_constant_ptr()->as_jobject();
            if (con == NULL) {
              __ cmp(opr1->as_register(), 0);
            } else {
              jobject2reg(con, O7);
              __ cmp(opr1->as_register(), O7);
            }
          }
          break;

        default:
          ShouldNotReachHere();
          break;
      }
    } else {
      if (opr2->is_address()) {
        LIR_Address* addr = opr2->as_address_ptr();
        BasicType type = addr->type();
        if (type == T_OBJECT) __ ld_ptr(as_Address(addr), O7);
        else                  __ ld(as_Address(addr), O7);
        __ cmp(opr1->as_register(), O7);
      } else {
        __ cmp(opr1->as_register(), opr2->as_register());
      }
    }
  } else if (opr1->is_double_cpu()) {
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_constant() && opr2->as_jlong() == 0) {
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases");
#ifdef _LP64
      __ orcc(xhi, G0, G0);
#else
      __ orcc(xhi, xlo, G0);
#endif
    } else if (opr2->is_register()) {
      Register ylo = opr2->as_register_lo();
      Register yhi = opr2->as_register_hi();
#ifdef _LP64
      __ cmp(xlo, ylo);
#else
      __ subcc(xlo, ylo, xlo);
      __ subccc(xhi, yhi, xhi);
      if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
        __ orcc(xhi, xlo, G0);
      }
#endif
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_address()) {
    LIR_Address* addr = opr1->as_address_ptr();
    BasicType type = addr->type();
    assert (opr2->is_constant(), "Checking");
    if (type == T_OBJECT) __ ld_ptr(as_Address(addr), O7);
    else                  __ ld(as_Address(addr), O7);
    __ cmp(O7, opr2->as_constant_ptr()->as_jint());
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
    } else if (left->is_double_fpu()) {
      __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
    } else {
      ShouldNotReachHere();
    }
  } else if (code == lir_cmp_l2i) {
#ifdef _LP64
    __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
#else
    __ lcmp(left->as_register_hi(),  left->as_register_lo(),
            right->as_register_hi(), right->as_register_lo(),
            dst->as_register());
#endif
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
  Assembler::Condition acond;
  switch (condition) {
    case lir_cond_equal:        acond = Assembler::equal;                break;
    case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
    case lir_cond_less:         acond = Assembler::less;                 break;
    case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
    case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
    case lir_cond_greater:      acond = Assembler::greater;              break;
    case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
    case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
    default:                    ShouldNotReachHere();
  };

  if (opr1->is_constant() && opr1->type() == T_INT) {
    Register dest = result->as_register();
    // load up the first part of the constant before the branch
    // and do the rest in the delay slot.
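    // The sethi/or3 split, for illustration: a 32-bit constant such as
    // 0x12345678 is materialized as sethi %hi(0x12345678), dest (bits 31..10)
    // followed by or3 dest, 0x278, dest (the low 10 bits); only the or3 needs
    // to sit in the delay slot, since the sethi is harmless either way.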
1734 if (!Assembler::is_simm13(opr1->as_jint())) { 1735 __ sethi(opr1->as_jint(), dest); 1736 } 1737 } else if (opr1->is_constant()) { 1738 const2reg(opr1, result, lir_patch_none, NULL); 1739 } else if (opr1->is_register()) { 1740 reg2reg(opr1, result); 1741 } else if (opr1->is_stack()) { 1742 stack2reg(opr1, result, result->type()); 1743 } else { 1744 ShouldNotReachHere(); 1745 } 1746 Label skip; 1747 #ifdef _LP64 1748 if (type == T_INT) { 1749 __ br(acond, false, Assembler::pt, skip); 1750 } else 1751 #endif 1752 __ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit 1753 if (opr1->is_constant() && opr1->type() == T_INT) { 1754 Register dest = result->as_register(); 1755 if (Assembler::is_simm13(opr1->as_jint())) { 1756 __ delayed()->or3(G0, opr1->as_jint(), dest); 1757 } else { 1758 // the sethi has been done above, so just put in the low 10 bits 1759 __ delayed()->or3(dest, opr1->as_jint() & 0x3ff, dest); 1760 } 1761 } else { 1762 // can't do anything useful in the delay slot 1763 __ delayed()->nop(); 1764 } 1765 if (opr2->is_constant()) { 1766 const2reg(opr2, result, lir_patch_none, NULL); 1767 } else if (opr2->is_register()) { 1768 reg2reg(opr2, result); 1769 } else if (opr2->is_stack()) { 1770 stack2reg(opr2, result, result->type()); 1771 } else { 1772 ShouldNotReachHere(); 1773 } 1774 __ bind(skip); 1775 } 1776 1777 1778 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) { 1779 assert(info == NULL, "unused on this code path"); 1780 assert(left->is_register(), "wrong items state"); 1781 assert(dest->is_register(), "wrong items state"); 1782 1783 if (right->is_register()) { 1784 if (dest->is_float_kind()) { 1785 1786 FloatRegister lreg, rreg, res; 1787 FloatRegisterImpl::Width w; 1788 if (right->is_single_fpu()) { 1789 w = FloatRegisterImpl::S; 1790 lreg = left->as_float_reg(); 1791 rreg = right->as_float_reg(); 1792 res = dest->as_float_reg(); 1793 } else { 1794 w = FloatRegisterImpl::D; 1795 lreg = left->as_double_reg(); 1796 rreg = right->as_double_reg(); 1797 res = dest->as_double_reg(); 1798 } 1799 1800 switch (code) { 1801 case lir_add: __ fadd(w, lreg, rreg, res); break; 1802 case lir_sub: __ fsub(w, lreg, rreg, res); break; 1803 case lir_mul: // fall through 1804 case lir_mul_strictfp: __ fmul(w, lreg, rreg, res); break; 1805 case lir_div: // fall through 1806 case lir_div_strictfp: __ fdiv(w, lreg, rreg, res); break; 1807 default: ShouldNotReachHere(); 1808 } 1809 1810 } else if (dest->is_double_cpu()) { 1811 #ifdef _LP64 1812 Register dst_lo = dest->as_register_lo(); 1813 Register op1_lo = left->as_pointer_register(); 1814 Register op2_lo = right->as_pointer_register(); 1815 1816 switch (code) { 1817 case lir_add: 1818 __ add(op1_lo, op2_lo, dst_lo); 1819 break; 1820 1821 case lir_sub: 1822 __ sub(op1_lo, op2_lo, dst_lo); 1823 break; 1824 1825 default: ShouldNotReachHere(); 1826 } 1827 #else 1828 Register op1_lo = left->as_register_lo(); 1829 Register op1_hi = left->as_register_hi(); 1830 Register op2_lo = right->as_register_lo(); 1831 Register op2_hi = right->as_register_hi(); 1832 Register dst_lo = dest->as_register_lo(); 1833 Register dst_hi = dest->as_register_hi(); 1834 1835 switch (code) { 1836 case lir_add: 1837 __ addcc(op1_lo, op2_lo, dst_lo); 1838 __ addc (op1_hi, op2_hi, dst_hi); 1839 break; 1840 1841 case lir_sub: 1842 __ subcc(op1_lo, op2_lo, dst_lo); 1843 __ subc (op1_hi, op2_hi, dst_hi); 1844 break; 1845 1846 default: ShouldNotReachHere(); 1847 } 1848 
#endif 1849 } else { 1850 assert (right->is_single_cpu(), "Just Checking"); 1851 1852 Register lreg = left->as_register(); 1853 Register res = dest->as_register(); 1854 Register rreg = right->as_register(); 1855 switch (code) { 1856 case lir_add: __ add (lreg, rreg, res); break; 1857 case lir_sub: __ sub (lreg, rreg, res); break; 1858 case lir_mul: __ mult (lreg, rreg, res); break; 1859 default: ShouldNotReachHere(); 1860 } 1861 } 1862 } else { 1863 assert (right->is_constant(), "must be constant"); 1864 1865 if (dest->is_single_cpu()) { 1866 Register lreg = left->as_register(); 1867 Register res = dest->as_register(); 1868 int simm13 = right->as_constant_ptr()->as_jint(); 1869 1870 switch (code) { 1871 case lir_add: __ add (lreg, simm13, res); break; 1872 case lir_sub: __ sub (lreg, simm13, res); break; 1873 case lir_mul: __ mult (lreg, simm13, res); break; 1874 default: ShouldNotReachHere(); 1875 } 1876 } else { 1877 Register lreg = left->as_pointer_register(); 1878 Register res = dest->as_register_lo(); 1879 long con = right->as_constant_ptr()->as_jlong(); 1880 assert(Assembler::is_simm13(con), "must be simm13"); 1881 1882 switch (code) { 1883 case lir_add: __ add (lreg, (int)con, res); break; 1884 case lir_sub: __ sub (lreg, (int)con, res); break; 1885 case lir_mul: __ mult (lreg, (int)con, res); break; 1886 default: ShouldNotReachHere(); 1887 } 1888 } 1889 } 1890 } 1891 1892 1893 void LIR_Assembler::fpop() { 1894 // do nothing 1895 } 1896 1897 1898 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) { 1899 switch (code) { 1900 case lir_sin: 1901 case lir_tan: 1902 case lir_cos: { 1903 assert(thread->is_valid(), "preserve the thread object for performance reasons"); 1904 assert(dest->as_double_reg() == F0, "the result will be in f0/f1"); 1905 break; 1906 } 1907 case lir_sqrt: { 1908 assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt"); 1909 FloatRegister src_reg = value->as_double_reg(); 1910 FloatRegister dst_reg = dest->as_double_reg(); 1911 __ fsqrt(FloatRegisterImpl::D, src_reg, dst_reg); 1912 break; 1913 } 1914 case lir_abs: { 1915 assert(!thread->is_valid(), "there is no need for a thread_reg for fabs"); 1916 FloatRegister src_reg = value->as_double_reg(); 1917 FloatRegister dst_reg = dest->as_double_reg(); 1918 __ fabs(FloatRegisterImpl::D, src_reg, dst_reg); 1919 break; 1920 } 1921 default: { 1922 ShouldNotReachHere(); 1923 break; 1924 } 1925 } 1926 } 1927 1928 1929 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) { 1930 if (right->is_constant()) { 1931 if (dest->is_single_cpu()) { 1932 int simm13 = right->as_constant_ptr()->as_jint(); 1933 switch (code) { 1934 case lir_logic_and: __ and3 (left->as_register(), simm13, dest->as_register()); break; 1935 case lir_logic_or: __ or3 (left->as_register(), simm13, dest->as_register()); break; 1936 case lir_logic_xor: __ xor3 (left->as_register(), simm13, dest->as_register()); break; 1937 default: ShouldNotReachHere(); 1938 } 1939 } else { 1940 long c = right->as_constant_ptr()->as_jlong(); 1941 assert(c == (int)c && Assembler::is_simm13(c), "out of range"); 1942 int simm13 = (int)c; 1943 switch (code) { 1944 case lir_logic_and: 1945 #ifndef _LP64 1946 __ and3 (left->as_register_hi(), 0, dest->as_register_hi()); 1947 #endif 1948 __ and3 (left->as_register_lo(), simm13, dest->as_register_lo()); 1949 break; 1950 1951 case lir_logic_or: 1952 #ifndef _LP64 1953 __ or3 (left->as_register_hi(), 0, dest->as_register_hi()); 1954 
#endif 1955 __ or3 (left->as_register_lo(), simm13, dest->as_register_lo()); 1956 break; 1957 1958 case lir_logic_xor: 1959 #ifndef _LP64 1960 __ xor3 (left->as_register_hi(), 0, dest->as_register_hi()); 1961 #endif 1962 __ xor3 (left->as_register_lo(), simm13, dest->as_register_lo()); 1963 break; 1964 1965 default: ShouldNotReachHere(); 1966 } 1967 } 1968 } else { 1969 assert(right->is_register(), "right should be in register"); 1970 1971 if (dest->is_single_cpu()) { 1972 switch (code) { 1973 case lir_logic_and: __ and3 (left->as_register(), right->as_register(), dest->as_register()); break; 1974 case lir_logic_or: __ or3 (left->as_register(), right->as_register(), dest->as_register()); break; 1975 case lir_logic_xor: __ xor3 (left->as_register(), right->as_register(), dest->as_register()); break; 1976 default: ShouldNotReachHere(); 1977 } 1978 } else { 1979 #ifdef _LP64 1980 Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() : 1981 left->as_register_lo(); 1982 Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() : 1983 right->as_register_lo(); 1984 1985 switch (code) { 1986 case lir_logic_and: __ and3 (l, r, dest->as_register_lo()); break; 1987 case lir_logic_or: __ or3 (l, r, dest->as_register_lo()); break; 1988 case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break; 1989 default: ShouldNotReachHere(); 1990 } 1991 #else 1992 switch (code) { 1993 case lir_logic_and: 1994 __ and3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi()); 1995 __ and3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo()); 1996 break; 1997 1998 case lir_logic_or: 1999 __ or3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi()); 2000 __ or3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo()); 2001 break; 2002 2003 case lir_logic_xor: 2004 __ xor3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi()); 2005 __ xor3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo()); 2006 break; 2007 2008 default: ShouldNotReachHere(); 2009 } 2010 #endif 2011 } 2012 } 2013 } 2014 2015 2016 int LIR_Assembler::shift_amount(BasicType t) { 2017 int elem_size = type2aelembytes(t); 2018 switch (elem_size) { 2019 case 1 : return 0; 2020 case 2 : return 1; 2021 case 4 : return 2; 2022 case 8 : return 3; 2023 } 2024 ShouldNotReachHere(); 2025 return -1; 2026 } 2027 2028 2029 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { 2030 assert(exceptionOop->as_register() == Oexception, "should match"); 2031 assert(exceptionPC->as_register() == Oissuing_pc, "should match"); 2032 2033 info->add_register_oop(exceptionOop); 2034 2035 // reuse the debug info from the safepoint poll for the throw op itself 2036 address pc_for_athrow = __ pc(); 2037 int pc_for_athrow_offset = __ offset(); 2038 RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow); 2039 __ set(pc_for_athrow, Oissuing_pc, rspec); 2040 add_call_info(pc_for_athrow_offset, info); // for exception handler 2041 2042 __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type); 2043 __ delayed()->nop(); 2044 } 2045 2046 2047 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { 2048 assert(exceptionOop->as_register() == Oexception, "should match"); 2049 2050 __ br(Assembler::always, false, Assembler::pt, _unwind_handler_entry); 2051 __ delayed()->nop(); 2052 } 2053 2054 2055 void 
LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2056   Register src = op->src()->as_register();
2057   Register dst = op->dst()->as_register();
2058   Register src_pos = op->src_pos()->as_register();
2059   Register dst_pos = op->dst_pos()->as_register();
2060   Register length = op->length()->as_register();
2061   Register tmp = op->tmp()->as_register();
2062   Register tmp2 = O7;
2063
2064   int flags = op->flags();
2065   ciArrayKlass* default_type = op->expected_type();
2066   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2067   if (basic_type == T_ARRAY) basic_type = T_OBJECT;
2068
2069 #ifdef _LP64
2070   // the upper 32 bits must be zero
2071   __ sra(dst_pos, 0, dst_pos);
2072   __ sra(src_pos, 0, src_pos);
2073   __ sra(length, 0, length);
2074 #endif
2075
2076   // set up the arraycopy stub information
2077   ArrayCopyStub* stub = op->stub();
2078
2079   // Always go through the stub if no type information is available: it's fine if
2080   // the known type isn't loaded, since the code sanity-checks in debug mode, and
2081   // the type isn't required when we know the exact type.
2082   // Also check that the type is an array type.
2083   if (op->expected_type() == NULL) {
2084     __ mov(src, O0);
2085     __ mov(src_pos, O1);
2086     __ mov(dst, O2);
2087     __ mov(dst_pos, O3);
2088     __ mov(length, O4);
2089     address copyfunc_addr = StubRoutines::generic_arraycopy();
2090
2091     if (copyfunc_addr == NULL) { // Use C version if stub was not generated
2092       __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy));
2093     } else {
2094 #ifndef PRODUCT
2095       if (PrintC1Statistics) {
2096         address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
2097         __ inc_counter(counter, G1, G3);
2098       }
2099 #endif
2100       __ call_VM_leaf(tmp, copyfunc_addr);
2101     }
2102
2103     if (copyfunc_addr != NULL) {
2104       __ xor3(O0, -1, tmp);
2105       __ sub(length, tmp, length);
2106       __ add(src_pos, tmp, src_pos);
2107       __ cmp_zero_and_br(Assembler::less, O0, *stub->entry());
2108       __ delayed()->add(dst_pos, tmp, dst_pos);
2109     } else {
2110       __ cmp_zero_and_br(Assembler::less, O0, *stub->entry());
2111       __ delayed()->nop();
2112     }
2113     __ bind(*stub->continuation());
2114     return;
2115   }
2116
2117   assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");
2118
2119   // make sure src and dst are non-null
2120   if (flags & LIR_OpArrayCopy::src_null_check) {
2121     __ tst(src);
2122     __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
2123     __ delayed()->nop();
2124   }
2125
2126   if (flags & LIR_OpArrayCopy::dst_null_check) {
2127     __ tst(dst);
2128     __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
2129     __ delayed()->nop();
2130   }
2131
2132   // If the compiler was not able to prove that the exact type of the source or the destination
2133   // of the arraycopy is an array type, check at runtime whether the source or the destination is
2134   // an instance type.
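// The instance-type test below relies on the layout_helper encoding: array
// klasses store a negative layout_helper, while instance klasses compare
// greater than or equal to Klass::_lh_neutral_value, so a single signed
// compare of the loaded word suffices (sketch of the emitted sequence):
//   lduw [klass + layout_helper_offset], tmp2
//   cmp  tmp2, _lh_neutral_value
//   bge  stub          ! not an array, take the slow path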
2135   if (flags & LIR_OpArrayCopy::type_check) {
2136     if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2137       __ load_klass(dst, tmp);
2138       __ lduw(tmp, in_bytes(Klass::layout_helper_offset()), tmp2);
2139       __ cmp(tmp2, Klass::_lh_neutral_value);
2140       __ br(Assembler::greaterEqual, false, Assembler::pn, *stub->entry());
2141       __ delayed()->nop();
2142     }
2143
2144     if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2145       __ load_klass(src, tmp);
2146       __ lduw(tmp, in_bytes(Klass::layout_helper_offset()), tmp2);
2147       __ cmp(tmp2, Klass::_lh_neutral_value);
2148       __ br(Assembler::greaterEqual, false, Assembler::pn, *stub->entry());
2149       __ delayed()->nop();
2150     }
2151   }
2152
2153   if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
2154     // test src_pos register
2155     __ cmp_zero_and_br(Assembler::less, src_pos, *stub->entry());
2156     __ delayed()->nop();
2157   }
2158
2159   if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
2160     // test dst_pos register
2161     __ cmp_zero_and_br(Assembler::less, dst_pos, *stub->entry());
2162     __ delayed()->nop();
2163   }
2164
2165   if (flags & LIR_OpArrayCopy::length_positive_check) {
2166     // make sure length isn't negative
2167     __ cmp_zero_and_br(Assembler::less, length, *stub->entry());
2168     __ delayed()->nop();
2169   }
2170
2171   if (flags & LIR_OpArrayCopy::src_range_check) {
2172     __ ld(src, arrayOopDesc::length_offset_in_bytes(), tmp2);
2173     __ add(length, src_pos, tmp);
2174     __ cmp(tmp2, tmp);
2175     __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
2176     __ delayed()->nop();
2177   }
2178
2179   if (flags & LIR_OpArrayCopy::dst_range_check) {
2180     __ ld(dst, arrayOopDesc::length_offset_in_bytes(), tmp2);
2181     __ add(length, dst_pos, tmp);
2182     __ cmp(tmp2, tmp);
2183     __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
2184     __ delayed()->nop();
2185   }
2186
2187   int shift = shift_amount(basic_type);
2188
2189   if (flags & LIR_OpArrayCopy::type_check) {
2190     // We don't know that the array types are compatible
2191     if (basic_type != T_OBJECT) {
2192       // Simple test for basic type arrays
2193       if (UseCompressedOops) {
2194         // No need to decode: we only compare the klass words for equality
2195         __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
2196         __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2197         __ cmp(tmp, tmp2);
2198         __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
2199       } else {
2200         __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
2201         __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2202         __ cmp(tmp, tmp2);
2203         __ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry());
2204       }
2205       __ delayed()->nop();
2206     } else {
2207       // For object arrays, if src is a subclass of dst then we can
2208       // safely do the copy.
2209       address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2210
2211       Label cont, slow;
2212       assert_different_registers(tmp, tmp2, G3, G1);
2213
2214       __ load_klass(src, G3);
2215       __ load_klass(dst, G1);
2216
2217       __ check_klass_subtype_fast_path(G3, G1, tmp, tmp2, &cont, copyfunc_addr == NULL ? stub->entry() : &slow, NULL);
2218
2219       __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
2220       __ delayed()->nop();
2221
2222       __ cmp(G3, 0);
2223       if (copyfunc_addr != NULL) { // use stub if available
2224         // src is not a subclass of dst so we have to do a
2225         // per-element check.
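// G3 holds the result of the slow subtype check: nonzero means the source
// klass is a subtype of the destination klass, so a plain bulk copy is safe
// and we branch to 'cont'; on zero we fall through to the per-element
// (checkcast) copy below.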
        __ br(Assembler::notEqual, false, Assembler::pt, cont);
2227         __ delayed()->nop();
2228
2229         __ bind(slow);
2230
2231         int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
2232         if ((flags & mask) != mask) {
2233           // One of the two is known to be an object array; check that the other one is too.
2234           assert(flags & mask, "one of the two should be known to be an object array");
2235
2236           if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2237             __ load_klass(src, tmp);
2238           } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2239             __ load_klass(dst, tmp);
2240           }
2241           int lh_offset = in_bytes(Klass::layout_helper_offset());
2242
2243           __ lduw(tmp, lh_offset, tmp2);
2244
2245           jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2246           __ set(objArray_lh, tmp);
2247           __ cmp(tmp, tmp2);
2248           __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
2249           __ delayed()->nop();
2250         }
2251
2252         Register src_ptr = O0;
2253         Register dst_ptr = O1;
2254         Register len = O2;
2255         Register chk_off = O3;
2256         Register super_k = O4;
2257
2258         __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
2259         if (shift == 0) {
2260           __ add(src_ptr, src_pos, src_ptr);
2261         } else {
2262           __ sll(src_pos, shift, tmp);
2263           __ add(src_ptr, tmp, src_ptr);
2264         }
2265
2266         __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
2267         if (shift == 0) {
2268           __ add(dst_ptr, dst_pos, dst_ptr);
2269         } else {
2270           __ sll(dst_pos, shift, tmp);
2271           __ add(dst_ptr, tmp, dst_ptr);
2272         }
2273         __ mov(length, len);
2274         __ load_klass(dst, tmp);
2275
2276         int ek_offset = in_bytes(objArrayKlass::element_klass_offset());
2277         __ ld_ptr(tmp, ek_offset, super_k);
2278
2279         int sco_offset = in_bytes(Klass::super_check_offset_offset());
2280         __ lduw(super_k, sco_offset, chk_off);
2281
2282         __ call_VM_leaf(tmp, copyfunc_addr);
2283
2284 #ifndef PRODUCT
2285         if (PrintC1Statistics) {
2286           Label failed;
2287           __ br_notnull_short(O0, Assembler::pn, failed);
2288           __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, G1, G3);
2289           __ bind(failed);
2290         }
2291 #endif
2292
2293         __ br_null(O0, false, Assembler::pt, *stub->continuation());
2294         __ delayed()->xor3(O0, -1, tmp);
2295
2296 #ifndef PRODUCT
2297         if (PrintC1Statistics) {
2298           __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, G1, G3);
2299         }
2300 #endif
2301
2302         __ sub(length, tmp, length);
2303         __ add(src_pos, tmp, src_pos);
2304         __ br(Assembler::always, false, Assembler::pt, *stub->entry());
2305         __ delayed()->add(dst_pos, tmp, dst_pos);
2306
2307         __ bind(cont);
2308       } else {
2309         __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
2310         __ delayed()->nop();
2311         __ bind(cont);
2312       }
2313     }
2314   }
2315
2316 #ifdef ASSERT
2317   if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2318     // Sanity check the known type with the incoming class. For the
2319     // primitive case the types must match exactly with src.klass and
2320     // dst.klass each exactly matching the default type. For the
2321     // object array case, if no type check is needed then either the
2322     // dst type is exactly the expected type and the src type is a
2323     // subtype which we can't check, or src is the same array as dst
2324     // but not necessarily exactly of type default_type.
2325     Label known_ok, halt;
2326     jobject2reg(op->expected_type()->constant_encoding(), tmp);
2327     if (UseCompressedOops) {
2328       // tmp holds the default type. It currently comes uncompressed after the
2329       // load of a constant, so encode it.
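// With compressed oops the klass field is a 32-bit narrow value, so the
// constant is encoded once up front and the checks below compare the raw
// 32-bit words with lduw; comparing encoded values is sound because the
// encoding is one-to-one (a hedged note on why no decode is needed here).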
__ encode_heap_oop(tmp);
2331       // load the raw value of the dst klass: it is still compressed, and we
2332       // compare the encoded values directly.
2333       __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2334       if (basic_type != T_OBJECT) {
2335         __ cmp(tmp, tmp2);
2336         __ br(Assembler::notEqual, false, Assembler::pn, halt);
2337         // load the raw value of the src klass.
2338         __ delayed()->lduw(src, oopDesc::klass_offset_in_bytes(), tmp2);
2339         __ cmp_and_br_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
2340       } else {
2341         __ cmp(tmp, tmp2);
2342         __ br(Assembler::equal, false, Assembler::pn, known_ok);
2343         __ delayed()->cmp(src, dst);
2344         __ brx(Assembler::equal, false, Assembler::pn, known_ok);
2345         __ delayed()->nop();
2346       }
2347     } else {
2348       __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2349       if (basic_type != T_OBJECT) {
2350         __ cmp(tmp, tmp2);
2351         __ brx(Assembler::notEqual, false, Assembler::pn, halt);
2352         __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
2353         __ cmp_and_brx_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
2354       } else {
2355         __ cmp(tmp, tmp2);
2356         __ brx(Assembler::equal, false, Assembler::pn, known_ok);
2357         __ delayed()->cmp(src, dst);
2358         __ brx(Assembler::equal, false, Assembler::pn, known_ok);
2359         __ delayed()->nop();
2360       }
2361     }
2362     __ bind(halt);
2363     __ stop("incorrect type information in arraycopy");
2364     __ bind(known_ok);
2365   }
2366 #endif
2367
2368 #ifndef PRODUCT
2369   if (PrintC1Statistics) {
2370     address counter = Runtime1::arraycopy_count_address(basic_type);
2371     __ inc_counter(counter, G1, G3);
2372   }
2373 #endif
2374
2375   Register src_ptr = O0;
2376   Register dst_ptr = O1;
2377   Register len = O2;
2378
2379   __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
2380   if (shift == 0) {
2381     __ add(src_ptr, src_pos, src_ptr);
2382   } else {
2383     __ sll(src_pos, shift, tmp);
2384     __ add(src_ptr, tmp, src_ptr);
2385   }
2386
2387   __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
2388   if (shift == 0) {
2389     __ add(dst_ptr, dst_pos, dst_ptr);
2390   } else {
2391     __ sll(dst_pos, shift, tmp);
2392     __ add(dst_ptr, tmp, dst_ptr);
2393   }
2394
2395   bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
2396   bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
2397   const char *name;
2398   address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2399
2400   // arraycopy stubs take a length in number of elements, so don't scale it.
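// 'entry' above was chosen by StubRoutines::select_arraycopy_function, which
// returns the most specific stub for the element type: a disjoint variant
// when the ranges cannot overlap and an element-aligned variant when
// possible; 'name' is an out-parameter used only for diagnostics (a hedged
// sketch of the selection contract as this call site uses it).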
2401 __ mov(length, len); 2402 __ call_VM_leaf(tmp, entry); 2403 2404 __ bind(*stub->continuation()); 2405 } 2406 2407 2408 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { 2409 if (dest->is_single_cpu()) { 2410 #ifdef _LP64 2411 if (left->type() == T_OBJECT) { 2412 switch (code) { 2413 case lir_shl: __ sllx (left->as_register(), count->as_register(), dest->as_register()); break; 2414 case lir_shr: __ srax (left->as_register(), count->as_register(), dest->as_register()); break; 2415 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break; 2416 default: ShouldNotReachHere(); 2417 } 2418 } else 2419 #endif 2420 switch (code) { 2421 case lir_shl: __ sll (left->as_register(), count->as_register(), dest->as_register()); break; 2422 case lir_shr: __ sra (left->as_register(), count->as_register(), dest->as_register()); break; 2423 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break; 2424 default: ShouldNotReachHere(); 2425 } 2426 } else { 2427 #ifdef _LP64 2428 switch (code) { 2429 case lir_shl: __ sllx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break; 2430 case lir_shr: __ srax (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break; 2431 case lir_ushr: __ srlx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break; 2432 default: ShouldNotReachHere(); 2433 } 2434 #else 2435 switch (code) { 2436 case lir_shl: __ lshl (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break; 2437 case lir_shr: __ lshr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break; 2438 case lir_ushr: __ lushr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break; 2439 default: ShouldNotReachHere(); 2440 } 2441 #endif 2442 } 2443 } 2444 2445 2446 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { 2447 #ifdef _LP64 2448 if (left->type() == T_OBJECT) { 2449 count = count & 63; // shouldn't shift by more than sizeof(intptr_t) 2450 Register l = left->as_register(); 2451 Register d = dest->as_register_lo(); 2452 switch (code) { 2453 case lir_shl: __ sllx (l, count, d); break; 2454 case lir_shr: __ srax (l, count, d); break; 2455 case lir_ushr: __ srlx (l, count, d); break; 2456 default: ShouldNotReachHere(); 2457 } 2458 return; 2459 } 2460 #endif 2461 2462 if (dest->is_single_cpu()) { 2463 count = count & 0x1F; // Java spec 2464 switch (code) { 2465 case lir_shl: __ sll (left->as_register(), count, dest->as_register()); break; 2466 case lir_shr: __ sra (left->as_register(), count, dest->as_register()); break; 2467 case lir_ushr: __ srl (left->as_register(), count, dest->as_register()); break; 2468 default: ShouldNotReachHere(); 2469 } 2470 } else if (dest->is_double_cpu()) { 2471 count = count & 63; // Java spec 2472 switch (code) { 2473 case lir_shl: __ sllx (left->as_pointer_register(), count, dest->as_pointer_register()); break; 2474 case lir_shr: __ srax (left->as_pointer_register(), count, dest->as_pointer_register()); break; 2475 case lir_ushr: __ srlx (left->as_pointer_register(), count, dest->as_pointer_register()); break; 2476 default: ShouldNotReachHere(); 2477 } 2478 } else { 2479 ShouldNotReachHere(); 2480 } 2481 } 2482 2483 2484 void 
LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) { 2485 assert(op->tmp1()->as_register() == G1 && 2486 op->tmp2()->as_register() == G3 && 2487 op->tmp3()->as_register() == G4 && 2488 op->obj()->as_register() == O0 && 2489 op->klass()->as_register() == G5, "must be"); 2490 if (op->init_check()) { 2491 __ ldub(op->klass()->as_register(), 2492 in_bytes(instanceKlass::init_state_offset()), 2493 op->tmp1()->as_register()); 2494 add_debug_info_for_null_check_here(op->stub()->info()); 2495 __ cmp(op->tmp1()->as_register(), instanceKlass::fully_initialized); 2496 __ br(Assembler::notEqual, false, Assembler::pn, *op->stub()->entry()); 2497 __ delayed()->nop(); 2498 } 2499 __ allocate_object(op->obj()->as_register(), 2500 op->tmp1()->as_register(), 2501 op->tmp2()->as_register(), 2502 op->tmp3()->as_register(), 2503 op->header_size(), 2504 op->object_size(), 2505 op->klass()->as_register(), 2506 *op->stub()->entry()); 2507 __ bind(*op->stub()->continuation()); 2508 __ verify_oop(op->obj()->as_register()); 2509 } 2510 2511 2512 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { 2513 assert(op->tmp1()->as_register() == G1 && 2514 op->tmp2()->as_register() == G3 && 2515 op->tmp3()->as_register() == G4 && 2516 op->tmp4()->as_register() == O1 && 2517 op->klass()->as_register() == G5, "must be"); 2518 2519 LP64_ONLY( __ signx(op->len()->as_register()); ) 2520 if (UseSlowPath || 2521 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) || 2522 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) { 2523 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry()); 2524 __ delayed()->nop(); 2525 } else { 2526 __ allocate_array(op->obj()->as_register(), 2527 op->len()->as_register(), 2528 op->tmp1()->as_register(), 2529 op->tmp2()->as_register(), 2530 op->tmp3()->as_register(), 2531 arrayOopDesc::header_size(op->type()), 2532 type2aelembytes(op->type()), 2533 op->klass()->as_register(), 2534 *op->stub()->entry()); 2535 } 2536 __ bind(*op->stub()->continuation()); 2537 } 2538 2539 2540 void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias, 2541 ciMethodData *md, ciProfileData *data, 2542 Register recv, Register tmp1, Label* update_done) { 2543 uint i; 2544 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2545 Label next_test; 2546 // See if the receiver is receiver[n]. 
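// ReceiverTypeData lays its rows out as (receiver[i], count[i]) slot pairs
// in the MethodData; every slot offset below is biased by mdo_offset_bias
// so the generated loads and stores stay within simm13 reach of mdo.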
2547 Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - 2548 mdo_offset_bias); 2549 __ ld_ptr(receiver_addr, tmp1); 2550 __ verify_oop(tmp1); 2551 __ cmp_and_brx_short(recv, tmp1, Assembler::notEqual, Assembler::pt, next_test); 2552 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - 2553 mdo_offset_bias); 2554 __ ld_ptr(data_addr, tmp1); 2555 __ add(tmp1, DataLayout::counter_increment, tmp1); 2556 __ st_ptr(tmp1, data_addr); 2557 __ ba(*update_done); 2558 __ delayed()->nop(); 2559 __ bind(next_test); 2560 } 2561 2562 // Didn't find receiver; find next empty slot and fill it in 2563 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2564 Label next_test; 2565 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - 2566 mdo_offset_bias); 2567 __ ld_ptr(recv_addr, tmp1); 2568 __ br_notnull_short(tmp1, Assembler::pt, next_test); 2569 __ st_ptr(recv, recv_addr); 2570 __ set(DataLayout::counter_increment, tmp1); 2571 __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - 2572 mdo_offset_bias); 2573 __ ba(*update_done); 2574 __ delayed()->nop(); 2575 __ bind(next_test); 2576 } 2577 } 2578 2579 2580 void LIR_Assembler::setup_md_access(ciMethod* method, int bci, 2581 ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) { 2582 md = method->method_data_or_null(); 2583 assert(md != NULL, "Sanity"); 2584 data = md->bci_to_data(bci); 2585 assert(data != NULL, "need data for checkcast"); 2586 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 2587 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) { 2588 // The offset is large so bias the mdo by the base of the slot so 2589 // that the ld can use simm13s to reference the slots of the data 2590 mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset()); 2591 } 2592 } 2593 2594 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) { 2595 // we always need a stub for the failure case. 
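// The helper takes three continuation labels from its caller: 'success',
// 'failure' and 'obj_is_null'. checkcast passes one label for both success
// and null (a null reference passes the cast), while instanceof routes null
// to the failure path; see emit_opTypeCheck below.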
2596 CodeStub* stub = op->stub(); 2597 Register obj = op->object()->as_register(); 2598 Register k_RInfo = op->tmp1()->as_register(); 2599 Register klass_RInfo = op->tmp2()->as_register(); 2600 Register dst = op->result_opr()->as_register(); 2601 Register Rtmp1 = op->tmp3()->as_register(); 2602 ciKlass* k = op->klass(); 2603 2604 2605 if (obj == k_RInfo) { 2606 k_RInfo = klass_RInfo; 2607 klass_RInfo = obj; 2608 } 2609 2610 ciMethodData* md; 2611 ciProfileData* data; 2612 int mdo_offset_bias = 0; 2613 if (op->should_profile()) { 2614 ciMethod* method = op->profiled_method(); 2615 assert(method != NULL, "Should have method"); 2616 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias); 2617 2618 Label not_null; 2619 __ br_notnull_short(obj, Assembler::pn, not_null); 2620 Register mdo = k_RInfo; 2621 Register data_val = Rtmp1; 2622 jobject2reg(md->constant_encoding(), mdo); 2623 if (mdo_offset_bias > 0) { 2624 __ set(mdo_offset_bias, data_val); 2625 __ add(mdo, data_val, mdo); 2626 } 2627 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias); 2628 __ ldub(flags_addr, data_val); 2629 __ or3(data_val, BitData::null_seen_byte_constant(), data_val); 2630 __ stb(data_val, flags_addr); 2631 __ ba(*obj_is_null); 2632 __ delayed()->nop(); 2633 __ bind(not_null); 2634 } else { 2635 __ br_null(obj, false, Assembler::pn, *obj_is_null); 2636 __ delayed()->nop(); 2637 } 2638 2639 Label profile_cast_failure, profile_cast_success; 2640 Label *failure_target = op->should_profile() ? &profile_cast_failure : failure; 2641 Label *success_target = op->should_profile() ? &profile_cast_success : success; 2642 2643 // patching may screw with our temporaries on sparc, 2644 // so let's do it before loading the class 2645 if (k->is_loaded()) { 2646 jobject2reg(k->constant_encoding(), k_RInfo); 2647 } else { 2648 jobject2reg_with_patching(k_RInfo, op->info_for_patch()); 2649 } 2650 assert(obj != k_RInfo, "must be different"); 2651 2652 // get object class 2653 // not a safepoint as obj null check happens earlier 2654 __ load_klass(obj, klass_RInfo); 2655 if (op->fast_check()) { 2656 assert_different_registers(klass_RInfo, k_RInfo); 2657 __ cmp(k_RInfo, klass_RInfo); 2658 __ brx(Assembler::notEqual, false, Assembler::pt, *failure_target); 2659 __ delayed()->nop(); 2660 } else { 2661 bool need_slow_path = true; 2662 if (k->is_loaded()) { 2663 if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset())) 2664 need_slow_path = false; 2665 // perform the fast part of the checking logic 2666 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg, 2667 (need_slow_path ? 
success_target : NULL), 2668 failure_target, NULL, 2669 RegisterOrConstant(k->super_check_offset())); 2670 } else { 2671 // perform the fast part of the checking logic 2672 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, 2673 failure_target, NULL); 2674 } 2675 if (need_slow_path) { 2676 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 2677 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup"); 2678 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); 2679 __ delayed()->nop(); 2680 __ cmp(G3, 0); 2681 __ br(Assembler::equal, false, Assembler::pn, *failure_target); 2682 __ delayed()->nop(); 2683 // Fall through to success case 2684 } 2685 } 2686 2687 if (op->should_profile()) { 2688 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1; 2689 assert_different_registers(obj, mdo, recv, tmp1); 2690 __ bind(profile_cast_success); 2691 jobject2reg(md->constant_encoding(), mdo); 2692 if (mdo_offset_bias > 0) { 2693 __ set(mdo_offset_bias, tmp1); 2694 __ add(mdo, tmp1, mdo); 2695 } 2696 __ load_klass(obj, recv); 2697 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success); 2698 // Jump over the failure case 2699 __ ba(*success); 2700 __ delayed()->nop(); 2701 // Cast failure case 2702 __ bind(profile_cast_failure); 2703 jobject2reg(md->constant_encoding(), mdo); 2704 if (mdo_offset_bias > 0) { 2705 __ set(mdo_offset_bias, tmp1); 2706 __ add(mdo, tmp1, mdo); 2707 } 2708 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); 2709 __ ld_ptr(data_addr, tmp1); 2710 __ sub(tmp1, DataLayout::counter_increment, tmp1); 2711 __ st_ptr(tmp1, data_addr); 2712 __ ba(*failure); 2713 __ delayed()->nop(); 2714 } 2715 __ ba(*success); 2716 __ delayed()->nop(); 2717 } 2718 2719 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { 2720 LIR_Code code = op->code(); 2721 if (code == lir_store_check) { 2722 Register value = op->object()->as_register(); 2723 Register array = op->array()->as_register(); 2724 Register k_RInfo = op->tmp1()->as_register(); 2725 Register klass_RInfo = op->tmp2()->as_register(); 2726 Register Rtmp1 = op->tmp3()->as_register(); 2727 2728 __ verify_oop(value); 2729 CodeStub* stub = op->stub(); 2730 // check if it needs to be profiled 2731 ciMethodData* md; 2732 ciProfileData* data; 2733 int mdo_offset_bias = 0; 2734 if (op->should_profile()) { 2735 ciMethod* method = op->profiled_method(); 2736 assert(method != NULL, "Should have method"); 2737 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias); 2738 } 2739 Label profile_cast_success, profile_cast_failure, done; 2740 Label *success_target = op->should_profile() ? &profile_cast_success : &done; 2741 Label *failure_target = op->should_profile() ? 
&profile_cast_failure : stub->entry(); 2742 2743 if (op->should_profile()) { 2744 Label not_null; 2745 __ br_notnull_short(value, Assembler::pn, not_null); 2746 Register mdo = k_RInfo; 2747 Register data_val = Rtmp1; 2748 jobject2reg(md->constant_encoding(), mdo); 2749 if (mdo_offset_bias > 0) { 2750 __ set(mdo_offset_bias, data_val); 2751 __ add(mdo, data_val, mdo); 2752 } 2753 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias); 2754 __ ldub(flags_addr, data_val); 2755 __ or3(data_val, BitData::null_seen_byte_constant(), data_val); 2756 __ stb(data_val, flags_addr); 2757 __ ba_short(done); 2758 __ bind(not_null); 2759 } else { 2760 __ br_null_short(value, Assembler::pn, done); 2761 } 2762 add_debug_info_for_null_check_here(op->info_for_exception()); 2763 __ load_klass(array, k_RInfo); 2764 __ load_klass(value, klass_RInfo); 2765 2766 // get instance klass 2767 __ ld_ptr(Address(k_RInfo, objArrayKlass::element_klass_offset()), k_RInfo); 2768 // perform the fast part of the checking logic 2769 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL); 2770 2771 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 2772 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup"); 2773 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); 2774 __ delayed()->nop(); 2775 __ cmp(G3, 0); 2776 __ br(Assembler::equal, false, Assembler::pn, *failure_target); 2777 __ delayed()->nop(); 2778 // fall through to the success case 2779 2780 if (op->should_profile()) { 2781 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1; 2782 assert_different_registers(value, mdo, recv, tmp1); 2783 __ bind(profile_cast_success); 2784 jobject2reg(md->constant_encoding(), mdo); 2785 if (mdo_offset_bias > 0) { 2786 __ set(mdo_offset_bias, tmp1); 2787 __ add(mdo, tmp1, mdo); 2788 } 2789 __ load_klass(value, recv); 2790 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done); 2791 __ ba_short(done); 2792 // Cast failure case 2793 __ bind(profile_cast_failure); 2794 jobject2reg(md->constant_encoding(), mdo); 2795 if (mdo_offset_bias > 0) { 2796 __ set(mdo_offset_bias, tmp1); 2797 __ add(mdo, tmp1, mdo); 2798 } 2799 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); 2800 __ ld_ptr(data_addr, tmp1); 2801 __ sub(tmp1, DataLayout::counter_increment, tmp1); 2802 __ st_ptr(tmp1, data_addr); 2803 __ ba(*stub->entry()); 2804 __ delayed()->nop(); 2805 } 2806 __ bind(done); 2807 } else if (code == lir_checkcast) { 2808 Register obj = op->object()->as_register(); 2809 Register dst = op->result_opr()->as_register(); 2810 Label success; 2811 emit_typecheck_helper(op, &success, op->stub()->entry(), &success); 2812 __ bind(success); 2813 __ mov(obj, dst); 2814 } else if (code == lir_instanceof) { 2815 Register obj = op->object()->as_register(); 2816 Register dst = op->result_opr()->as_register(); 2817 Label success, failure, done; 2818 emit_typecheck_helper(op, &success, &failure, &failure); 2819 __ bind(failure); 2820 __ set(0, dst); 2821 __ ba_short(done); 2822 __ bind(success); 2823 __ set(1, dst); 2824 __ bind(done); 2825 } else { 2826 ShouldNotReachHere(); 2827 } 2828 2829 } 2830 2831 2832 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { 2833 if (op->code() == lir_cas_long) { 2834 assert(VM_Version::supports_cx8(), "wrong machine"); 2835 Register addr = op->addr()->as_pointer_register(); 
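// casx compares the 64-bit value at [addr] with t1 and, only if they are
// equal, stores t2 there; either way t2 is overwritten with the old memory
// value. The operands are first copied into the temps so the cmp/new value
// registers survive (a sketch of the CAS contract this sequence relies on).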
2836 Register cmp_value_lo = op->cmp_value()->as_register_lo(); 2837 Register cmp_value_hi = op->cmp_value()->as_register_hi(); 2838 Register new_value_lo = op->new_value()->as_register_lo(); 2839 Register new_value_hi = op->new_value()->as_register_hi(); 2840 Register t1 = op->tmp1()->as_register(); 2841 Register t2 = op->tmp2()->as_register(); 2842 #ifdef _LP64 2843 __ mov(cmp_value_lo, t1); 2844 __ mov(new_value_lo, t2); 2845 // perform the compare and swap operation 2846 __ casx(addr, t1, t2); 2847 // generate condition code - if the swap succeeded, t2 ("new value" reg) was 2848 // overwritten with the original value in "addr" and will be equal to t1. 2849 __ cmp(t1, t2); 2850 #else 2851 // move high and low halves of long values into single registers 2852 __ sllx(cmp_value_hi, 32, t1); // shift high half into temp reg 2853 __ srl(cmp_value_lo, 0, cmp_value_lo); // clear upper 32 bits of low half 2854 __ or3(t1, cmp_value_lo, t1); // t1 holds 64-bit compare value 2855 __ sllx(new_value_hi, 32, t2); 2856 __ srl(new_value_lo, 0, new_value_lo); 2857 __ or3(t2, new_value_lo, t2); // t2 holds 64-bit value to swap 2858 // perform the compare and swap operation 2859 __ casx(addr, t1, t2); 2860 // generate condition code - if the swap succeeded, t2 ("new value" reg) was 2861 // overwritten with the original value in "addr" and will be equal to t1. 2862 // Produce icc flag for 32bit. 2863 __ sub(t1, t2, t2); 2864 __ srlx(t2, 32, t1); 2865 __ orcc(t2, t1, G0); 2866 #endif 2867 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) { 2868 Register addr = op->addr()->as_pointer_register(); 2869 Register cmp_value = op->cmp_value()->as_register(); 2870 Register new_value = op->new_value()->as_register(); 2871 Register t1 = op->tmp1()->as_register(); 2872 Register t2 = op->tmp2()->as_register(); 2873 __ mov(cmp_value, t1); 2874 __ mov(new_value, t2); 2875 if (op->code() == lir_cas_obj) { 2876 if (UseCompressedOops) { 2877 __ encode_heap_oop(t1); 2878 __ encode_heap_oop(t2); 2879 __ cas(addr, t1, t2); 2880 } else { 2881 __ cas_ptr(addr, t1, t2); 2882 } 2883 } else { 2884 __ cas(addr, t1, t2); 2885 } 2886 __ cmp(t1, t2); 2887 } else { 2888 Unimplemented(); 2889 } 2890 } 2891 2892 void LIR_Assembler::set_24bit_FPU() { 2893 Unimplemented(); 2894 } 2895 2896 2897 void LIR_Assembler::reset_FPU() { 2898 Unimplemented(); 2899 } 2900 2901 2902 void LIR_Assembler::breakpoint() { 2903 __ breakpoint_trap(); 2904 } 2905 2906 2907 void LIR_Assembler::push(LIR_Opr opr) { 2908 Unimplemented(); 2909 } 2910 2911 2912 void LIR_Assembler::pop(LIR_Opr opr) { 2913 Unimplemented(); 2914 } 2915 2916 2917 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) { 2918 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no); 2919 Register dst = dst_opr->as_register(); 2920 Register reg = mon_addr.base(); 2921 int offset = mon_addr.disp(); 2922 // compute pointer to BasicLock 2923 if (mon_addr.is_simm13()) { 2924 __ add(reg, offset, dst); 2925 } else { 2926 __ set(offset, dst); 2927 __ add(dst, reg, dst); 2928 } 2929 } 2930 2931 2932 void LIR_Assembler::emit_lock(LIR_OpLock* op) { 2933 Register obj = op->obj_opr()->as_register(); 2934 Register hdr = op->hdr_opr()->as_register(); 2935 Register lock = op->lock_opr()->as_register(); 2936 2937 // obj may not be an oop 2938 if (op->code() == lir_lock) { 2939 MonitorEnterStub* stub = (MonitorEnterStub*)op->stub(); 2940 if (UseFastLocking) { 2941 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced 
header"); 2942 // add debug info for NullPointerException only if one is possible 2943 if (op->info() != NULL) { 2944 add_debug_info_for_null_check_here(op->info()); 2945 } 2946 __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry()); 2947 } else { 2948 // always do slow locking 2949 // note: the slow locking code could be inlined here, however if we use 2950 // slow locking, speed doesn't matter anyway and this solution is 2951 // simpler and requires less duplicated code - additionally, the 2952 // slow locking code is the same in either case which simplifies 2953 // debugging 2954 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry()); 2955 __ delayed()->nop(); 2956 } 2957 } else { 2958 assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock"); 2959 if (UseFastLocking) { 2960 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2961 __ unlock_object(hdr, obj, lock, *op->stub()->entry()); 2962 } else { 2963 // always do slow unlocking 2964 // note: the slow unlocking code could be inlined here, however if we use 2965 // slow unlocking, speed doesn't matter anyway and this solution is 2966 // simpler and requires less duplicated code - additionally, the 2967 // slow unlocking code is the same in either case which simplifies 2968 // debugging 2969 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry()); 2970 __ delayed()->nop(); 2971 } 2972 } 2973 __ bind(*op->stub()->continuation()); 2974 } 2975 2976 2977 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { 2978 ciMethod* method = op->profiled_method(); 2979 int bci = op->profiled_bci(); 2980 2981 // Update counter for all call types 2982 ciMethodData* md = method->method_data_or_null(); 2983 assert(md != NULL, "Sanity"); 2984 ciProfileData* data = md->bci_to_data(bci); 2985 assert(data->is_CounterData(), "need CounterData for calls"); 2986 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 2987 Register mdo = op->mdo()->as_register(); 2988 #ifdef _LP64 2989 assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated"); 2990 Register tmp1 = op->tmp1()->as_register_lo(); 2991 #else 2992 assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated"); 2993 Register tmp1 = op->tmp1()->as_register(); 2994 #endif 2995 jobject2reg(md->constant_encoding(), mdo); 2996 int mdo_offset_bias = 0; 2997 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) + 2998 data->size_in_bytes())) { 2999 // The offset is large so bias the mdo by the base of the slot so 3000 // that the ld can use simm13s to reference the slots of the data 3001 mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset()); 3002 __ set(mdo_offset_bias, O7); 3003 __ add(mdo, O7, mdo); 3004 } 3005 3006 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); 3007 Bytecodes::Code bc = method->java_code_at_bci(bci); 3008 // Perform additional virtual call profiling for invokevirtual and 3009 // invokeinterface bytecodes 3010 if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) && 3011 C1ProfileVirtualCalls) { 3012 assert(op->recv()->is_single_cpu(), "recv must be allocated"); 3013 Register recv = op->recv()->as_register(); 3014 assert_different_registers(mdo, tmp1, recv); 3015 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); 3016 ciKlass* known_klass = op->known_holder(); 3017 if 
(C1OptimizeVirtualCallProfiling && known_klass != NULL) { 3018 // We know the type that will be seen at this call site; we can 3019 // statically update the methodDataOop rather than needing to do 3020 // dynamic tests on the receiver type 3021 3022 // NOTE: we should probably put a lock around this search to 3023 // avoid collisions by concurrent compilations 3024 ciVirtualCallData* vc_data = (ciVirtualCallData*) data; 3025 uint i; 3026 for (i = 0; i < VirtualCallData::row_limit(); i++) { 3027 ciKlass* receiver = vc_data->receiver(i); 3028 if (known_klass->equals(receiver)) { 3029 Address data_addr(mdo, md->byte_offset_of_slot(data, 3030 VirtualCallData::receiver_count_offset(i)) - 3031 mdo_offset_bias); 3032 __ ld_ptr(data_addr, tmp1); 3033 __ add(tmp1, DataLayout::counter_increment, tmp1); 3034 __ st_ptr(tmp1, data_addr); 3035 return; 3036 } 3037 } 3038 3039 // Receiver type not found in profile data; select an empty slot 3040 3041 // Note that this is less efficient than it should be because it 3042 // always does a write to the receiver part of the 3043 // VirtualCallData rather than just the first time 3044 for (i = 0; i < VirtualCallData::row_limit(); i++) { 3045 ciKlass* receiver = vc_data->receiver(i); 3046 if (receiver == NULL) { 3047 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - 3048 mdo_offset_bias); 3049 jobject2reg(known_klass->constant_encoding(), tmp1); 3050 __ st_ptr(tmp1, recv_addr); 3051 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - 3052 mdo_offset_bias); 3053 __ ld_ptr(data_addr, tmp1); 3054 __ add(tmp1, DataLayout::counter_increment, tmp1); 3055 __ st_ptr(tmp1, data_addr); 3056 return; 3057 } 3058 } 3059 } else { 3060 __ load_klass(recv, recv); 3061 Label update_done; 3062 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done); 3063 // Receiver did not match any saved receiver and there is no empty row for it. 3064 // Increment total counter to indicate polymorphic case. 3065 __ ld_ptr(counter_addr, tmp1); 3066 __ add(tmp1, DataLayout::counter_increment, tmp1); 3067 __ st_ptr(tmp1, counter_addr); 3068 3069 __ bind(update_done); 3070 } 3071 } else { 3072 // Static call 3073 __ ld_ptr(counter_addr, tmp1); 3074 __ add(tmp1, DataLayout::counter_increment, tmp1); 3075 __ st_ptr(tmp1, counter_addr); 3076 } 3077 } 3078 3079 void LIR_Assembler::align_backward_branch_target() { 3080 __ align(OptoLoopAlignment); 3081 } 3082 3083 3084 void LIR_Assembler::emit_delay(LIR_OpDelay* op) { 3085 // make sure we are expecting a delay 3086 // this has the side effect of clearing the delay state 3087 // so we can use _masm instead of _masm->delayed() to do the 3088 // code generation. 3089 __ delayed(); 3090 3091 // make sure we only emit one instruction 3092 int offset = code_offset(); 3093 op->delay_op()->emit_code(this); 3094 #ifdef ASSERT 3095 if (code_offset() - offset != NativeInstruction::nop_instruction_size) { 3096 op->delay_op()->print(); 3097 } 3098 assert(code_offset() - offset == NativeInstruction::nop_instruction_size, 3099 "only one instruction can go in a delay slot"); 3100 #endif 3101 3102 // we may also be emitting the call info for the instruction 3103 // which we are the delay slot of. 
3104 CodeEmitInfo* call_info = op->call_info(); 3105 if (call_info) { 3106 add_call_info(code_offset(), call_info); 3107 } 3108 3109 if (VerifyStackAtCalls) { 3110 _masm->sub(FP, SP, O7); 3111 _masm->cmp(O7, initial_frame_size_in_bytes()); 3112 _masm->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2 ); 3113 } 3114 } 3115 3116 3117 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) { 3118 assert(left->is_register(), "can only handle registers"); 3119 3120 if (left->is_single_cpu()) { 3121 __ neg(left->as_register(), dest->as_register()); 3122 } else if (left->is_single_fpu()) { 3123 __ fneg(FloatRegisterImpl::S, left->as_float_reg(), dest->as_float_reg()); 3124 } else if (left->is_double_fpu()) { 3125 __ fneg(FloatRegisterImpl::D, left->as_double_reg(), dest->as_double_reg()); 3126 } else { 3127 assert (left->is_double_cpu(), "Must be a long"); 3128 Register Rlow = left->as_register_lo(); 3129 Register Rhi = left->as_register_hi(); 3130 #ifdef _LP64 3131 __ sub(G0, Rlow, dest->as_register_lo()); 3132 #else 3133 __ subcc(G0, Rlow, dest->as_register_lo()); 3134 __ subc (G0, Rhi, dest->as_register_hi()); 3135 #endif 3136 } 3137 } 3138 3139 3140 void LIR_Assembler::fxch(int i) { 3141 Unimplemented(); 3142 } 3143 3144 void LIR_Assembler::fld(int i) { 3145 Unimplemented(); 3146 } 3147 3148 void LIR_Assembler::ffree(int i) { 3149 Unimplemented(); 3150 } 3151 3152 void LIR_Assembler::rt_call(LIR_Opr result, address dest, 3153 const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { 3154 3155 // if tmp is invalid, then the function being called doesn't destroy the thread 3156 if (tmp->is_valid()) { 3157 __ save_thread(tmp->as_register()); 3158 } 3159 __ call(dest, relocInfo::runtime_call_type); 3160 __ delayed()->nop(); 3161 if (info != NULL) { 3162 add_call_info_here(info); 3163 } 3164 if (tmp->is_valid()) { 3165 __ restore_thread(tmp->as_register()); 3166 } 3167 3168 #ifdef ASSERT 3169 __ verify_thread(); 3170 #endif // ASSERT 3171 } 3172 3173 3174 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { 3175 #ifdef _LP64 3176 ShouldNotReachHere(); 3177 #endif 3178 3179 NEEDS_CLEANUP; 3180 if (type == T_LONG) { 3181 LIR_Address* mem_addr = dest->is_address() ? 
dest->as_address_ptr() : src->as_address_ptr(); 3182 3183 // (extended to allow indexed as well as constant displaced for JSR-166) 3184 Register idx = noreg; // contains either constant offset or index 3185 3186 int disp = mem_addr->disp(); 3187 if (mem_addr->index() == LIR_OprFact::illegalOpr) { 3188 if (!Assembler::is_simm13(disp)) { 3189 idx = O7; 3190 __ set(disp, idx); 3191 } 3192 } else { 3193 assert(disp == 0, "not both indexed and disp"); 3194 idx = mem_addr->index()->as_register(); 3195 } 3196 3197 int null_check_offset = -1; 3198 3199 Register base = mem_addr->base()->as_register(); 3200 if (src->is_register() && dest->is_address()) { 3201 // G4 is high half, G5 is low half 3202 if (VM_Version::v9_instructions_work()) { 3203 // clear the top bits of G5, and scale up G4 3204 __ srl (src->as_register_lo(), 0, G5); 3205 __ sllx(src->as_register_hi(), 32, G4); 3206 // combine the two halves into the 64 bits of G4 3207 __ or3(G4, G5, G4); 3208 null_check_offset = __ offset(); 3209 if (idx == noreg) { 3210 __ stx(G4, base, disp); 3211 } else { 3212 __ stx(G4, base, idx); 3213 } 3214 } else { 3215 __ mov (src->as_register_hi(), G4); 3216 __ mov (src->as_register_lo(), G5); 3217 null_check_offset = __ offset(); 3218 if (idx == noreg) { 3219 __ std(G4, base, disp); 3220 } else { 3221 __ std(G4, base, idx); 3222 } 3223 } 3224 } else if (src->is_address() && dest->is_register()) { 3225 null_check_offset = __ offset(); 3226 if (VM_Version::v9_instructions_work()) { 3227 if (idx == noreg) { 3228 __ ldx(base, disp, G5); 3229 } else { 3230 __ ldx(base, idx, G5); 3231 } 3232 __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi 3233 __ mov (G5, dest->as_register_lo()); // copy low half into lo 3234 } else { 3235 if (idx == noreg) { 3236 __ ldd(base, disp, G4); 3237 } else { 3238 __ ldd(base, idx, G4); 3239 } 3240 // G4 is high half, G5 is low half 3241 __ mov (G4, dest->as_register_hi()); 3242 __ mov (G5, dest->as_register_lo()); 3243 } 3244 } else { 3245 Unimplemented(); 3246 } 3247 if (info != NULL) { 3248 add_debug_info_for_null_check(null_check_offset, info); 3249 } 3250 3251 } else { 3252 // use normal move for all other volatiles since they don't need 3253 // special handling to remain atomic. 3254 move_op(src, dest, type, lir_patch_none, info, false, false, false); 3255 } 3256 } 3257 3258 void LIR_Assembler::membar() { 3259 // only StoreLoad membars are ever explicitly needed on sparcs in TSO mode 3260 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) ); 3261 } 3262 3263 void LIR_Assembler::membar_acquire() { 3264 // no-op on TSO 3265 } 3266 3267 void LIR_Assembler::membar_release() { 3268 // no-op on TSO 3269 } 3270 3271 void LIR_Assembler::membar_loadload() { 3272 // no-op 3273 //__ membar(Assembler::Membar_mask_bits(Assembler::loadload)); 3274 } 3275 3276 void LIR_Assembler::membar_storestore() { 3277 // no-op 3278 //__ membar(Assembler::Membar_mask_bits(Assembler::storestore)); 3279 } 3280 3281 void LIR_Assembler::membar_loadstore() { 3282 // no-op 3283 //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore)); 3284 } 3285 3286 void LIR_Assembler::membar_storeload() { 3287 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); 3288 } 3289 3290 3291 // Pack two sequential registers containing 32 bit values 3292 // into a single 64 bit register. 3293 // src and src->successor() are packed into dst 3294 // src and dst may be the same register. 
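// e.g. with src = O0 (so src->successor() is O1) holding the high and low
// words respectively, pack64 emits:
//   sllx O0, 32, O0   ! high word into bits 63..32
//   srl  O1,  0, O1   ! zero-extend the low word
//   or3  O0, O1, rd   ! rd = packed 64-bit result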
3295 // Note: src is destroyed 3296 void LIR_Assembler::pack64(LIR_Opr src, LIR_Opr dst) { 3297 Register rs = src->as_register(); 3298 Register rd = dst->as_register_lo(); 3299 __ sllx(rs, 32, rs); 3300 __ srl(rs->successor(), 0, rs->successor()); 3301 __ or3(rs, rs->successor(), rd); 3302 } 3303 3304 // Unpack a 64 bit value in a register into 3305 // two sequential registers. 3306 // src is unpacked into dst and dst->successor() 3307 void LIR_Assembler::unpack64(LIR_Opr src, LIR_Opr dst) { 3308 Register rs = src->as_register_lo(); 3309 Register rd = dst->as_register_hi(); 3310 assert_different_registers(rs, rd, rd->successor()); 3311 __ srlx(rs, 32, rd); 3312 __ srl (rs, 0, rd->successor()); 3313 } 3314 3315 3316 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) { 3317 LIR_Address* addr = addr_opr->as_address_ptr(); 3318 assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1 && Assembler::is_simm13(addr->disp()), "can't handle complex addresses yet"); 3319 3320 __ add(addr->base()->as_pointer_register(), addr->disp(), dest->as_pointer_register()); 3321 } 3322 3323 3324 void LIR_Assembler::get_thread(LIR_Opr result_reg) { 3325 assert(result_reg->is_register(), "check"); 3326 __ mov(G2_thread, result_reg->as_register()); 3327 } 3328 3329 3330 void LIR_Assembler::peephole(LIR_List* lir) { 3331 LIR_OpList* inst = lir->instructions_list(); 3332 for (int i = 0; i < inst->length(); i++) { 3333 LIR_Op* op = inst->at(i); 3334 switch (op->code()) { 3335 case lir_cond_float_branch: 3336 case lir_branch: { 3337 LIR_OpBranch* branch = op->as_OpBranch(); 3338 assert(branch->info() == NULL, "shouldn't be state on branches anymore"); 3339 LIR_Op* delay_op = NULL; 3340 // we'd like to be able to pull following instructions into 3341 // this slot but we don't know enough to do it safely yet so 3342 // only optimize block to block control flow. 3343 if (LIRFillDelaySlots && branch->block()) { 3344 LIR_Op* prev = inst->at(i - 1); 3345 if (prev && LIR_Assembler::is_single_instruction(prev) && prev->info() == NULL) { 3346 // swap previous instruction into delay slot 3347 inst->at_put(i - 1, op); 3348 inst->at_put(i, new LIR_OpDelay(prev, op->info())); 3349 #ifndef PRODUCT 3350 if (LIRTracePeephole) { 3351 tty->print_cr("delayed"); 3352 inst->at(i - 1)->print(); 3353 inst->at(i)->print(); 3354 tty->cr(); 3355 } 3356 #endif 3357 continue; 3358 } 3359 } 3360 3361 if (!delay_op) { 3362 delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), NULL); 3363 } 3364 inst->insert_before(i + 1, delay_op); 3365 break; 3366 } 3367 case lir_static_call: 3368 case lir_virtual_call: 3369 case lir_icvirtual_call: 3370 case lir_optvirtual_call: 3371 case lir_dynamic_call: { 3372 LIR_Op* prev = inst->at(i - 1); 3373 if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL && 3374 (op->code() != lir_virtual_call || 3375 !prev->result_opr()->is_single_cpu() || 3376 prev->result_opr()->as_register() != O0) && 3377 LIR_Assembler::is_single_instruction(prev)) { 3378 // Only moves without info can be put into the delay slot. 3379 // Also don't allow the setup of the receiver in the delay 3380 // slot for vtable calls. 
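// The two LIR ops swap places: the call moves into the predecessor's index
// and the predecessor is re-emitted wrapped in a LIR_OpDelay carrying the
// call's debug info, which lands it in the delay slot.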
3381           inst->at_put(i - 1, op);
3382           inst->at_put(i, new LIR_OpDelay(prev, op->info()));
3383 #ifndef PRODUCT
3384           if (LIRTracePeephole) {
3385             tty->print_cr("delayed");
3386             inst->at(i - 1)->print();
3387             inst->at(i)->print();
3388             tty->cr();
3389           }
3390 #endif
3391         } else {
3392           LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
3393           inst->insert_before(i + 1, delay_op);
3394           i++;
3395         }
3396
3397 #if defined(TIERED) && !defined(_LP64)
3398         // fixup the return value from G1 to O0/O1 for long returns.
3399         // It's done here instead of in LIRGenerator because there's
3400         // such a mismatch between the single reg and double reg
3401         // calling convention.
3402         LIR_OpJavaCall* callop = op->as_OpJavaCall();
3403         if (callop->result_opr() == FrameMap::out_long_opr) {
3404           LIR_OpJavaCall* call;
3405           LIR_OprList* arguments = new LIR_OprList(callop->arguments()->length());
3406           for (int a = 0; a < callop->arguments()->length(); a++) {
3407             arguments->append(callop->arguments()->at(a));  // copy the argument operands into the new list
3408           }
3409           if (op->code() == lir_virtual_call) {
3410             call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
3411                                       callop->vtable_offset(), arguments, callop->info());
3412           } else {
3413             call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
3414                                       callop->addr(), arguments, callop->info());
3415           }
3416           inst->at_put(i - 1, call);
3417           inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(),
3418                                                  T_LONG, lir_patch_none, NULL));
3419         }
3420 #endif
3421         break;
3422       }
3423     }
3424   }
3425 }
3426
3427
3428
3429
3430 #undef __