/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_c1_LIRAssembler_sparc.cpp.incl"

#define __ _masm->


//------------------------------------------------------------


bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  if (opr->is_constant()) {
    LIR_Const* constant = opr->as_constant_ptr();
    switch (constant->type()) {
      case T_INT: {
        jint value = constant->as_jint();
        return Assembler::is_simm13(value);
      }

      default:
        return false;
    }
  }
  return false;
}


bool LIR_Assembler::is_single_instruction(LIR_Op* op) {
  switch (op->code()) {
    case lir_null_check:
      return true;


    case lir_add:
    case lir_ushr:
    case lir_shr:
    case lir_shl:
      // integer shifts and adds are always one instruction
      return op->result_opr()->is_single_cpu();


    case lir_move: {
      LIR_Op1* op1 = op->as_Op1();
      LIR_Opr src = op1->in_opr();
      LIR_Opr dst = op1->result_opr();

      if (src == dst) {
        NEEDS_CLEANUP;
        // this works around a problem where moves with the same src and dst
        // end up in the delay slot and then the assembler swallows the mov
        // since it has no effect and then it complains because the delay slot
        // is empty.  returning false stops the optimizer from putting this in
        // the delay slot
        return false;
      }

      // don't put moves involving oops into the delay slot since the VerifyOops code
      // will make it much larger than a single instruction.
      if (VerifyOops) {
        return false;
      }

      if (src->is_double_cpu() || dst->is_double_cpu() || op1->patch_code() != lir_patch_none ||
          ((src->is_double_fpu() || dst->is_double_fpu()) && op1->move_kind() != lir_move_normal)) {
        return false;
      }

#ifdef _LP64
      if (UseCompressedOops) {
        if (dst->is_address() && !dst->is_stack() && (dst->type() == T_OBJECT || dst->type() == T_ARRAY)) return false;
        if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
      }
#endif

      if (dst->is_register()) {
        if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
          return !PatchALot;
        } else if (src->is_single_stack()) {
          return true;
        }
      }

      if (src->is_register()) {
        if (dst->is_address() && Assembler::is_simm13(dst->as_address_ptr()->disp())) {
          return !PatchALot;
        } else if (dst->is_single_stack()) {
          return true;
        }
      }

      if (dst->is_register() &&
          ((src->is_register() && src->is_single_word() && src->is_same_type(dst)) ||
           (src->is_constant() && LIR_Assembler::is_small_constant(op->as_Op1()->in_opr())))) {
        return true;
      }

      return false;
    }

    default:
      return false;
  }
  ShouldNotReachHere();
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::O0_oop_opr;
}


LIR_Opr LIR_Assembler::incomingReceiverOpr() {
  return FrameMap::I0_oop_opr;
}


LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::I0_opr;
}


int LIR_Assembler::initial_frame_size_in_bytes() {
  return in_bytes(frame_map()->framesize_in_bytes());
}


// inline cache check: the inline cached class is in G5_inline_cache_reg (G5);
// we fetch the class of the receiver (O0) and compare it with the cached class.
// If they do not match we jump to the slow case.
int LIR_Assembler::check_icache() {
  int offset = __ offset();
  __ inline_cache_check(O0, G5_inline_cache_reg);
  return offset;
}


void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp):
  //
  //   1. Create a new compiled activation.
  //   2. Initialize local variables in the compiled activation.  The expression stack must be empty
  //      at the osr_bci; it is not initialized.
  //   3. Jump to the continuation address in compiled code to resume execution.

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
  __ build_frame(initial_frame_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[number_of_locks-1..0]
  //
  // locals is a direct copy of the interpreter frame, so in the osr buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks.  The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is the 0th lock
  // in the interpreter frame (the method lock if a sync method)
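
  // Worked example of the layout above (illustrative numbers, not VM code):
  // with BytesPerWord == 8, max_locals() == 3 and number_of_locks == 2, the
  // monitor_offset computed below is 8*3 + 16*1 = 40, so monitor 0 is read
  // from OSR_buf+40 (lock) and OSR_buf+48 (oop), and monitor 1 from
  // OSR_buf+24 and OSR_buf+32.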

  // Initialize monitors in the compiled activation.
  //   I0: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
        __ cmp(G0, O7);
        __ br(Assembler::notEqual, false, Assembler::pt, L);
        __ delayed()->nop();
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif // ASSERT
      // Copy the lock field into the compiled activation.
      __ ld_ptr(OSR_buf, slot_offset + 0, O7);
      __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
      __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
      __ st_ptr(O7, frame_map()->address_for_monitor_object(i));
    }
  }
}


// Optimized Library calls
// This is the fast version of java.lang.String.compare; it has no
// OSR entry, and therefore we generate a slow version for OSRs.
void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst, CodeEmitInfo* info) {
  Register str0 = left->as_register();
  Register str1 = right->as_register();

  Label Ldone;

  Register result = dst->as_register();
  {
    // Get a pointer to the first character of string0 in tmp0 and get string0.count in str0
    // Get a pointer to the first character of string1 in tmp1 and get string1.count in str1
    // Also, get string0.count-string1.count in o7 and get the condition code set
    // Note: some instructions have been hoisted for better instruction scheduling

    Register tmp0 = L0;
    Register tmp1 = L1;
    Register tmp2 = L2;

    int  value_offset = java_lang_String:: value_offset_in_bytes(); // char array
    int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
    int  count_offset = java_lang_String:: count_offset_in_bytes();

    __ load_heap_oop(str0, value_offset, tmp0);
    __ ld(str0, offset_offset, tmp2);
    __ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
    __ ld(str0, count_offset, str0);
    __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);

    // str1 may be null
    add_debug_info_for_null_check_here(info);

    __ load_heap_oop(str1, value_offset, tmp1);
    __ add(tmp0, tmp2, tmp0);

    __ ld(str1, offset_offset, tmp2);
    __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
    __ ld(str1, count_offset, str1);
    __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
    __ subcc(str0, str1, O7);
    __ add(tmp1, tmp2, tmp1);
  }

  {
    // Compute the minimum of the string lengths, scale it and store it in limit
    Register count0 = I0;
    Register count1 = I1;
    Register limit  = L3;

    Label Lskip;
    __ sll(count0, exact_log2(sizeof(jchar)), limit);             // string0 is shorter
    __ br(Assembler::greater, true, Assembler::pt, Lskip);
    __ delayed()->sll(count1, exact_log2(sizeof(jchar)), limit);  // string1 is shorter
    __ bind(Lskip);

    // If either string is empty (or both of them) the result is the difference in lengths
    __ cmp(limit, 0);
    __ br(Assembler::equal, true, Assembler::pn, Ldone);
    __ delayed()->mov(O7, result);  // result is difference in lengths
  }
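
  // Sketch of the comparison loop emitted below (illustrative C, not VM code):
  // base0/base1 are advanced past the last character to compare and limit is
  // negated, so a negative byte index climbs toward zero:
  //
  //   for (int k = -limit; k != 0; k += sizeof(jchar)) {
  //     jchar c0 = *(jchar*)(base0 + k);
  //     jchar c1 = *(jchar*)(base1 + k);
  //     if (c0 != c1) return c0 - c1;
  //   }
  //   return count0 - count1;   // already sitting in O7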

  {
    // Neither string is empty
    Label Lloop;

    Register base0 = L0;
    Register base1 = L1;
    Register chr0  = I0;
    Register chr1  = I1;
    Register limit = L3;

    // Shift base0 and base1 to the end of the arrays, negate limit
    __ add(base0, limit, base0);
    __ add(base1, limit, base1);
    __ neg(limit);  // limit = -min{string0.count, string1.count}

    __ lduh(base0, limit, chr0);
    __ bind(Lloop);
    __ lduh(base1, limit, chr1);
    __ subcc(chr0, chr1, chr0);
    __ br(Assembler::notZero, false, Assembler::pn, Ldone);
    assert(chr0 == result, "result must be pre-placed");
    __ delayed()->inccc(limit, sizeof(jchar));
    __ br(Assembler::notZero, true, Assembler::pt, Lloop);
    __ delayed()->lduh(base0, limit, chr0);
  }

  // If strings are equal up to min length, return the length difference.
  __ mov(O7, result);

  // Otherwise, return the difference between the first mismatched chars.
  __ bind(Ldone);
}


// --------------------------------------------------------------------------------------------

void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no) {
  if (!GenerateSynchronizationCode) return;

  Register obj_reg  = obj_opr->as_register();
  Register lock_reg = lock_opr->as_register();

  Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  Register reg = mon_addr.base();
  int offset = mon_addr.disp();
  // compute pointer to BasicLock
  if (mon_addr.is_simm13()) {
    __ add(reg, offset, lock_reg);
  } else {
    __ set(offset, lock_reg);
    __ add(reg, lock_reg, lock_reg);
  }
  // unlock object
  MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, UseFastLocking, monitor_no);
  // _slow_case_stubs->append(slow_case);
  // temporary fix: must be created after the exception handler, therefore as call stub
  _slow_case_stubs->append(slow_case);
  if (UseFastLocking) {
    // try inlined fast unlocking first, revert to slow locking if it fails
    // note: lock_reg points to the displaced header since the displaced header offset is 0!
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
  } else {
    // always do slow unlocking
    // note: the slow unlocking code could be inlined here, however if we use
    //       slow unlocking, speed doesn't matter anyway and this solution is
    //       simpler and requires less duplicated code - additionally, the
    //       slow unlocking code is the same in either case which simplifies
    //       debugging
    __ br(Assembler::always, false, Assembler::pt, *slow_case->entry());
    __ delayed()->nop();
  }
  // done
  __ bind(*slow_case->continuation());
}


int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  ciMethod* method = compilation()->method();

  address handler_base = __ start_a_stub(exception_handler_size);

  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  debug_only(__ stop("should have gone to the caller");)
  assert(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}
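
// A note on the call/branch sequences used throughout this file: every SPARC
// control transfer has a delay slot that executes before the transfer takes
// effect, so each call or branch must be followed by an explicit
// "__ delayed()->nop();" (or a useful instruction) unless the peephole pass
// fills the slot later.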

// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0);
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(O0);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(O0, I0);  // Preserve the exception
  }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::I1_opr);
    stub = new MonitorExitStub(FrameMap::I1_opr, true, 0);
    __ unlock_object(I3, I2, I1, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ mov(G2_thread, O0);
    jobject2reg(method()->constant_encoding(), O1);
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type);
    __ delayed()->nop();
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(I0, O0);  // Restore the exception
  }

  // dispatch to the unwind logic
  __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for deopt handler
  ciMethod* method = compilation()->method();
  address handler_base = __ start_a_stub(deopt_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
  __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
  __ delayed()->nop();
  assert(code_offset() - offset <= deopt_handler_size, "overflow");
  debug_only(__ stop("should have gone to the caller");)
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ set(NULL_WORD, reg);
  } else {
    int oop_index = __ oop_recorder()->find_index(o);
    RelocationHolder rspec = oop_Relocation::spec(oop_index);
    __ set(NULL_WORD, reg, rspec); // Will be set when the nmethod is created
  }
}


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in oop table to hold the oop once it's been patched
  int oop_index = __ oop_recorder()->allocate_index((jobject)NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, oop_index);

  AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
  assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
  // NULL will be dynamically patched later and the patched value may be large.  We must
  // therefore generate the sethi/add as placeholders.
  __ patchable_set(addrlit, reg);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  Register Rdividend = op->in_opr1()->as_register();
  Register Rdivisor  = noreg;
  Register Rscratch  = op->in_opr3()->as_register();
  Register Rresult   = op->result_opr()->as_register();
  int divisor = -1;

  if (op->in_opr2()->is_register()) {
    Rdivisor = op->in_opr2()->as_register();
  } else {
    divisor = op->in_opr2()->as_constant_ptr()->as_jint();
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
  }

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor  != Rscratch, "");
  assert(op->code() == lir_idiv || op->code() == lir_irem, "Must be irem or idiv");

  if (Rdivisor == noreg && is_power_of_2(divisor)) {
    // convert division by a power of two into some shifts and logical operations
    if (op->code() == lir_idiv) {
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ sra(Rscratch, log2_intptr(divisor), Rresult);
      return;
    } else {
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ andn(Rscratch, divisor - 1, Rscratch);
      __ sub(Rdividend, Rscratch, Rresult);
      return;
    }
  }
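
  // Worked example for the shift-based path above (illustrative arithmetic):
  // for divisor == 4 and Rdividend == -7, Rscratch = (-7 >> 31) & 3 = 3, so
  // lir_idiv computes (-7 + 3) >> 2 = -1 and lir_irem computes
  // -7 - ((-7 + 3) & ~3) = -7 - (-4) = -3, matching Java's round-toward-zero
  // semantics for division and remainder.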

  __ sra(Rdividend, 31, Rscratch);
  __ wry(Rscratch);
  if (!VM_Version::v9_instructions_work()) {
    // v9 doesn't require these nops
    __ nop();
    __ nop();
    __ nop();
    __ nop();
  }

  add_debug_info_for_div0_here(op->info());

  if (Rdivisor != noreg) {
    __ sdivcc(Rdividend, Rdivisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  } else {
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
    __ sdivcc(Rdividend, divisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  }

  Label skip;
  __ br(Assembler::overflowSet, true, Assembler::pn, skip);
  __ delayed()->Assembler::sethi(0x80000000, (op->code() == lir_idiv ? Rresult : Rscratch));
  __ bind(skip);

  if (op->code() == lir_irem) {
    if (Rdivisor != noreg) {
      __ smul(Rscratch, Rdivisor, Rscratch);
    } else {
      __ smul(Rscratch, divisor, Rscratch);
    }
    __ sub(Rdividend, Rscratch, Rresult);
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif
  assert(op->info() == NULL, "shouldn't have CodeEmitInfo");

  if (op->cond() == lir_cond_always) {
    __ br(Assembler::always, false, Assembler::pt, *(op->label()));
  } else if (op->code() == lir_cond_float_branch) {
    assert(op->ublock() != NULL, "must have unordered successor");
    bool is_unordered = (op->ublock() == op->block());
    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal:         acond = Assembler::f_equal;    break;
      case lir_cond_notEqual:      acond = Assembler::f_notEqual; break;
      case lir_cond_less:          acond = (is_unordered ? Assembler::f_unorderedOrLess          : Assembler::f_less);           break;
      case lir_cond_greater:       acond = (is_unordered ? Assembler::f_unorderedOrGreater       : Assembler::f_greater);        break;
      case lir_cond_lessEqual:     acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual   : Assembler::f_lessOrEqual);    break;
      case lir_cond_greaterEqual:  acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
      default :                    ShouldNotReachHere();
    };

    if (!VM_Version::v9_instructions_work()) {
      __ nop();
    }
    __ fb(acond, false, Assembler::pn, *(op->label()));
  } else {
    assert (op->code() == lir_branch, "just checking");

    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::equal;                break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
      case lir_cond_less:         acond = Assembler::less;                 break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
      case lir_cond_greater:      acond = Assembler::greater;              break;
      case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
      case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
      default:                    ShouldNotReachHere();
    };

    // sparc has different condition codes for testing 32-bit
    // vs. 64-bit values.  We could always test xcc if we could
    // guarantee that 32-bit loads were always sign extended, but that isn't
    // true and since sign extension isn't free, it would impose a
    // slight cost.
#ifdef _LP64
    if (op->type() == T_INT) {
      __ br(acond, false, Assembler::pn, *(op->label()));
    } else
#endif
      __ brx(acond, false, Assembler::pn, *(op->label()));
  }
  // The peephole pass fills the delay slot
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  Bytecodes::Code code = op->bytecode();
  LIR_Opr dst = op->result_opr();

  switch(code) {
    case Bytecodes::_i2l: {
      Register rlo  = dst->as_register_lo();
      Register rhi  = dst->as_register_hi();
      Register rval = op->in_opr()->as_register();
#ifdef _LP64
      __ sra(rval, 0, rlo);
#else
      __ mov(rval, rlo);
      __ sra(rval, BitsPerInt-1, rhi);
#endif
      break;
    }
    case Bytecodes::_i2d:
    case Bytecodes::_i2f: {
      bool is_double = (code == Bytecodes::_i2d);
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width w = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      if (rsrc != rdst) {
        __ fmov(FloatRegisterImpl::S, rsrc, rdst);
      }
      __ fitof(w, rdst, rdst);
      break;
    }
    case Bytecodes::_f2i: {
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      Address       addr = frame_map()->address_for_slot(dst->single_stack_ix());
      Label L;
      // result must be 0 if value is NaN; test by comparing value to itself
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
      if (!VM_Version::v9_instructions_work()) {
        __ nop();
      }
      __ fb(Assembler::f_unordered, true, Assembler::pn, L);
      __ delayed()->st(G0, addr); // annulled if contents of rsrc are not NaN
      __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
      // move integer result from float register to int register
      __ stf(FloatRegisterImpl::S, rsrc, addr.base(), addr.disp());
      __ bind (L);
      break;
    }
    case Bytecodes::_l2i: {
      Register rlo  = op->in_opr()->as_register_lo();
      Register rhi  = op->in_opr()->as_register_hi();
      Register rdst = dst->as_register();
#ifdef _LP64
      __ sra(rlo, 0, rdst);
#else
      __ mov(rlo, rdst);
#endif
      break;
    }
    case Bytecodes::_d2f:
    case Bytecodes::_f2d: {
      bool is_double = (code == Bytecodes::_f2d);
      assert((!is_double && dst->is_single_fpu()) || (is_double && dst->is_double_fpu()), "check");
      LIR_Opr val = op->in_opr();
      FloatRegister rval = (code == Bytecodes::_d2f) ? val->as_double_reg() : val->as_float_reg();
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width vw = is_double ? FloatRegisterImpl::S : FloatRegisterImpl::D;
      FloatRegisterImpl::Width dw = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      __ ftof(vw, dw, rval, rdst);
      break;
    }
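
    // The two cases below narrow an int with a shift left followed by a
    // shift right: arithmetic (sra) for _i2b/_i2s to sign extend, logical
    // (srl) for _i2c to zero extend. Illustrative example for _i2b:
    // shift == 24, so rval == 0x1FF becomes 0xFF000000 after the sll and
    // 0xFFFFFFFF (-1) after the sra, i.e. the low byte sign-extended.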
    case Bytecodes::_i2s:
    case Bytecodes::_i2b: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
      int shift = (code == Bytecodes::_i2b) ? (BitsPerInt - T_BYTE_aelem_bytes * BitsPerByte) : (BitsPerInt - BitsPerShort);
      __ sll(rval, shift, rdst);
      __ sra(rdst, shift, rdst);
      break;
    }
    case Bytecodes::_i2c: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
      int shift = BitsPerInt - T_CHAR_aelem_bytes * BitsPerByte;
      __ sll(rval, shift, rdst);
      __ srl(rdst, shift, rdst);
      break;
    }

    default: ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code) {
  // do nothing since all instructions are word aligned on sparc
}


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  __ call(op->addr(), rtype);
  // The peephole pass fills the delay slot, add_call_info is done in
  // LIR_Assembler::emit_delay.
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  RelocationHolder rspec = virtual_call_Relocation::spec(pc());
  __ set_oop((jobject)Universe::non_oop_word(), G5_inline_cache_reg);
  __ relocate(rspec);
  __ call(op->addr(), relocInfo::none);
  // The peephole pass fills the delay slot, add_call_info is done in
  // LIR_Assembler::emit_delay.
}


void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  add_debug_info_for_null_check_here(op->info());
#ifdef _LP64
  if (UseCompressedOops) {
    __ lduw(O0, oopDesc::klass_offset_in_bytes(), G3_scratch);
    __ decode_heap_oop(G3_scratch);
  } else
#endif
    __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch);
  if (__ is_simm13(op->vtable_offset())) {
    __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
  } else {
    // This will generate 2 instructions
    __ set(op->vtable_offset(), G5_method);
    // ld_ptr, set_hi, set
    __ ld_ptr(G3_scratch, G5_method, G5_method);
  }
  __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3_scratch);
  __ callr(G3_scratch, G0);
  // the peephole pass fills the delay slot
}
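
// The store/load helpers below special-case displacements that fit in a
// simm13, SPARC's 13-bit signed immediate field (-4096..4095); anything
// larger must first be materialized in O7 and used as a register offset.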

int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
  int store_offset;
  if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
    assert(!unaligned, "can't handle this");
    // for offsets larger than a simm13 we set up the offset in O7
    __ set(offset, O7);
    store_offset = store(from_reg, base, O7, type, wide);
  } else {
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(from_reg->as_register());
    }
    store_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ stb(from_reg->as_register(), base, offset); break;
      case T_CHAR  : __ sth(from_reg->as_register(), base, offset); break;
      case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
      case T_INT   : __ stw(from_reg->as_register(), base, offset); break;
      case T_LONG  :
#ifdef _LP64
        if (unaligned || PatchALot) {
          __ srax(from_reg->as_register_lo(), 32, O7);
          __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
          __ stw(O7,                         base, offset + hi_word_offset_in_bytes);
        } else {
          __ stx(from_reg->as_register_lo(), base, offset);
        }
#else
        assert(Assembler::is_simm13(offset + 4), "must be");
        __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
        __ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
#endif
        break;
      case T_ADDRESS:
        __ st_ptr(from_reg->as_register(), base, offset);
        break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
#ifdef _LP64
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(from_reg->as_register(), G3_scratch);
            store_offset = code_offset();
            __ stw(G3_scratch, base, offset);
          } else
#endif
            __ st_ptr(from_reg->as_register(), base, offset);
          break;
        }

      case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break;
      case T_DOUBLE:
        {
          FloatRegister reg = from_reg->as_double_reg();
          // split unaligned stores
          if (unaligned || PatchALot) {
            assert(Assembler::is_simm13(offset + 4), "must be");
            __ stf(FloatRegisterImpl::S, reg->successor(), base, offset + 4);
            __ stf(FloatRegisterImpl::S, reg,              base, offset);
          } else {
            __ stf(FloatRegisterImpl::D, reg, base, offset);
          }
          break;
        }
      default      : ShouldNotReachHere();
    }
  }
  return store_offset;
}
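
// Example of the unaligned T_LONG split above (illustrative values, assuming
// the usual big-endian word offsets): storing 0x1122334455667788 emits
// srax(src, 32, O7) so that O7 holds 0x11223344, then two stw's place the
// high word at offset + hi_word_offset_in_bytes and the low word at
// offset + lo_word_offset_in_bytes.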

int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(from_reg->as_register());
  }
  int store_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ stb(from_reg->as_register(), base, disp); break;
    case T_CHAR  : __ sth(from_reg->as_register(), base, disp); break;
    case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
    case T_INT   : __ stw(from_reg->as_register(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ stx(from_reg->as_register_lo(), base, disp);
#else
      assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match");
      __ std(from_reg->as_register_hi(), base, disp);
#endif
      break;
    case T_ADDRESS:
      __ st_ptr(from_reg->as_register(), base, disp);
      break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
#ifdef _LP64
        if (UseCompressedOops && !wide) {
          __ encode_heap_oop(from_reg->as_register(), G3_scratch);
          store_offset = code_offset();
          __ stw(G3_scratch, base, disp);
        } else
#endif
          __ st_ptr(from_reg->as_register(), base, disp);
        break;
      }
    case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, disp); break;
    case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break;
    default      : ShouldNotReachHere();
  }
  return store_offset;
}


int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
  int load_offset;
  if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
    assert(base != O7, "destroying register");
    assert(!unaligned, "can't handle this");
    // for offsets larger than a simm13 we set up the offset in O7
    __ set(offset, O7);
    load_offset = load(base, O7, to_reg, type, wide);
  } else {
    load_offset = code_offset();
    switch(type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ ldsb(base, offset, to_reg->as_register()); break;
      case T_CHAR  : __ lduh(base, offset, to_reg->as_register()); break;
      case T_SHORT : __ ldsh(base, offset, to_reg->as_register()); break;
      case T_INT   : __ ld(base, offset, to_reg->as_register()); break;
      case T_LONG  :
        if (!unaligned) {
#ifdef _LP64
          __ ldx(base, offset, to_reg->as_register_lo());
#else
          assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
                 "must be sequential");
          __ ldd(base, offset, to_reg->as_register_hi());
#endif
        } else {
#ifdef _LP64
          assert(base != to_reg->as_register_lo(), "can't handle this");
          assert(O7 != to_reg->as_register_lo(), "can't handle this");
          __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
          __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
          __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
          __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
#else
          if (base == to_reg->as_register_lo()) {
            __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
            __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
          } else {
            __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
            __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
          }
#endif
        }
        break;
      case T_ADDRESS: __ ld_ptr(base, offset, to_reg->as_register()); break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
#ifdef _LP64
          if (UseCompressedOops && !wide) {
            __ lduw(base, offset, to_reg->as_register());
            __ decode_heap_oop(to_reg->as_register(), to_reg->as_register());
          } else
#endif
            __ ld_ptr(base, offset, to_reg->as_register());
          break;
        }
      case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break;
      case T_DOUBLE:
        {
          FloatRegister reg = to_reg->as_double_reg();
          // split unaligned loads
          if (unaligned || PatchALot) {
            __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
            __ ldf(FloatRegisterImpl::S, base, offset,     reg);
          } else {
            __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
          }
          break;
        }
      default      : ShouldNotReachHere();
    }
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(to_reg->as_register());
    }
  }
  return load_offset;
}

int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
  int load_offset = code_offset();
  switch(type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ ldsb(base, disp, to_reg->as_register()); break;
    case T_CHAR  : __ lduh(base, disp, to_reg->as_register()); break;
    case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break;
    case T_INT   : __ ld(base, disp, to_reg->as_register()); break;
    case T_ADDRESS: __ ld_ptr(base, disp, to_reg->as_register()); break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
#ifdef _LP64
        if (UseCompressedOops && !wide) {
          __ lduw(base, disp, to_reg->as_register());
          __ decode_heap_oop(to_reg->as_register(), to_reg->as_register());
        } else
#endif
          __ ld_ptr(base, disp, to_reg->as_register());
        break;
      }
    case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
    case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
    case T_LONG  :
#ifdef _LP64
      __ ldx(base, disp, to_reg->as_register_lo());
#else
      assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
             "must be sequential");
      __ ldd(base, disp, to_reg->as_register_hi());
#endif
      break;
    default      : ShouldNotReachHere();
  }
  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(to_reg->as_register());
  }
  return load_offset;
}


void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
    case T_INT:
    case T_FLOAT: {
      Register src_reg = O7;
      int value = c->as_jint_bits();
      if (value == 0) {
        src_reg = G0;
      } else {
        __ set(value, O7);
      }
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ stw(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_ADDRESS: {
      Register src_reg = O7;
      int value = c->as_jint_bits();
      if (value == 0) {
        src_reg = G0;
      } else {
        __ set(value, O7);
      }
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ st_ptr(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_OBJECT: {
      Register src_reg = O7;
      jobject2reg(c->as_jobject(), src_reg);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ st_ptr(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());

      Register tmp = O7;
      int value_lo = c->as_jint_lo_bits();
      if (value_lo == 0) {
        tmp = G0;
      } else {
        __ set(value_lo, O7);
      }
      __ stw(tmp, addr.base(), addr.disp() + lo_word_offset_in_bytes);
      int value_hi = c->as_jint_hi_bits();
      if (value_hi == 0) {
        tmp = G0;
      } else {
        __ set(value_hi, O7);
      }
      __ stw(tmp, addr.base(), addr.disp() + hi_word_offset_in_bytes);
      break;
    }
    default:
      Unimplemented();
  }
}
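
// In the two-word constant stores above and below, the halves are addressed
// via lo_word_offset_in_bytes/hi_word_offset_in_bytes. On this big-endian
// port the high word lives at the lower address, so (assuming the usual
// definitions in the platform header) hi_word_offset_in_bytes is 0 and
// lo_word_offset_in_bytes is BytesPerInt.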

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();
  Register base = addr->base()->as_pointer_register();
  int offset;

  switch (c->type()) {
    case T_INT:
    case T_FLOAT:
    case T_ADDRESS: {
      LIR_Opr tmp = FrameMap::O7_opr;
      int value = c->as_jint_bits();
      if (value == 0) {
        tmp = FrameMap::G0_opr;
      } else if (Assembler::is_simm13(value)) {
        __ set(value, O7);
      }
      if (addr->index()->is_valid()) {
        assert(addr->disp() == 0, "must be zero");
        offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
      } else {
        assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
        offset = store(tmp, base, addr->disp(), type, wide, false);
      }
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      assert(!addr->index()->is_valid(), "can't handle reg reg address here");
      assert(Assembler::is_simm13(addr->disp()) &&
             Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses");

      LIR_Opr tmp = FrameMap::O7_opr;
      int value_lo = c->as_jint_lo_bits();
      if (value_lo == 0) {
        tmp = FrameMap::G0_opr;
      } else {
        __ set(value_lo, O7);
      }
      offset = store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT, wide, false);
      int value_hi = c->as_jint_hi_bits();
      if (value_hi == 0) {
        tmp = FrameMap::G0_opr;
      } else {
        __ set(value_hi, O7);
      }
      offset = store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT, wide, false);
      break;
    }
    case T_OBJECT: {
      jobject obj = c->as_jobject();
      LIR_Opr tmp;
      if (obj == NULL) {
        tmp = FrameMap::G0_opr;
      } else {
        tmp = FrameMap::O7_opr;
        jobject2reg(c->as_jobject(), O7);
      }
      // handle either reg+reg or reg+disp address
      if (addr->index()->is_valid()) {
        assert(addr->disp() == 0, "must be zero");
        offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
      } else {
        assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
        offset = store(tmp, base, addr->disp(), type, wide, false);
      }

      break;
    }
    default:
      Unimplemented();
  }
  if (info != NULL) {
    add_debug_info_for_null_check(offset, info);
  }
}


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Opr to_reg = dest;

  switch (c->type()) {
    case T_INT:
    case T_ADDRESS:
      {
        jint con = c->as_jint();
        if (to_reg->is_single_cpu()) {
          assert(patch_code == lir_patch_none, "no patching handled here");
          __ set(con, to_reg->as_register());
        } else {
          ShouldNotReachHere();
          assert(to_reg->is_single_fpu(), "wrong register kind");

          __ set(con, O7);
          Address temp_slot(SP, (frame::register_save_words * wordSize) + STACK_BIAS);
          __ st(O7, temp_slot);
          __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg());
        }
      }
      break;

    case T_LONG:
      {
        jlong con = c->as_jlong();

        if (to_reg->is_double_cpu()) {
#ifdef _LP64
          __ set(con, to_reg->as_register_lo());
#else
          __ set(low(con),  to_reg->as_register_lo());
          __ set(high(con), to_reg->as_register_hi());
#endif
#ifdef _LP64
        } else if (to_reg->is_single_cpu()) {
          __ set(con, to_reg->as_register());
#endif
        } else {
          ShouldNotReachHere();
          assert(to_reg->is_double_fpu(), "wrong register kind");
          Address temp_slot_lo(SP, ((frame::register_save_words  ) * wordSize) + STACK_BIAS);
          Address temp_slot_hi(SP, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS);
          __ set(low(con),  O7);
          __ st(O7, temp_slot_lo);
          __ set(high(con), O7);
          __ st(O7, temp_slot_hi);
          __ ldf(FloatRegisterImpl::D, temp_slot_lo, to_reg->as_double_reg());
        }
      }
      break;

    case T_OBJECT:
      {
        if (patch_code == lir_patch_none) {
          jobject2reg(c->as_jobject(), to_reg->as_register());
        } else {
          jobject2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;

    case T_FLOAT:
      {
        address const_addr = __ float_constant(c->as_jfloat());
        if (const_addr == NULL) {
          bailout("const section overflow");
          break;
        }
        RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
        AddressLiteral const_addrlit(const_addr, rspec);
        if (to_reg->is_single_fpu()) {
          __ patchable_sethi(const_addrlit, O7);
          __ relocate(rspec);
          __ ldf(FloatRegisterImpl::S, O7, const_addrlit.low10(), to_reg->as_float_reg());

        } else {
          assert(to_reg->is_single_cpu(), "Must be a cpu register.");

          __ set(const_addrlit, O7);
          __ ld(O7, 0, to_reg->as_register());
          // load(O7, 0, to_reg, T_INT, false /*wide*/, false /*unaligned*/);
        }
      }
      break;

    case T_DOUBLE:
      {
        address const_addr = __ double_constant(c->as_jdouble());
        if (const_addr == NULL) {
          bailout("const section overflow");
          break;
        }
        RelocationHolder rspec = internal_word_Relocation::spec(const_addr);

        if (to_reg->is_double_fpu()) {
          AddressLiteral const_addrlit(const_addr, rspec);
          __ patchable_sethi(const_addrlit, O7);
          __ relocate(rspec);
          __ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
        } else {
          assert(to_reg->is_double_cpu(), "Must be a long register.");
#ifdef _LP64
          __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
#else
          __ set(low(jlong_cast(c->as_jdouble())),  to_reg->as_register_lo());
          __ set(high(jlong_cast(c->as_jdouble())), to_reg->as_register_hi());
#endif
        }

      }
      break;

    default:
      ShouldNotReachHere();
  }
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Register reg = addr->base()->as_register();
  return Address(reg, addr->disp());
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  switch (type) {
    case T_INT:
    case T_FLOAT: {
      Register tmp = O7;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ lduw(from.base(), from.disp(), tmp);
      __ stw(tmp, to.base(), to.disp());
      break;
    }
    case T_OBJECT: {
      Register tmp = O7;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ ld_ptr(from.base(), from.disp(), tmp);
      __ st_ptr(tmp, to.base(), to.disp());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Register tmp = O7;
      Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
      Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ lduw(from.base(), from.disp(), tmp);
      __ stw(tmp, to.base(), to.disp());
      __ lduw(from.base(), from.disp() + 4, tmp);
      __ stw(tmp, to.base(), to.disp() + 4);
      break;
    }

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base.base(), base.disp() + hi_word_offset_in_bytes);
}

Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base.base(), base.disp() + lo_word_offset_in_bytes);
}


void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool unaligned, bool wide) {

  LIR_Address* addr = src_opr->as_address_ptr();
  LIR_Opr to_reg = dest;

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!to_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
      if (needs_patching) {
        __ patchable_set(0, O7);
      } else {
        __ set(disp_value, O7);
      }
      disp_reg = O7;
    }
  } else if (unaligned || PatchALot) {
    __ add(src, addr->index()->as_register(), O7);
    src = O7;
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // remember the offset of the load.  The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset = code_offset();

  assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
  if (disp_reg == noreg) {
    offset = load(src, disp_value, to_reg, type, wide, unaligned);
  } else {
    assert(!unaligned, "can't handle this");
    offset = load(src, disp_reg, to_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }
  if (info != NULL) add_debug_info_for_null_check(offset, info);
}


void LIR_Assembler::prefetchr(LIR_Opr src) {
  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (VM_Version::has_v9()) {
    __ prefetch(from_addr, Assembler::severalReads);
  }
}


void LIR_Assembler::prefetchw(LIR_Opr src) {
  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (VM_Version::has_v9()) {
    __ prefetch(from_addr, Assembler::severalWritesAndPossiblyReads);
  }
}
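
// The unaligned checks below subtract STACK_BIAS before testing 8-byte
// alignment: on 64-bit SPARC V9 the ABI biases the stack pointer by 2047,
// so frame displacements include the bias and it must be removed to recover
// the real address (on 32-bit SPARC the bias is 0).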

void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  Address addr;
  if (src->is_single_word()) {
    addr = frame_map()->address_for_slot(src->single_stack_ix());
  } else if (src->is_double_word()) {
    addr = frame_map()->address_for_double_slot(src->double_stack_ix());
  }

  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  Address addr;
  if (dest->is_single_word()) {
    addr = frame_map()->address_for_slot(dest->single_stack_ix());
  } else if (dest->is_double_word()) {
    addr = frame_map()->address_for_slot(dest->double_stack_ix());
  }
  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
  if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
    if (from_reg->is_double_fpu()) {
      // double to double moves
      assert(to_reg->is_double_fpu(), "should match");
      __ fmov(FloatRegisterImpl::D, from_reg->as_double_reg(), to_reg->as_double_reg());
    } else {
      // float to float moves
      assert(to_reg->is_single_fpu(), "should match");
      __ fmov(FloatRegisterImpl::S, from_reg->as_float_reg(), to_reg->as_float_reg());
    }
  } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
    if (from_reg->is_double_cpu()) {
#ifdef _LP64
      __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
#else
      assert(to_reg->is_double_cpu() &&
             from_reg->as_register_hi() != to_reg->as_register_lo() &&
             from_reg->as_register_lo() != to_reg->as_register_hi(),
             "should both be long and not overlap");
      // long to long moves
      __ mov(from_reg->as_register_hi(), to_reg->as_register_hi());
      __ mov(from_reg->as_register_lo(), to_reg->as_register_lo());
#endif
#ifdef _LP64
    } else if (to_reg->is_double_cpu()) {
      // int to long moves
      __ mov(from_reg->as_register(), to_reg->as_register_lo());
#endif
    } else {
      // int to int moves
      __ mov(from_reg->as_register(), to_reg->as_register());
    }
  } else {
    ShouldNotReachHere();
  }
  if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
    __ verify_oop(to_reg->as_register());
  }
}


void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
                            bool unaligned, bool wide) {
  LIR_Address* addr = dest->as_address_ptr();

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->is_oop_register()) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!from_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
      if (needs_patching) {
        __ patchable_set(0, O7);
      } else {
        __ set(disp_value, O7);
      }
      disp_reg = O7;
    }
  } else if (unaligned || PatchALot) {
    __ add(src, addr->index()->as_register(), O7);
    src = O7;
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // remember the offset of the store.  The patching_epilog must be done
  // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;

  assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
  if (disp_reg == noreg) {
    offset = store(from_reg, src, disp_value, type, wide, unaligned);
  } else {
    assert(!unaligned, "can't handle this");
    offset = store(from_reg, src, disp_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }

  if (info != NULL) add_debug_info_for_null_check(offset, info);
}


void LIR_Assembler::return_op(LIR_Opr result) {
  // the poll may need a register so just pick one that isn't the return register
#if defined(TIERED) && !defined(_LP64)
  if (result->type_field() == LIR_OprDesc::long_type) {
    // Must move the result to G1
    // Must leave proper result in O0,O1 and G1 (TIERED only)
    __ sllx(I0, 32, G1);  // Shift bits into high G1
    __ srl (I1, 0, I1);   // Zero extend O1 (harmless?)
    __ or3 (I1, G1, G1);  // OR 64 bits into G1
#ifdef ASSERT
    // mangle it so any problems will show up
    __ set(0xdeadbeef, I0);
    __ set(0xdeadbeef, I1);
#endif
  }
#endif // TIERED
  __ set((intptr_t)os::get_polling_page(), L0);
  __ relocate(relocInfo::poll_return_type);
  __ ld_ptr(L0, 0, G0);
  __ ret();
  __ delayed()->restore();
}


int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  __ set((intptr_t)os::get_polling_page(), tmp->as_register());
  if (info != NULL) {
    add_debug_info_for_branch(info);
  } else {
    __ relocate(relocInfo::poll_type);
  }

  int offset = __ offset();
  __ ld_ptr(tmp->as_register(), 0, G0);

  return offset;
}
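
// Both return_op() and safepoint_poll() above poll for safepoints with a
// harmless "ld [page], G0" of the VM's polling page. To stop the thread,
// the VM protects that page so the load faults and the signal handler
// dispatches to the safepoint machinery.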

void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size);
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();
  __ relocate(static_stub_Relocation::spec(call_pc));

  __ set_oop(NULL, G5);
  // must be set to -1 at code generation time
  AddressLiteral addrlit(-1);
  __ jump_to(addrlit, G3);
  __ delayed()->nop();

  assert(__ offset() - start <= call_stub_size, "stub too big");
  __ end_a_stub();
}


void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_single_fpu()) {
    __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, opr1->as_float_reg(), opr2->as_float_reg());
  } else if (opr1->is_double_fpu()) {
    __ fcmp(FloatRegisterImpl::D, Assembler::fcc0, opr1->as_double_reg(), opr2->as_double_reg());
  } else if (opr1->is_single_cpu()) {
    if (opr2->is_constant()) {
      switch (opr2->as_constant_ptr()->type()) {
        case T_INT:
          { jint con = opr2->as_constant_ptr()->as_jint();
            if (Assembler::is_simm13(con)) {
              __ cmp(opr1->as_register(), con);
            } else {
              __ set(con, O7);
              __ cmp(opr1->as_register(), O7);
            }
          }
          break;

        case T_OBJECT:
          // there are only equal/notEqual comparisons on objects
          { jobject con = opr2->as_constant_ptr()->as_jobject();
            if (con == NULL) {
              __ cmp(opr1->as_register(), 0);
            } else {
              jobject2reg(con, O7);
              __ cmp(opr1->as_register(), O7);
            }
          }
          break;

        default:
          ShouldNotReachHere();
          break;
      }
    } else {
      if (opr2->is_address()) {
        LIR_Address* addr = opr2->as_address_ptr();
        BasicType type = addr->type();
        if (type == T_OBJECT) __ ld_ptr(as_Address(addr), O7);
        else                  __ ld(as_Address(addr), O7);
        __ cmp(opr1->as_register(), O7);
      } else {
        __ cmp(opr1->as_register(), opr2->as_register());
      }
    }
  } else if (opr1->is_double_cpu()) {
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_constant() && opr2->as_jlong() == 0) {
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases");
#ifdef _LP64
      __ orcc(xhi, G0, G0);
#else
      __ orcc(xhi, xlo, G0);
#endif
    } else if (opr2->is_register()) {
      Register ylo = opr2->as_register_lo();
      Register yhi = opr2->as_register_hi();
#ifdef _LP64
      __ cmp(xlo, ylo);
#else
      __ subcc(xlo, ylo, xlo);
      __ subccc(xhi, yhi, xhi);
      if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
        __ orcc(xhi, xlo, G0);
      }
#endif
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_address()) {
    LIR_Address* addr = opr1->as_address_ptr();
    BasicType type = addr->type();
    assert (opr2->is_constant(), "Checking");
    if (type == T_OBJECT) __ ld_ptr(as_Address(addr), O7);
    else                  __ ld(as_Address(addr), O7);
    __ cmp(O7, opr2->as_constant_ptr()->as_jint());
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
    } else if (left->is_double_fpu()) {
      __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
    } else {
      ShouldNotReachHere();
    }
  } else if (code == lir_cmp_l2i) {
#ifdef _LP64
    __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
#else
    __ lcmp(left->as_register_hi(),  left->as_register_lo(),
            right->as_register_hi(), right->as_register_lo(),
            dst->as_register());
#endif
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result) {

  Assembler::Condition acond;
  switch (condition) {
    case lir_cond_equal:        acond = Assembler::equal;                break;
    case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
    case lir_cond_less:         acond = Assembler::less;                 break;
    case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
    case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
    case lir_cond_greater:      acond = Assembler::greater;              break;
    case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
    case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
    default:                    ShouldNotReachHere();
  };

  if (opr1->is_constant() && opr1->type() == T_INT) {
    Register dest = result->as_register();
    // load up first part of constant before branch
    // and do the rest in the delay slot.
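    // Illustrative arithmetic for the sethi/or3 pair: sethi writes the
    // constant's high 22 bits and or3 supplies the low 10, so for
    // 0x12345678 the sethi leaves 0x12345400 in dest and
    // or3(dest, 0x278, dest) completes the value.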

void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result) {
  Assembler::Condition acond;
  switch (condition) {
    case lir_cond_equal:        acond = Assembler::equal;                break;
    case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
    case lir_cond_less:         acond = Assembler::less;                 break;
    case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
    case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
    case lir_cond_greater:      acond = Assembler::greater;              break;
    case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
    case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
    default:                    ShouldNotReachHere();
  }

  if (opr1->is_constant() && opr1->type() == T_INT) {
    Register dest = result->as_register();
    // load up first part of constant before branch
    // and do the rest in the delay slot.
    if (!Assembler::is_simm13(opr1->as_jint())) {
      __ sethi(opr1->as_jint(), dest);
    }
  } else if (opr1->is_constant()) {
    const2reg(opr1, result, lir_patch_none, NULL);
  } else if (opr1->is_register()) {
    reg2reg(opr1, result);
  } else if (opr1->is_stack()) {
    stack2reg(opr1, result, result->type());
  } else {
    ShouldNotReachHere();
  }
  Label skip;
  __ br(acond, false, Assembler::pt, skip);
  if (opr1->is_constant() && opr1->type() == T_INT) {
    Register dest = result->as_register();
    if (Assembler::is_simm13(opr1->as_jint())) {
      __ delayed()->or3(G0, opr1->as_jint(), dest);
    } else {
      // the sethi has been done above, so just put in the low 10 bits
      __ delayed()->or3(dest, opr1->as_jint() & 0x3ff, dest);
    }
  } else {
    // can't do anything useful in the delay slot
    __ delayed()->nop();
  }
  if (opr2->is_constant()) {
    const2reg(opr2, result, lir_patch_none, NULL);
  } else if (opr2->is_register()) {
    reg2reg(opr2, result);
  } else if (opr2->is_stack()) {
    stack2reg(opr2, result, result->type());
  } else {
    ShouldNotReachHere();
  }
  __ bind(skip);
}
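
// Editor's sketch of the constant split used by cmove above (values are
// illustrative): a 32-bit constant that does not fit in a signed 13-bit
// immediate is built in two halves,
//   sethi %hi(0x12345678), dest       ! dest = 0x12345400 (top 22 bits)
//   or    dest, 0x278, dest           ! or in the low 10 bits
// Splitting it this way lets the or3 ride in the branch delay slot, so
// completing the "true" value costs no extra cycle when the branch is taken.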

void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "unused on this code path");
  assert(left->is_register(), "wrong items state");
  assert(dest->is_register(), "wrong items state");

  if (right->is_register()) {
    if (dest->is_float_kind()) {

      FloatRegister lreg, rreg, res;
      FloatRegisterImpl::Width w;
      if (right->is_single_fpu()) {
        w = FloatRegisterImpl::S;
        lreg = left->as_float_reg();
        rreg = right->as_float_reg();
        res  = dest->as_float_reg();
      } else {
        w = FloatRegisterImpl::D;
        lreg = left->as_double_reg();
        rreg = right->as_double_reg();
        res  = dest->as_double_reg();
      }

      switch (code) {
        case lir_add: __ fadd(w, lreg, rreg, res); break;
        case lir_sub: __ fsub(w, lreg, rreg, res); break;
        case lir_mul: // fall through
        case lir_mul_strictfp: __ fmul(w, lreg, rreg, res); break;
        case lir_div: // fall through
        case lir_div_strictfp: __ fdiv(w, lreg, rreg, res); break;
        default: ShouldNotReachHere();
      }

    } else if (dest->is_double_cpu()) {
#ifdef _LP64
      Register dst_lo = dest->as_register_lo();
      Register op1_lo = left->as_pointer_register();
      Register op2_lo = right->as_pointer_register();

      switch (code) {
        case lir_add:
          __ add(op1_lo, op2_lo, dst_lo);
          break;

        case lir_sub:
          __ sub(op1_lo, op2_lo, dst_lo);
          break;

        default: ShouldNotReachHere();
      }
#else
      Register op1_lo = left->as_register_lo();
      Register op1_hi = left->as_register_hi();
      Register op2_lo = right->as_register_lo();
      Register op2_hi = right->as_register_hi();
      Register dst_lo = dest->as_register_lo();
      Register dst_hi = dest->as_register_hi();

      switch (code) {
        case lir_add:
          __ addcc(op1_lo, op2_lo, dst_lo);
          __ addc (op1_hi, op2_hi, dst_hi);
          break;

        case lir_sub:
          __ subcc(op1_lo, op2_lo, dst_lo);
          __ subc (op1_hi, op2_hi, dst_hi);
          break;

        default: ShouldNotReachHere();
      }
#endif
    } else {
      assert(right->is_single_cpu(), "Just Checking");

      Register lreg = left->as_register();
      Register res  = dest->as_register();
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ add (lreg, rreg, res); break;
        case lir_sub: __ sub (lreg, rreg, res); break;
        case lir_mul: __ mult(lreg, rreg, res); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    assert(right->is_constant(), "must be constant");

    if (dest->is_single_cpu()) {
      Register lreg = left->as_register();
      Register res  = dest->as_register();
      int simm13 = right->as_constant_ptr()->as_jint();

      switch (code) {
        case lir_add: __ add (lreg, simm13, res); break;
        case lir_sub: __ sub (lreg, simm13, res); break;
        case lir_mul: __ mult(lreg, simm13, res); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register lreg = left->as_pointer_register();
      Register res  = dest->as_register_lo();
      long con = right->as_constant_ptr()->as_jlong();
      assert(Assembler::is_simm13(con), "must be simm13");

      switch (code) {
        case lir_add: __ add (lreg, (int)con, res); break;
        case lir_sub: __ sub (lreg, (int)con, res); break;
        case lir_mul: __ mult(lreg, (int)con, res); break;
        default: ShouldNotReachHere();
      }
    }
  }
}


void LIR_Assembler::fpop() {
  // do nothing
}
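
// Editor's sketch (illustrative, not original commentary): the 32-bit long
// add/sub in arith_op above is the classic carry chain. For example,
// 0x00000001_FFFFFFFF + 1:
//   addcc lo, 1, lo   ! lo = 0, carry set
//   addc  hi, 0, hi   ! hi = 2, carry folded in
// fpop() is a no-op because SPARC has a flat FPU register file; the
// x87-style stack bookkeeping in the shared LIR assembler has nothing to
// undo here.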

void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
  switch (code) {
    case lir_sin:
    case lir_tan:
    case lir_cos: {
      assert(thread->is_valid(), "preserve the thread object for performance reasons");
      assert(dest->as_double_reg() == F0, "the result will be in f0/f1");
      break;
    }
    case lir_sqrt: {
      assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt");
      FloatRegister src_reg = value->as_double_reg();
      FloatRegister dst_reg = dest->as_double_reg();
      __ fsqrt(FloatRegisterImpl::D, src_reg, dst_reg);
      break;
    }
    case lir_abs: {
      assert(!thread->is_valid(), "there is no need for a thread_reg for fabs");
      FloatRegister src_reg = value->as_double_reg();
      FloatRegister dst_reg = dest->as_double_reg();
      __ fabs(FloatRegisterImpl::D, src_reg, dst_reg);
      break;
    }
    default: {
      ShouldNotReachHere();
      break;
    }
  }
}


void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
  if (right->is_constant()) {
    if (dest->is_single_cpu()) {
      int simm13 = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_logic_and: __ and3(left->as_register(), simm13, dest->as_register()); break;
        case lir_logic_or:  __ or3 (left->as_register(), simm13, dest->as_register()); break;
        case lir_logic_xor: __ xor3(left->as_register(), simm13, dest->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else {
      long c = right->as_constant_ptr()->as_jlong();
      assert(c == (int)c && Assembler::is_simm13(c), "out of range");
      int simm13 = (int)c;
      switch (code) {
        case lir_logic_and:
#ifndef _LP64
          __ and3(left->as_register_hi(), 0, dest->as_register_hi());
#endif
          __ and3(left->as_register_lo(), simm13, dest->as_register_lo());
          break;

        case lir_logic_or:
#ifndef _LP64
          __ or3(left->as_register_hi(), 0, dest->as_register_hi());
#endif
          __ or3(left->as_register_lo(), simm13, dest->as_register_lo());
          break;

        case lir_logic_xor:
#ifndef _LP64
          __ xor3(left->as_register_hi(), 0, dest->as_register_hi());
#endif
          __ xor3(left->as_register_lo(), simm13, dest->as_register_lo());
          break;

        default: ShouldNotReachHere();
      }
    }
  } else {
    assert(right->is_register(), "right should be in register");

    if (dest->is_single_cpu()) {
      switch (code) {
        case lir_logic_and: __ and3(left->as_register(), right->as_register(), dest->as_register()); break;
        case lir_logic_or:  __ or3 (left->as_register(), right->as_register(), dest->as_register()); break;
        case lir_logic_xor: __ xor3(left->as_register(), right->as_register(), dest->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else {
#ifdef _LP64
      Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
                                                                        left->as_register_lo();
      Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
                                                                          right->as_register_lo();

      switch (code) {
        case lir_logic_and: __ and3(l, r, dest->as_register_lo()); break;
        case lir_logic_or:  __ or3 (l, r, dest->as_register_lo()); break;
        case lir_logic_xor: __ xor3(l, r, dest->as_register_lo()); break;
        default: ShouldNotReachHere();
      }
#else
      switch (code) {
        case lir_logic_and:
          __ and3(left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
          __ and3(left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
          break;

        case lir_logic_or:
          __ or3(left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
          __ or3(left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
          break;

        case lir_logic_xor:
          __ xor3(left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
          __ xor3(left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
          break;

        default: ShouldNotReachHere();
      }
#endif
    }
  }
}
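
// Editor's observation (an inference from the code above, flagged as such):
// the and3/or3/xor3 of the high word with 0 in the 32-bit constant paths is
// only correct when the constant's implicit high word is all zeroes, i.e.
// for non-negative constants: AND with zero clears the high half, while
// OR/XOR with zero simply copy it. The assert(c == (int)c && is_simm13(c))
// is what keeps the inputs within that range.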

int LIR_Assembler::shift_amount(BasicType t) {
  int elem_size = type2aelembytes(t);
  switch (elem_size) {
    case 1 : return 0;
    case 2 : return 1;
    case 4 : return 2;
    case 8 : return 3;
  }
  ShouldNotReachHere();
  return -1;
}


void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == Oexception, "should match");
  assert(exceptionPC->as_register() == Oissuing_pc, "should match");

  info->add_register_oop(exceptionOop);

  // reuse the debug info from the safepoint poll for the throw op itself
  address pc_for_athrow = __ pc();
  int pc_for_athrow_offset = __ offset();
  RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
  __ set(pc_for_athrow, Oissuing_pc, rspec);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
}


void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == Oexception, "should match");

  __ br(Assembler::always, false, Assembler::pt, _unwind_handler_entry);
  __ delayed()->nop();
}
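
// Editor's sketch: shift_amount() returns log2 of the element size and is
// used below to scale an array index into a byte offset. For T_INT (4-byte
// elements) it returns 2, and an element address is formed as
//   base + arrayOopDesc::base_offset_in_bytes(T_INT) + (index << 2)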

void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  Register src     = op->src()->as_register();
  Register dst     = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length  = op->length()->as_register();
  Register tmp     = op->tmp()->as_register();
  Register tmp2    = O7;

  int flags = op->flags();
  ciArrayKlass* default_type = op->expected_type();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // set up the arraycopy stub information
  ArrayCopyStub* stub = op->stub();

  // always do stub if no type information is available.  it's ok if
  // the known type isn't loaded since the code sanity checks
  // in debug mode and the type isn't required when we know the exact type
  // also check that the type is an array type.
  // We also, for now, always call the stub if the barrier set requires a
  // write_ref_pre barrier (which the stub does, but none of the optimized
  // cases currently does).
  if (op->expected_type() == NULL ||
      Universe::heap()->barrier_set()->has_write_ref_pre_barrier()) {
    __ mov(src,     O0);
    __ mov(src_pos, O1);
    __ mov(dst,     O2);
    __ mov(dst_pos, O3);
    __ mov(length,  O4);
    __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy));

    __ br_zero(Assembler::less, false, Assembler::pn, O0, *stub->entry());
    __ delayed()->nop();
    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");

  // make sure src and dst are non-null and load array length
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ tst(src);
    __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ tst(dst);
    __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    // test src_pos register
    __ tst(src_pos);
    __ br(Assembler::less, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    // test dst_pos register
    __ tst(dst_pos);
    __ br(Assembler::less, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    // make sure length isn't negative
    __ tst(length);
    __ br(Assembler::less, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ ld(src, arrayOopDesc::length_offset_in_bytes(), tmp2);
    __ add(length, src_pos, tmp);
    __ cmp(tmp2, tmp);
    __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ ld(dst, arrayOopDesc::length_offset_in_bytes(), tmp2);
    __ add(length, dst_pos, tmp);
    __ cmp(tmp2, tmp);
    __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::type_check) {
#ifdef _LP64
    if (UseCompressedOops) {
      // We don't need decode because we just need to compare
      __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
      __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
    } else
#endif
    {
      __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
      __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
    }
    __ cmp(tmp, tmp2);
    __ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry());
    __ delayed()->nop();
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class.  For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type.  For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    jobject2reg(op->expected_type()->constant_encoding(), tmp);
#ifdef _LP64
    if (UseCompressedOops) {
      __ encode_heap_oop(tmp, tmp);
      __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
    } else
#endif
      __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
    if (basic_type != T_OBJECT) {
      __ cmp(tmp, tmp2);
      __ brx(Assembler::notEqual, false, Assembler::pn, halt);
#ifdef _LP64
      if (UseCompressedOops) {
        __ delayed()->lduw(src, oopDesc::klass_offset_in_bytes(), tmp2);
      } else
#endif
        __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
      __ cmp(tmp, tmp2);
      __ brx(Assembler::equal, false, Assembler::pn, known_ok);
      __ delayed()->nop();
    } else {
      __ cmp(tmp, tmp2);
      __ brx(Assembler::equal, false, Assembler::pn, known_ok);
      __ delayed()->cmp(src, dst);
      __ brx(Assembler::equal, false, Assembler::pn, known_ok);
      __ delayed()->nop();
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

  int shift = shift_amount(basic_type);

  Register src_ptr = O0;
  Register dst_ptr = O1;
  Register len     = O2;

  __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
  LP64_ONLY(__ sra(src_pos, 0, src_pos);) // higher 32 bits must be zero
  if (shift == 0) {
    __ add(src_ptr, src_pos, src_ptr);
  } else {
    __ sll(src_pos, shift, tmp);
    __ add(src_ptr, tmp, src_ptr);
  }

  __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
  LP64_ONLY(__ sra(dst_pos, 0, dst_pos);) // higher 32 bits must be zero
  if (shift == 0) {
    __ add(dst_ptr, dst_pos, dst_ptr);
  } else {
    __ sll(dst_pos, shift, tmp);
    __ add(dst_ptr, tmp, dst_ptr);
  }

  if (basic_type != T_OBJECT) {
    if (shift == 0) {
      __ mov(length, len);
    } else {
      __ sll(length, shift, len);
    }
    __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::primitive_arraycopy));
  } else {
    // oop_arraycopy takes a length in number of elements, so don't scale it.
    __ mov(length, len);
    __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::oop_arraycopy));
  }

  __ bind(*stub->continuation());
}
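
// Editor's note on the range checks above (observation, not original
// commentary): after cmp(tmp2, tmp) the carry flag acts as an unsigned
// borrow, so br(carrySet, ...) fires exactly when tmp2 < tmp treated as
// unsigned, i.e. when array.length < pos + length. Because negative values
// look huge when treated as unsigned, the same single branch also catches
// pos + length overflowing to a negative int.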

void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  if (dest->is_single_cpu()) {
#ifdef _LP64
    if (left->type() == T_OBJECT) {
      switch (code) {
        case lir_shl:  __ sllx(left->as_register(), count->as_register(), dest->as_register()); break;
        case lir_shr:  __ srax(left->as_register(), count->as_register(), dest->as_register()); break;
        case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else
#endif
      switch (code) {
        case lir_shl:  __ sll(left->as_register(), count->as_register(), dest->as_register()); break;
        case lir_shr:  __ sra(left->as_register(), count->as_register(), dest->as_register()); break;
        case lir_ushr: __ srl(left->as_register(), count->as_register(), dest->as_register()); break;
        default: ShouldNotReachHere();
      }
  } else {
#ifdef _LP64
    switch (code) {
      case lir_shl:  __ sllx(left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
      case lir_shr:  __ srax(left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
      case lir_ushr: __ srlx(left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
      default: ShouldNotReachHere();
    }
#else
    switch (code) {
      case lir_shl:  __ lshl (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
      case lir_shr:  __ lshr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
      case lir_ushr: __ lushr(left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
      default: ShouldNotReachHere();
    }
#endif
  }
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
#ifdef _LP64
  if (left->type() == T_OBJECT) {
    count = count & 63;  // shouldn't shift by more than sizeof(intptr_t)
    Register l = left->as_register();
    Register d = dest->as_register_lo();
    switch (code) {
      case lir_shl:  __ sllx(l, count, d); break;
      case lir_shr:  __ srax(l, count, d); break;
      case lir_ushr: __ srlx(l, count, d); break;
      default: ShouldNotReachHere();
    }
    return;
  }
#endif

  if (dest->is_single_cpu()) {
    count = count & 0x1F; // Java spec
    switch (code) {
      case lir_shl:  __ sll(left->as_register(), count, dest->as_register()); break;
      case lir_shr:  __ sra(left->as_register(), count, dest->as_register()); break;
      case lir_ushr: __ srl(left->as_register(), count, dest->as_register()); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_cpu()) {
    count = count & 63; // Java spec
    switch (code) {
      case lir_shl:  __ sllx(left->as_pointer_register(), count, dest->as_pointer_register()); break;
      case lir_shr:  __ srax(left->as_pointer_register(), count, dest->as_pointer_register()); break;
      case lir_ushr: __ srlx(left->as_pointer_register(), count, dest->as_pointer_register()); break;
      default: ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
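
// Editor's sketch: the count masking above mirrors the Java language rules
// (JLS 15.19): int shifts use only the low 5 bits of the count and long
// shifts the low 6 bits, so e.g. (x << 33) on an int is emitted as
// sll(x, 33 & 0x1F, d), i.e. a shift by 1.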

void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  assert(op->tmp1()->as_register() == G1 &&
         op->tmp2()->as_register() == G3 &&
         op->tmp3()->as_register() == G4 &&
         op->obj()->as_register()  == O0 &&
         op->klass()->as_register() == G5, "must be");
  if (op->init_check()) {
    __ ld(op->klass()->as_register(),
          instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc),
          op->tmp1()->as_register());
    add_debug_info_for_null_check_here(op->stub()->info());
    __ cmp(op->tmp1()->as_register(), instanceKlass::fully_initialized);
    __ br(Assembler::notEqual, false, Assembler::pn, *op->stub()->entry());
    __ delayed()->nop();
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->tmp3()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
  __ verify_oop(op->obj()->as_register());
}


void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  assert(op->tmp1()->as_register() == G1 &&
         op->tmp2()->as_register() == G3 &&
         op->tmp3()->as_register() == G4 &&
         op->tmp4()->as_register() == O1 &&
         op->klass()->as_register() == G5, "must be");
  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
    __ delayed()->nop();
  } else {
    __ allocate_array(op->obj()->as_register(),
                      op->len()->as_register(),
                      op->tmp1()->as_register(),
                      op->tmp2()->as_register(),
                      op->tmp3()->as_register(),
                      arrayOopDesc::header_size(op->type()),
                      type2aelembytes(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}
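
// Editor's note: the init_check above reads instanceKlass::init_state and
// bails to the stub unless the class is fully_initialized, so allocating an
// instance of a class that is still being initialized always goes through
// the runtime, which can run (or wait on) the static initializer first.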

void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Register tmp1, Label* update_done) {
  uint i;
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                          mdo_offset_bias);
    __ ld_ptr(receiver_addr, tmp1);
    __ verify_oop(tmp1);
    __ cmp(recv, tmp1);
    __ brx(Assembler::notEqual, false, Assembler::pt, next_test);
    __ delayed()->nop();
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                      mdo_offset_bias);
    __ ld_ptr(data_addr, tmp1);
    __ add(tmp1, DataLayout::counter_increment, tmp1);
    __ st_ptr(tmp1, data_addr);
    __ ba(false, *update_done);
    __ delayed()->nop();
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                      mdo_offset_bias);
    __ ld_ptr(recv_addr, tmp1);
    __ br_notnull(tmp1, false, Assembler::pt, next_test);
    __ delayed()->nop();
    __ st_ptr(recv, recv_addr);
    __ set(DataLayout::counter_increment, tmp1);
    __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
              mdo_offset_bias);
    __ ba(false, *update_done);
    __ delayed()->nop();
    __ bind(next_test);
  }
}


void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
                                    ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
  md = method->method_data();
  if (md == NULL) {
    bailout("out of memory building methodDataOop");
    return;
  }
  data = md->bci_to_data(bci);
  assert(data != NULL, "need data for checkcast");
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ld can use simm13s to reference the slots of the data
    mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
  }
}
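
// Editor's note (illustration, not original commentary): simm13
// displacements only cover -4096..4095 bytes, so when this method's profile
// data sits deeper than that inside the methodDataOop, setup_md_access()
// returns a bias equal to the data's base offset. Callers then add the bias
// to the mdo register once and subtract it from every slot displacement,
// keeping each ld/st within simm13 reach.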

void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  Register Rtmp1 = op->tmp3()->as_register();
  ciKlass* k = op->klass();

  if (obj == k_RInfo) {
    k_RInfo = klass_RInfo;
    klass_RInfo = obj;
  }

  ciMethodData* md;
  ciProfileData* data;
  int mdo_offset_bias = 0;
  if (op->should_profile()) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);

    Label not_null;
    __ br_notnull(obj, false, Assembler::pn, not_null);
    __ delayed()->nop();
    Register mdo      = k_RInfo;
    Register data_val = Rtmp1;
    jobject2reg(md->constant_encoding(), mdo);
    if (mdo_offset_bias > 0) {
      __ set(mdo_offset_bias, data_val);
      __ add(mdo, data_val, mdo);
    }
    Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
    __ ldub(flags_addr, data_val);
    __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
    __ stb(data_val, flags_addr);
    __ ba(false, *obj_is_null);
    __ delayed()->nop();
    __ bind(not_null);
  } else {
    __ br_null(obj, false, Assembler::pn, *obj_is_null);
    __ delayed()->nop();
  }

  Label profile_cast_failure, profile_cast_success;
  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
  Label *success_target = op->should_profile() ? &profile_cast_success : success;

  // patching may screw with our temporaries on sparc,
  // so let's do it before loading the class
  if (k->is_loaded()) {
    jobject2reg(k->constant_encoding(), k_RInfo);
  } else {
    jobject2reg_with_patching(k_RInfo, op->info_for_patch());
  }
  assert(obj != k_RInfo, "must be different");

  // get object class
  // not a safepoint as obj null check happens earlier
  __ load_klass(obj, klass_RInfo);
  if (op->fast_check()) {
    assert_different_registers(klass_RInfo, k_RInfo);
    __ cmp(k_RInfo, klass_RInfo);
    __ brx(Assembler::notEqual, false, Assembler::pt, *failure_target);
    __ delayed()->nop();
  } else {
    bool need_slow_path = true;
    if (k->is_loaded()) {
      if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
        need_slow_path = false;
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
                                       (need_slow_path ? success_target : NULL),
                                       failure_target, NULL,
                                       RegisterOrConstant(k->super_check_offset()));
    } else {
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target,
                                       failure_target, NULL);
    }
    if (need_slow_path) {
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
      __ delayed()->nop();
      __ cmp(G3, 0);
      __ br(Assembler::equal, false, Assembler::pn, *failure_target);
      __ delayed()->nop();
      // Fall through to success case
    }
  }

  if (op->should_profile()) {
    Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
    assert_different_registers(obj, mdo, recv, tmp1);
    __ bind(profile_cast_success);
    jobject2reg(md->constant_encoding(), mdo);
    if (mdo_offset_bias > 0) {
      __ set(mdo_offset_bias, tmp1);
      __ add(mdo, tmp1, mdo);
    }
    __ load_klass(obj, recv);
    type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
    // Jump over the failure case
    __ ba(false, *success);
    __ delayed()->nop();
    // Cast failure case
    __ bind(profile_cast_failure);
    jobject2reg(md->constant_encoding(), mdo);
    if (mdo_offset_bias > 0) {
      __ set(mdo_offset_bias, tmp1);
      __ add(mdo, tmp1, mdo);
    }
    Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
    __ ld_ptr(data_addr, tmp1);
    __ sub(tmp1, DataLayout::counter_increment, tmp1);
    __ st_ptr(tmp1, data_addr);
    __ ba(false, *failure);
    __ delayed()->nop();
  }
  __ ba(false, *success);
  __ delayed()->nop();
}
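
// Editor's note: in the slow path above, Runtime1::slow_subtype_check_id
// leaves its answer in G3; the cmp(G3, 0) / br(equal, ..., failure) pair
// treats zero as "not a subtype" and falls through on success.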

void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();

    __ verify_oop(value);
    CodeStub* stub = op->stub();
    // check if it needs to be profiled
    ciMethodData* md;
    ciProfileData* data;
    int mdo_offset_bias = 0;
    if (op->should_profile()) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
    }
    Label profile_cast_success, profile_cast_failure, done;
    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

    if (op->should_profile()) {
      Label not_null;
      __ br_notnull(value, false, Assembler::pn, not_null);
      __ delayed()->nop();
      Register mdo      = k_RInfo;
      Register data_val = Rtmp1;
      jobject2reg(md->constant_encoding(), mdo);
      if (mdo_offset_bias > 0) {
        __ set(mdo_offset_bias, data_val);
        __ add(mdo, data_val, mdo);
      }
      Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
      __ ldub(flags_addr, data_val);
      __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
      __ stb(data_val, flags_addr);
      __ ba(false, done);
      __ delayed()->nop();
      __ bind(not_null);
    } else {
      __ br_null(value, false, Assembler::pn, done);
      __ delayed()->nop();
    }
    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(array, k_RInfo);
    __ load_klass(value, klass_RInfo);

    // get instance klass
    __ ld_ptr(Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)), k_RInfo);
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL);

    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
    assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
    __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
    __ delayed()->nop();
    __ cmp(G3, 0);
    __ br(Assembler::equal, false, Assembler::pn, *failure_target);
    __ delayed()->nop();
    // fall through to the success case

    if (op->should_profile()) {
      Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
      assert_different_registers(value, mdo, recv, tmp1);
      __ bind(profile_cast_success);
      jobject2reg(md->constant_encoding(), mdo);
      if (mdo_offset_bias > 0) {
        __ set(mdo_offset_bias, tmp1);
        __ add(mdo, tmp1, mdo);
      }
      __ load_klass(value, recv);
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
      __ ba(false, done);
      __ delayed()->nop();
      // Cast failure case
      __ bind(profile_cast_failure);
      jobject2reg(md->constant_encoding(), mdo);
      if (mdo_offset_bias > 0) {
        __ set(mdo_offset_bias, tmp1);
        __ add(mdo, tmp1, mdo);
      }
      Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
      __ ld_ptr(data_addr, tmp1);
      __ sub(tmp1, DataLayout::counter_increment, tmp1);
      __ st_ptr(tmp1, data_addr);
      __ ba(false, *stub->entry());
      __ delayed()->nop();
    }
    __ bind(done);
  } else if (code == lir_checkcast) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success;
    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
    __ bind(success);
    __ mov(obj, dst);
  } else if (code == lir_instanceof) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    emit_typecheck_helper(op, &success, &failure, &failure);
    __ bind(failure);
    __ set(0, dst);
    __ ba(false, done);
    __ delayed()->nop();
    __ bind(success);
    __ set(1, dst);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  if (op->code() == lir_cas_long) {
    assert(VM_Version::supports_cx8(), "wrong machine");
    Register addr = op->addr()->as_pointer_register();
    Register cmp_value_lo = op->cmp_value()->as_register_lo();
    Register cmp_value_hi = op->cmp_value()->as_register_hi();
    Register new_value_lo = op->new_value()->as_register_lo();
    Register new_value_hi = op->new_value()->as_register_hi();
    Register t1 = op->tmp1()->as_register();
    Register t2 = op->tmp2()->as_register();
#ifdef _LP64
    __ mov(cmp_value_lo, t1);
    __ mov(new_value_lo, t2);
#else
    // move high and low halves of long values into single registers
    __ sllx(cmp_value_hi, 32, t1);          // shift high half into temp reg
    __ srl(cmp_value_lo, 0, cmp_value_lo);  // clear upper 32 bits of low half
    __ or3(t1, cmp_value_lo, t1);           // t1 holds 64-bit compare value
    __ sllx(new_value_hi, 32, t2);
    __ srl(new_value_lo, 0, new_value_lo);
    __ or3(t2, new_value_lo, t2);           // t2 holds 64-bit value to swap
#endif
    // perform the compare and swap operation
    __ casx(addr, t1, t2);
    // generate condition code - if the swap succeeded, t2 ("new value" reg) was
    // overwritten with the original value in "addr" and will be equal to t1.
    __ cmp(t1, t2);
  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    Register addr = op->addr()->as_pointer_register();
    Register cmp_value = op->cmp_value()->as_register();
    Register new_value = op->new_value()->as_register();
    Register t1 = op->tmp1()->as_register();
    Register t2 = op->tmp2()->as_register();
    __ mov(cmp_value, t1);
    __ mov(new_value, t2);
#ifdef _LP64
    if (op->code() == lir_cas_obj) {
      if (UseCompressedOops) {
        __ encode_heap_oop(t1, t1);
        __ encode_heap_oop(t2, t2);
        __ cas(addr, t1, t2);
      } else {
        __ casx(addr, t1, t2);
      }
    } else
#endif
    {
      __ cas(addr, t1, t2);
    }
    __ cmp(t1, t2);
  } else {
    Unimplemented();
  }
}
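
// Editor's sketch of the 32-bit cas_long packing above (values are
// illustrative): for cmp_value hi = 0x00000001, lo = 0x00000002,
//   sllx hi, 32, t1    ! t1 = 0x00000001_00000000
//   srl  lo, 0, lo     ! clear any sign-extended upper bits of lo
//   or3  t1, lo, t1    ! t1 = 0x00000001_00000002
// builds the 64-bit operand casx needs; the same is done for the new value,
// and the cmp(t1, t2) afterwards sets equal exactly when the swap hit.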

void LIR_Assembler::set_24bit_FPU() {
  Unimplemented();
}


void LIR_Assembler::reset_FPU() {
  Unimplemented();
}


void LIR_Assembler::breakpoint() {
  __ breakpoint_trap();
}


void LIR_Assembler::push(LIR_Opr opr) {
  Unimplemented();
}


void LIR_Assembler::pop(LIR_Opr opr) {
  Unimplemented();
}


void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
  Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  Register dst = dst_opr->as_register();
  Register reg = mon_addr.base();
  int offset = mon_addr.disp();
  // compute pointer to BasicLock
  if (mon_addr.is_simm13()) {
    __ add(reg, offset, dst);
  } else {
    __ set(offset, dst);
    __ add(dst, reg, dst);
  }
}

void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();

  // obj may not be an oop
  if (op->code() == lir_lock) {
    MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
    if (UseFastLocking) {
      assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
      // add debug info for NullPointerException only if one is possible
      if (op->info() != NULL) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
    } else {
      // always do slow locking
      // note: the slow locking code could be inlined here, however if we use
      //       slow locking, speed doesn't matter anyway and this solution is
      //       simpler and requires less duplicated code - additionally, the
      //       slow locking code is the same in either case which simplifies
      //       debugging
      __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
      __ delayed()->nop();
    }
  } else {
    assert(op->code() == lir_unlock, "Invalid code, expected lir_unlock");
    if (UseFastLocking) {
      assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
      __ unlock_object(hdr, obj, lock, *op->stub()->entry());
    } else {
      // always do slow unlocking
      // note: the slow unlocking code could be inlined here, however if we use
      //       slow unlocking, speed doesn't matter anyway and this solution is
      //       simpler and requires less duplicated code - additionally, the
      //       slow unlocking code is the same in either case which simplifies
      //       debugging
      __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
      __ delayed()->nop();
    }
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();

  // Update counter for all call types
  ciMethodData* md = method->method_data();
  if (md == NULL) {
    bailout("out of memory building methodDataOop");
    return;
  }
  ciProfileData* data = md->bci_to_data(bci);
  assert(data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
#ifdef _LP64
  assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register_lo();
#else
  assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register();
#endif
  jobject2reg(md->constant_encoding(), mdo);
  int mdo_offset_bias = 0;
  if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
                            data->size_in_bytes())) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ld can use simm13s to reference the slots of the data
    mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
    __ set(mdo_offset_bias, O7);
    __ add(mdo, O7, mdo);
  }

  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
  Bytecodes::Code bc = method->java_code_at_bci(bci);
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
      C1ProfileVirtualCalls) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, tmp1, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the methodDataOop rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data,
                                                         VirtualCallData::receiver_count_offset(i)) -
                            mdo_offset_bias);
          __ ld_ptr(data_addr, tmp1);
          __ add(tmp1, DataLayout::counter_increment, tmp1);
          __ st_ptr(tmp1, data_addr);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
                            mdo_offset_bias);
          jobject2reg(known_klass->constant_encoding(), tmp1);
          __ st_ptr(tmp1, recv_addr);
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
                            mdo_offset_bias);
          __ ld_ptr(data_addr, tmp1);
          __ add(tmp1, DataLayout::counter_increment, tmp1);
          __ st_ptr(tmp1, data_addr);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ ld_ptr(counter_addr, tmp1);
      __ add(tmp1, DataLayout::counter_increment, tmp1);
      __ st_ptr(tmp1, counter_addr);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ ld_ptr(counter_addr, tmp1);
    __ add(tmp1, DataLayout::counter_increment, tmp1);
    __ st_ptr(tmp1, counter_addr);
  }
}
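
// Editor's note (general HotSpot background, not from this file): the
// ld/add/st counter updates above are deliberately non-atomic; concurrent
// threads may lose increments, which is acceptable because the profile
// counts only steer optimization heuristics, not correctness.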

void LIR_Assembler::align_backward_branch_target() {
  __ align(OptoLoopAlignment);
}


void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
  // make sure we are expecting a delay
  // this has the side effect of clearing the delay state
  // so we can use _masm instead of _masm->delayed() to do the
  // code generation.
  __ delayed();

  // make sure we only emit one instruction
  int offset = code_offset();
  op->delay_op()->emit_code(this);
#ifdef ASSERT
  if (code_offset() - offset != NativeInstruction::nop_instruction_size) {
    op->delay_op()->print();
  }
  assert(code_offset() - offset == NativeInstruction::nop_instruction_size,
         "only one instruction can go in a delay slot");
#endif

  // we may also be emitting the call info for the instruction
  // which we are the delay slot of.
  CodeEmitInfo* call_info = op->call_info();
  if (call_info) {
    add_call_info(code_offset(), call_info);
  }

  if (VerifyStackAtCalls) {
    _masm->sub(FP, SP, O7);
    _masm->cmp(O7, initial_frame_size_in_bytes());
    _masm->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2);
  }
}
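
// Editor's note: the assert in emit_delay() can equate "exactly one
// instruction" with NativeInstruction::nop_instruction_size because every
// SPARC instruction is a fixed 4 bytes, so comparing code offsets before
// and after emitting the delayed op gives an exact instruction count.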

void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  assert(left->is_register(), "can only handle registers");

  if (left->is_single_cpu()) {
    __ neg(left->as_register(), dest->as_register());
  } else if (left->is_single_fpu()) {
    __ fneg(FloatRegisterImpl::S, left->as_float_reg(), dest->as_float_reg());
  } else if (left->is_double_fpu()) {
    __ fneg(FloatRegisterImpl::D, left->as_double_reg(), dest->as_double_reg());
  } else {
    assert(left->is_double_cpu(), "Must be a long");
    Register Rlow = left->as_register_lo();
    Register Rhi  = left->as_register_hi();
#ifdef _LP64
    __ sub(G0, Rlow, dest->as_register_lo());
#else
    __ subcc(G0, Rlow, dest->as_register_lo());
    __ subc (G0, Rhi,  dest->as_register_hi());
#endif
  }
}


void LIR_Assembler::fxch(int i) {
  Unimplemented();
}

void LIR_Assembler::fld(int i) {
  Unimplemented();
}

void LIR_Assembler::ffree(int i) {
  Unimplemented();
}


void LIR_Assembler::rt_call(LIR_Opr result, address dest,
                            const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  // if tmp is invalid, then the function being called doesn't destroy the thread
  if (tmp->is_valid()) {
    __ save_thread(tmp->as_register());
  }
  __ call(dest, relocInfo::runtime_call_type);
  __ delayed()->nop();
  if (info != NULL) {
    add_call_info_here(info);
  }
  if (tmp->is_valid()) {
    __ restore_thread(tmp->as_register());
  }

#ifdef ASSERT
  __ verify_thread();
#endif // ASSERT
}
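
// Editor's note: neg(rs, rd) above is SPARC's synthetic negate, i.e.
// sub %g0, rs, rd (two's-complement negation); the 32-bit long case chains
// the borrow through subcc/subc just like arith_op's long add. fxch, fld
// and ffree remain Unimplemented() because they only make sense for the
// x87 register-stack FPU on x86.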

void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
#ifdef _LP64
  ShouldNotReachHere();
#endif

  NEEDS_CLEANUP;
  if (type == T_LONG) {
    LIR_Address* mem_addr = dest->is_address() ? dest->as_address_ptr() : src->as_address_ptr();

    // (extended to allow indexed as well as constant displaced for JSR-166)
    Register idx = noreg; // contains either constant offset or index

    int disp = mem_addr->disp();
    if (mem_addr->index() == LIR_OprFact::illegalOpr) {
      if (!Assembler::is_simm13(disp)) {
        idx = O7;
        __ set(disp, idx);
      }
    } else {
      assert(disp == 0, "not both indexed and disp");
      idx = mem_addr->index()->as_register();
    }

    int null_check_offset = -1;

    Register base = mem_addr->base()->as_register();
    if (src->is_register() && dest->is_address()) {
      // G4 is high half, G5 is low half
      if (VM_Version::v9_instructions_work()) {
        // clear the top bits of G5, and scale up G4
        __ srl (src->as_register_lo(), 0, G5);
        __ sllx(src->as_register_hi(), 32, G4);
        // combine the two halves into the 64 bits of G4
        __ or3(G4, G5, G4);
        null_check_offset = __ offset();
        if (idx == noreg) {
          __ stx(G4, base, disp);
        } else {
          __ stx(G4, base, idx);
        }
      } else {
        __ mov (src->as_register_hi(), G4);
        __ mov (src->as_register_lo(), G5);
        null_check_offset = __ offset();
        if (idx == noreg) {
          __ std(G4, base, disp);
        } else {
          __ std(G4, base, idx);
        }
      }
    } else if (src->is_address() && dest->is_register()) {
      null_check_offset = __ offset();
      if (VM_Version::v9_instructions_work()) {
        if (idx == noreg) {
          __ ldx(base, disp, G5);
        } else {
          __ ldx(base, idx, G5);
        }
        __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
        __ mov (G5, dest->as_register_lo());     // copy low half into lo
      } else {
        if (idx == noreg) {
          __ ldd(base, disp, G4);
        } else {
          __ ldd(base, idx, G4);
        }
        // G4 is high half, G5 is low half
        __ mov (G4, dest->as_register_hi());
        __ mov (G5, dest->as_register_lo());
      }
    } else {
      Unimplemented();
    }
    if (info != NULL) {
      add_debug_info_for_null_check(null_check_offset, info);
    }
  } else {
    // use normal move for all other volatiles since they don't need
    // special handling to remain atomic.
    move_op(src, dest, type, lir_patch_none, info, false, false, false);
  }
}


void LIR_Assembler::membar() {
  // only StoreLoad membars are ever explicitly needed on sparcs in TSO mode
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

void LIR_Assembler::membar_acquire() {
  // no-op on TSO
}

void LIR_Assembler::membar_release() {
  // no-op on TSO
}

// Pack two sequential registers containing 32 bit values
// into a single 64 bit register.
// src and src->successor() are packed into dst
// src and dst may be the same register.
// Note: src is destroyed
void LIR_Assembler::pack64(LIR_Opr src, LIR_Opr dst) {
  Register rs = src->as_register();
  Register rd = dst->as_register_lo();
  __ sllx(rs, 32, rs);
  __ srl(rs->successor(), 0, rs->successor());
  __ or3(rs, rs->successor(), rd);
}

// Unpack a 64 bit value in a register into
// two sequential registers.
// src is unpacked into dst and dst->successor()
void LIR_Assembler::unpack64(LIR_Opr src, LIR_Opr dst) {
  Register rs = src->as_register_lo();
  Register rd = dst->as_register_hi();
  assert_different_registers(rs, rd, rd->successor());
  __ srlx(rs, 32, rd);
  __ srl (rs,  0, rd->successor());
}


void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
  LIR_Address* addr = addr_opr->as_address_ptr();
  assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1 && Assembler::is_simm13(addr->disp()), "can't handle complex addresses yet");

  __ add(addr->base()->as_pointer_register(), addr->disp(), dest->as_pointer_register());
}


void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
  __ mov(G2_thread, result_reg->as_register());
}
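
// Editor's sketch for pack64/unpack64 above (illustrative values): with
// hi = 0x00000012 in src and lo = 0x34567890 in src->successor(),
//   sllx src, 32, src     ! 0x00000012_00000000
//   srl  succ, 0, succ    ! zero-extend the low word
//   or3  src, succ, rd    ! rd = 0x00000012_34567890
// unpack64 reverses this with srlx(rs, 32) for the high word and
// srl(rs, 0) for the zero-extended low word.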

void LIR_Assembler::peephole(LIR_List* lir) {
  LIR_OpList* inst = lir->instructions_list();
  for (int i = 0; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);
    switch (op->code()) {
      case lir_cond_float_branch:
      case lir_branch: {
        LIR_OpBranch* branch = op->as_OpBranch();
        assert(branch->info() == NULL, "shouldn't be state on branches anymore");
        LIR_Op* delay_op = NULL;
        // we'd like to be able to pull following instructions into
        // this slot but we don't know enough to do it safely yet so
        // only optimize block to block control flow.
        if (LIRFillDelaySlots && branch->block()) {
          LIR_Op* prev = inst->at(i - 1);
          if (prev && LIR_Assembler::is_single_instruction(prev) && prev->info() == NULL) {
            // swap previous instruction into delay slot
            inst->at_put(i - 1, op);
            inst->at_put(i, new LIR_OpDelay(prev, op->info()));
#ifndef PRODUCT
            if (LIRTracePeephole) {
              tty->print_cr("delayed");
              inst->at(i - 1)->print();
              inst->at(i)->print();
              tty->cr();
            }
#endif
            continue;
          }
        }

        if (!delay_op) {
          delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), NULL);
        }
        inst->insert_before(i + 1, delay_op);
        break;
      }
      case lir_static_call:
      case lir_virtual_call:
      case lir_icvirtual_call:
      case lir_optvirtual_call:
      case lir_dynamic_call: {
        LIR_Op* prev = inst->at(i - 1);
        if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL &&
            (op->code() != lir_virtual_call ||
             !prev->result_opr()->is_single_cpu() ||
             prev->result_opr()->as_register() != O0) &&
            LIR_Assembler::is_single_instruction(prev)) {
          // Only moves without info can be put into the delay slot.
          // Also don't allow the setup of the receiver in the delay
          // slot for vtable calls.
          inst->at_put(i - 1, op);
          inst->at_put(i, new LIR_OpDelay(prev, op->info()));
#ifndef PRODUCT
          if (LIRTracePeephole) {
            tty->print_cr("delayed");
            inst->at(i - 1)->print();
            inst->at(i)->print();
            tty->cr();
          }
#endif
        } else {
          LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
          inst->insert_before(i + 1, delay_op);
          i++;
        }

#if defined(TIERED) && !defined(_LP64)
        // fixup the return value from G1 to O0/O1 for long returns.
        // It's done here instead of in LIRGenerator because there's
        // such a mismatch between the single reg and double reg
        // calling convention.
        LIR_OpJavaCall* callop = op->as_OpJavaCall();
        if (callop->result_opr() == FrameMap::out_long_opr) {
          LIR_OpJavaCall* call;
          LIR_OprList* arguments = new LIR_OprList(callop->arguments()->length());
          // copy the argument list; LIR_OprList is a GrowableArray, so use
          // append/at rather than raw pointer indexing
          for (int a = 0; a < callop->arguments()->length(); a++) {
            arguments->append(callop->arguments()->at(a));
          }
          if (op->code() == lir_virtual_call) {
            call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
                                      callop->vtable_offset(), arguments, callop->info());
          } else {
            call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
                                      callop->addr(), arguments, callop->info());
          }
          inst->at_put(i - 1, call);
          inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(),
                                                 T_LONG, lir_patch_none, NULL));
        }
#endif
        break;
      }
    }
  }
}




#undef __