1 /* 2 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "c1/c1_Compilation.hpp" 27 #include "c1/c1_LIRAssembler.hpp" 28 #include "c1/c1_MacroAssembler.hpp" 29 #include "c1/c1_Runtime1.hpp" 30 #include "c1/c1_ValueStack.hpp" 31 #include "ci/ciArrayKlass.hpp" 32 #include "ci/ciInstance.hpp" 33 #include "gc/shared/barrierSet.hpp" 34 #include "gc/shared/cardTableModRefBS.hpp" 35 #include "gc/shared/collectedHeap.hpp" 36 #include "nativeInst_sparc.hpp" 37 #include "oops/objArrayKlass.hpp" 38 #include "runtime/sharedRuntime.hpp" 39 40 #define __ _masm-> 41 42 43 //------------------------------------------------------------ 44 45 46 bool LIR_Assembler::is_small_constant(LIR_Opr opr) { 47 if (opr->is_constant()) { 48 LIR_Const* constant = opr->as_constant_ptr(); 49 switch (constant->type()) { 50 case T_INT: { 51 jint value = constant->as_jint(); 52 return Assembler::is_simm13(value); 53 } 54 55 default: 56 return false; 57 } 58 } 59 return false; 60 } 61 62 63 bool LIR_Assembler::is_single_instruction(LIR_Op* op) { 64 switch (op->code()) { 65 case lir_null_check: 66 return true; 67 68 69 case lir_add: 70 case lir_ushr: 71 case lir_shr: 72 case lir_shl: 73 // integer shifts and adds are always one instruction 74 return op->result_opr()->is_single_cpu(); 75 76 77 case lir_move: { 78 LIR_Op1* op1 = op->as_Op1(); 79 LIR_Opr src = op1->in_opr(); 80 LIR_Opr dst = op1->result_opr(); 81 82 if (src == dst) { 83 NEEDS_CLEANUP; 84 // this works around a problem where moves with the same src and dst 85 // end up in the delay slot and then the assembler swallows the mov 86 // since it has no effect and then it complains because the delay slot 87 // is empty. returning false stops the optimizer from putting this in 88 // the delay slot 89 return false; 90 } 91 92 // don't put moves involving oops into the delay slot since the VerifyOops code 93 // will make it much larger than a single instruction. 94 if (VerifyOops) { 95 return false; 96 } 97 98 if (src->is_double_cpu() || dst->is_double_cpu() || op1->patch_code() != lir_patch_none || 99 ((src->is_double_fpu() || dst->is_double_fpu()) && op1->move_kind() != lir_move_normal)) { 100 return false; 101 } 102 103 if (UseCompressedOops) { 104 if (dst->is_address() && !dst->is_stack() && (dst->type() == T_OBJECT || dst->type() == T_ARRAY)) return false; 105 if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false; 106 } 107 108 if (UseCompressedClassPointers) { 109 if (src->is_address() && !src->is_stack() && src->type() == T_ADDRESS && 110 src->as_address_ptr()->disp() == oopDesc::klass_offset_in_bytes()) return false; 111 } 112 113 if (dst->is_register()) { 114 if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) { 115 return !PatchALot; 116 } else if (src->is_single_stack()) { 117 return true; 118 } 119 } 120 121 if (src->is_register()) { 122 if (dst->is_address() && Assembler::is_simm13(dst->as_address_ptr()->disp())) { 123 return !PatchALot; 124 } else if (dst->is_single_stack()) { 125 return true; 126 } 127 } 128 129 if (dst->is_register() && 130 ((src->is_register() && src->is_single_word() && src->is_same_type(dst)) || 131 (src->is_constant() && LIR_Assembler::is_small_constant(op->as_Op1()->in_opr())))) { 132 return true; 133 } 134 135 return false; 136 } 137 138 default: 139 return false; 140 } 141 ShouldNotReachHere(); 142 } 143 144 145 LIR_Opr LIR_Assembler::receiverOpr() { 146 return FrameMap::O0_oop_opr; 147 } 148 149 150 LIR_Opr LIR_Assembler::osrBufferPointer() { 151 return FrameMap::I0_opr; 152 } 153 154 155 int LIR_Assembler::initial_frame_size_in_bytes() const { 156 return in_bytes(frame_map()->framesize_in_bytes()); 157 } 158 159 160 // inline cache check: the inline cached class is in G5_inline_cache_reg(G5); 161 // we fetch the class of the receiver (O0) and compare it with the cached class. 162 // If they do not match we jump to slow case. 163 int LIR_Assembler::check_icache() { 164 int offset = __ offset(); 165 __ inline_cache_check(O0, G5_inline_cache_reg); 166 return offset; 167 } 168 169 170 void LIR_Assembler::osr_entry() { 171 // On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp): 172 // 173 // 1. Create a new compiled activation. 174 // 2. Initialize local variables in the compiled activation. The expression stack must be empty 175 // at the osr_bci; it is not initialized. 176 // 3. Jump to the continuation address in compiled code to resume execution. 177 178 // OSR entry point 179 offsets()->set_value(CodeOffsets::OSR_Entry, code_offset()); 180 BlockBegin* osr_entry = compilation()->hir()->osr_entry(); 181 ValueStack* entry_state = osr_entry->end()->state(); 182 int number_of_locks = entry_state->locks_size(); 183 184 // Create a frame for the compiled activation. 185 __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes()); 186 187 // OSR buffer is 188 // 189 // locals[nlocals-1..0] 190 // monitors[number_of_locks-1..0] 191 // 192 // locals is a direct copy of the interpreter frame so in the osr buffer 193 // so first slot in the local array is the last local from the interpreter 194 // and last slot is local[0] (receiver) from the interpreter 195 // 196 // Similarly with locks. The first lock slot in the osr buffer is the nth lock 197 // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock 198 // in the interpreter frame (the method lock if a sync method) 199 200 // Initialize monitors in the compiled activation. 201 // I0: pointer to osr buffer 202 // 203 // All other registers are dead at this point and the locals will be 204 // copied into place by code emitted in the IR. 205 206 Register OSR_buf = osrBufferPointer()->as_register(); 207 { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below"); 208 int monitor_offset = BytesPerWord * method()->max_locals() + 209 (2 * BytesPerWord) * (number_of_locks - 1); 210 // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in 211 // the OSR buffer using 2 word entries: first the lock and then 212 // the oop. 213 for (int i = 0; i < number_of_locks; i++) { 214 int slot_offset = monitor_offset - ((i * 2) * BytesPerWord); 215 #ifdef ASSERT 216 // verify the interpreter's monitor has a non-null object 217 { 218 Label L; 219 __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7); 220 __ cmp_and_br_short(O7, G0, Assembler::notEqual, Assembler::pt, L); 221 __ stop("locked object is NULL"); 222 __ bind(L); 223 } 224 #endif // ASSERT 225 // Copy the lock field into the compiled activation. 226 __ ld_ptr(OSR_buf, slot_offset + 0, O7); 227 __ st_ptr(O7, frame_map()->address_for_monitor_lock(i)); 228 __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7); 229 __ st_ptr(O7, frame_map()->address_for_monitor_object(i)); 230 } 231 } 232 } 233 234 235 // Optimized Library calls 236 // This is the fast version of java.lang.String.compare; it has not 237 // OSR-entry and therefore, we generate a slow version for OSR's 238 void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst, CodeEmitInfo* info) { 239 Register str0 = left->as_register(); 240 Register str1 = right->as_register(); 241 242 Label Ldone; 243 244 Register result = dst->as_register(); 245 { 246 // Get a pointer to the first character of string0 in tmp0 247 // and get string0.length() in str0 248 // Get a pointer to the first character of string1 in tmp1 249 // and get string1.length() in str1 250 // Also, get string0.length()-string1.length() in 251 // o7 and get the condition code set 252 // Note: some instructions have been hoisted for better instruction scheduling 253 254 Register tmp0 = L0; 255 Register tmp1 = L1; 256 Register tmp2 = L2; 257 258 int value_offset = java_lang_String:: value_offset_in_bytes(); // char array 259 if (java_lang_String::has_offset_field()) { 260 int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position 261 int count_offset = java_lang_String:: count_offset_in_bytes(); 262 __ load_heap_oop(str0, value_offset, tmp0); 263 __ ld(str0, offset_offset, tmp2); 264 __ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0); 265 __ ld(str0, count_offset, str0); 266 __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2); 267 } else { 268 __ load_heap_oop(str0, value_offset, tmp1); 269 __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0); 270 __ ld(tmp1, arrayOopDesc::length_offset_in_bytes(), str0); 271 } 272 273 // str1 may be null 274 add_debug_info_for_null_check_here(info); 275 276 if (java_lang_String::has_offset_field()) { 277 int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position 278 int count_offset = java_lang_String:: count_offset_in_bytes(); 279 __ load_heap_oop(str1, value_offset, tmp1); 280 __ add(tmp0, tmp2, tmp0); 281 282 __ ld(str1, offset_offset, tmp2); 283 __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1); 284 __ ld(str1, count_offset, str1); 285 __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2); 286 __ add(tmp1, tmp2, tmp1); 287 } else { 288 __ load_heap_oop(str1, value_offset, tmp2); 289 __ add(tmp2, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1); 290 __ ld(tmp2, arrayOopDesc::length_offset_in_bytes(), str1); 291 } 292 __ subcc(str0, str1, O7); 293 } 294 295 { 296 // Compute the minimum of the string lengths, scale it and store it in limit 297 Register count0 = I0; 298 Register count1 = I1; 299 Register limit = L3; 300 301 Label Lskip; 302 __ sll(count0, exact_log2(sizeof(jchar)), limit); // string0 is shorter 303 __ br(Assembler::greater, true, Assembler::pt, Lskip); 304 __ delayed()->sll(count1, exact_log2(sizeof(jchar)), limit); // string1 is shorter 305 __ bind(Lskip); 306 307 // If either string is empty (or both of them) the result is the difference in lengths 308 __ cmp(limit, 0); 309 __ br(Assembler::equal, true, Assembler::pn, Ldone); 310 __ delayed()->mov(O7, result); // result is difference in lengths 311 } 312 313 { 314 // Neither string is empty 315 Label Lloop; 316 317 Register base0 = L0; 318 Register base1 = L1; 319 Register chr0 = I0; 320 Register chr1 = I1; 321 Register limit = L3; 322 323 // Shift base0 and base1 to the end of the arrays, negate limit 324 __ add(base0, limit, base0); 325 __ add(base1, limit, base1); 326 __ neg(limit); // limit = -min{string0.length(), string1.length()} 327 328 __ lduh(base0, limit, chr0); 329 __ bind(Lloop); 330 __ lduh(base1, limit, chr1); 331 __ subcc(chr0, chr1, chr0); 332 __ br(Assembler::notZero, false, Assembler::pn, Ldone); 333 assert(chr0 == result, "result must be pre-placed"); 334 __ delayed()->inccc(limit, sizeof(jchar)); 335 __ br(Assembler::notZero, true, Assembler::pt, Lloop); 336 __ delayed()->lduh(base0, limit, chr0); 337 } 338 339 // If strings are equal up to min length, return the length difference. 340 __ mov(O7, result); 341 342 // Otherwise, return the difference between the first mismatched chars. 343 __ bind(Ldone); 344 } 345 346 347 // -------------------------------------------------------------------------------------------- 348 349 void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no) { 350 if (!GenerateSynchronizationCode) return; 351 352 Register obj_reg = obj_opr->as_register(); 353 Register lock_reg = lock_opr->as_register(); 354 355 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no); 356 Register reg = mon_addr.base(); 357 int offset = mon_addr.disp(); 358 // compute pointer to BasicLock 359 if (mon_addr.is_simm13()) { 360 __ add(reg, offset, lock_reg); 361 } 362 else { 363 __ set(offset, lock_reg); 364 __ add(reg, lock_reg, lock_reg); 365 } 366 // unlock object 367 MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, UseFastLocking, monitor_no); 368 // _slow_case_stubs->append(slow_case); 369 // temporary fix: must be created after exceptionhandler, therefore as call stub 370 _slow_case_stubs->append(slow_case); 371 if (UseFastLocking) { 372 // try inlined fast unlocking first, revert to slow locking if it fails 373 // note: lock_reg points to the displaced header since the displaced header offset is 0! 374 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 375 __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry()); 376 } else { 377 // always do slow unlocking 378 // note: the slow unlocking code could be inlined here, however if we use 379 // slow unlocking, speed doesn't matter anyway and this solution is 380 // simpler and requires less duplicated code - additionally, the 381 // slow unlocking code is the same in either case which simplifies 382 // debugging 383 __ br(Assembler::always, false, Assembler::pt, *slow_case->entry()); 384 __ delayed()->nop(); 385 } 386 // done 387 __ bind(*slow_case->continuation()); 388 } 389 390 391 int LIR_Assembler::emit_exception_handler() { 392 // if the last instruction is a call (typically to do a throw which 393 // is coming at the end after block reordering) the return address 394 // must still point into the code area in order to avoid assertion 395 // failures when searching for the corresponding bci => add a nop 396 // (was bug 5/14/1999 - gri) 397 __ nop(); 398 399 // generate code for exception handler 400 ciMethod* method = compilation()->method(); 401 402 address handler_base = __ start_a_stub(exception_handler_size); 403 404 if (handler_base == NULL) { 405 // not enough space left for the handler 406 bailout("exception handler overflow"); 407 return -1; 408 } 409 410 int offset = code_offset(); 411 412 __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type); 413 __ delayed()->nop(); 414 __ should_not_reach_here(); 415 guarantee(code_offset() - offset <= exception_handler_size, "overflow"); 416 __ end_a_stub(); 417 418 return offset; 419 } 420 421 422 // Emit the code to remove the frame from the stack in the exception 423 // unwind path. 424 int LIR_Assembler::emit_unwind_handler() { 425 #ifndef PRODUCT 426 if (CommentedAssembly) { 427 _masm->block_comment("Unwind handler"); 428 } 429 #endif 430 431 int offset = code_offset(); 432 433 // Fetch the exception from TLS and clear out exception related thread state 434 __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0); 435 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset())); 436 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset())); 437 438 __ bind(_unwind_handler_entry); 439 __ verify_not_null_oop(O0); 440 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { 441 __ mov(O0, I0); // Preserve the exception 442 } 443 444 // Preform needed unlocking 445 MonitorExitStub* stub = NULL; 446 if (method()->is_synchronized()) { 447 monitor_address(0, FrameMap::I1_opr); 448 stub = new MonitorExitStub(FrameMap::I1_opr, true, 0); 449 __ unlock_object(I3, I2, I1, *stub->entry()); 450 __ bind(*stub->continuation()); 451 } 452 453 if (compilation()->env()->dtrace_method_probes()) { 454 __ mov(G2_thread, O0); 455 __ save_thread(I1); // need to preserve thread in G2 across 456 // runtime call 457 metadata2reg(method()->constant_encoding(), O1); 458 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type); 459 __ delayed()->nop(); 460 __ restore_thread(I1); 461 } 462 463 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { 464 __ mov(I0, O0); // Restore the exception 465 } 466 467 // dispatch to the unwind logic 468 __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type); 469 __ delayed()->nop(); 470 471 // Emit the slow path assembly 472 if (stub != NULL) { 473 stub->emit_code(this); 474 } 475 476 return offset; 477 } 478 479 480 int LIR_Assembler::emit_deopt_handler() { 481 // if the last instruction is a call (typically to do a throw which 482 // is coming at the end after block reordering) the return address 483 // must still point into the code area in order to avoid assertion 484 // failures when searching for the corresponding bci => add a nop 485 // (was bug 5/14/1999 - gri) 486 __ nop(); 487 488 // generate code for deopt handler 489 ciMethod* method = compilation()->method(); 490 address handler_base = __ start_a_stub(deopt_handler_size); 491 if (handler_base == NULL) { 492 // not enough space left for the handler 493 bailout("deopt handler overflow"); 494 return -1; 495 } 496 497 int offset = code_offset(); 498 AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack()); 499 __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp 500 __ delayed()->nop(); 501 guarantee(code_offset() - offset <= deopt_handler_size, "overflow"); 502 __ end_a_stub(); 503 504 return offset; 505 } 506 507 508 void LIR_Assembler::jobject2reg(jobject o, Register reg) { 509 if (o == NULL) { 510 __ set(NULL_WORD, reg); 511 } else { 512 int oop_index = __ oop_recorder()->find_index(o); 513 assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(o)), "should be real oop"); 514 RelocationHolder rspec = oop_Relocation::spec(oop_index); 515 __ set(NULL_WORD, reg, rspec); // Will be set when the nmethod is created 516 } 517 } 518 519 520 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) { 521 // Allocate a new index in table to hold the object once it's been patched 522 int oop_index = __ oop_recorder()->allocate_oop_index(NULL); 523 PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index); 524 525 AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index)); 526 assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc"); 527 // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the 528 // NULL will be dynamically patched later and the patched value may be large. We must 529 // therefore generate the sethi/add as a placeholders 530 __ patchable_set(addrlit, reg); 531 532 patching_epilog(patch, lir_patch_normal, reg, info); 533 } 534 535 536 void LIR_Assembler::metadata2reg(Metadata* o, Register reg) { 537 __ set_metadata_constant(o, reg); 538 } 539 540 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) { 541 // Allocate a new index in table to hold the klass once it's been patched 542 int index = __ oop_recorder()->allocate_metadata_index(NULL); 543 PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index); 544 AddressLiteral addrlit(NULL, metadata_Relocation::spec(index)); 545 assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be an metadata reloc"); 546 // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the 547 // NULL will be dynamically patched later and the patched value may be large. We must 548 // therefore generate the sethi/add as a placeholders 549 __ patchable_set(addrlit, reg); 550 551 patching_epilog(patch, lir_patch_normal, reg, info); 552 } 553 554 void LIR_Assembler::emit_op3(LIR_Op3* op) { 555 Register Rdividend = op->in_opr1()->as_register(); 556 Register Rdivisor = noreg; 557 Register Rscratch = op->in_opr3()->as_register(); 558 Register Rresult = op->result_opr()->as_register(); 559 int divisor = -1; 560 561 if (op->in_opr2()->is_register()) { 562 Rdivisor = op->in_opr2()->as_register(); 563 } else { 564 divisor = op->in_opr2()->as_constant_ptr()->as_jint(); 565 assert(Assembler::is_simm13(divisor), "can only handle simm13"); 566 } 567 568 assert(Rdividend != Rscratch, ""); 569 assert(Rdivisor != Rscratch, ""); 570 assert(op->code() == lir_idiv || op->code() == lir_irem, "Must be irem or idiv"); 571 572 if (Rdivisor == noreg && is_power_of_2(divisor)) { 573 // convert division by a power of two into some shifts and logical operations 574 if (op->code() == lir_idiv) { 575 if (divisor == 2) { 576 __ srl(Rdividend, 31, Rscratch); 577 } else { 578 __ sra(Rdividend, 31, Rscratch); 579 __ and3(Rscratch, divisor - 1, Rscratch); 580 } 581 __ add(Rdividend, Rscratch, Rscratch); 582 __ sra(Rscratch, log2_intptr(divisor), Rresult); 583 return; 584 } else { 585 if (divisor == 2) { 586 __ srl(Rdividend, 31, Rscratch); 587 } else { 588 __ sra(Rdividend, 31, Rscratch); 589 __ and3(Rscratch, divisor - 1,Rscratch); 590 } 591 __ add(Rdividend, Rscratch, Rscratch); 592 __ andn(Rscratch, divisor - 1,Rscratch); 593 __ sub(Rdividend, Rscratch, Rresult); 594 return; 595 } 596 } 597 598 __ sra(Rdividend, 31, Rscratch); 599 __ wry(Rscratch); 600 601 add_debug_info_for_div0_here(op->info()); 602 603 if (Rdivisor != noreg) { 604 __ sdivcc(Rdividend, Rdivisor, (op->code() == lir_idiv ? Rresult : Rscratch)); 605 } else { 606 assert(Assembler::is_simm13(divisor), "can only handle simm13"); 607 __ sdivcc(Rdividend, divisor, (op->code() == lir_idiv ? Rresult : Rscratch)); 608 } 609 610 Label skip; 611 __ br(Assembler::overflowSet, true, Assembler::pn, skip); 612 __ delayed()->Assembler::sethi(0x80000000, (op->code() == lir_idiv ? Rresult : Rscratch)); 613 __ bind(skip); 614 615 if (op->code() == lir_irem) { 616 if (Rdivisor != noreg) { 617 __ smul(Rscratch, Rdivisor, Rscratch); 618 } else { 619 __ smul(Rscratch, divisor, Rscratch); 620 } 621 __ sub(Rdividend, Rscratch, Rresult); 622 } 623 } 624 625 626 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) { 627 #ifdef ASSERT 628 assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label"); 629 if (op->block() != NULL) _branch_target_blocks.append(op->block()); 630 if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock()); 631 #endif 632 assert(op->info() == NULL, "shouldn't have CodeEmitInfo"); 633 634 if (op->cond() == lir_cond_always) { 635 __ br(Assembler::always, false, Assembler::pt, *(op->label())); 636 } else if (op->code() == lir_cond_float_branch) { 637 assert(op->ublock() != NULL, "must have unordered successor"); 638 bool is_unordered = (op->ublock() == op->block()); 639 Assembler::Condition acond; 640 switch (op->cond()) { 641 case lir_cond_equal: acond = Assembler::f_equal; break; 642 case lir_cond_notEqual: acond = Assembler::f_notEqual; break; 643 case lir_cond_less: acond = (is_unordered ? Assembler::f_unorderedOrLess : Assembler::f_less); break; 644 case lir_cond_greater: acond = (is_unordered ? Assembler::f_unorderedOrGreater : Assembler::f_greater); break; 645 case lir_cond_lessEqual: acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual : Assembler::f_lessOrEqual); break; 646 case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break; 647 default : ShouldNotReachHere(); 648 } 649 __ fb( acond, false, Assembler::pn, *(op->label())); 650 } else { 651 assert (op->code() == lir_branch, "just checking"); 652 653 Assembler::Condition acond; 654 switch (op->cond()) { 655 case lir_cond_equal: acond = Assembler::equal; break; 656 case lir_cond_notEqual: acond = Assembler::notEqual; break; 657 case lir_cond_less: acond = Assembler::less; break; 658 case lir_cond_lessEqual: acond = Assembler::lessEqual; break; 659 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break; 660 case lir_cond_greater: acond = Assembler::greater; break; 661 case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break; 662 case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break; 663 default: ShouldNotReachHere(); 664 }; 665 666 // sparc has different condition codes for testing 32-bit 667 // vs. 64-bit values. We could always test xcc is we could 668 // guarantee that 32-bit loads always sign extended but that isn't 669 // true and since sign extension isn't free, it would impose a 670 // slight cost. 671 #ifdef _LP64 672 if (op->type() == T_INT) { 673 __ br(acond, false, Assembler::pn, *(op->label())); 674 } else 675 #endif 676 __ brx(acond, false, Assembler::pn, *(op->label())); 677 } 678 // The peephole pass fills the delay slot 679 } 680 681 682 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) { 683 Bytecodes::Code code = op->bytecode(); 684 LIR_Opr dst = op->result_opr(); 685 686 switch(code) { 687 case Bytecodes::_i2l: { 688 Register rlo = dst->as_register_lo(); 689 Register rhi = dst->as_register_hi(); 690 Register rval = op->in_opr()->as_register(); 691 #ifdef _LP64 692 __ sra(rval, 0, rlo); 693 #else 694 __ mov(rval, rlo); 695 __ sra(rval, BitsPerInt-1, rhi); 696 #endif 697 break; 698 } 699 case Bytecodes::_i2d: 700 case Bytecodes::_i2f: { 701 bool is_double = (code == Bytecodes::_i2d); 702 FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg(); 703 FloatRegisterImpl::Width w = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S; 704 FloatRegister rsrc = op->in_opr()->as_float_reg(); 705 if (rsrc != rdst) { 706 __ fmov(FloatRegisterImpl::S, rsrc, rdst); 707 } 708 __ fitof(w, rdst, rdst); 709 break; 710 } 711 case Bytecodes::_f2i:{ 712 FloatRegister rsrc = op->in_opr()->as_float_reg(); 713 Address addr = frame_map()->address_for_slot(dst->single_stack_ix()); 714 Label L; 715 // result must be 0 if value is NaN; test by comparing value to itself 716 __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc); 717 __ fb(Assembler::f_unordered, true, Assembler::pn, L); 718 __ delayed()->st(G0, addr); // annuled if contents of rsrc is not NaN 719 __ ftoi(FloatRegisterImpl::S, rsrc, rsrc); 720 // move integer result from float register to int register 721 __ stf(FloatRegisterImpl::S, rsrc, addr.base(), addr.disp()); 722 __ bind (L); 723 break; 724 } 725 case Bytecodes::_l2i: { 726 Register rlo = op->in_opr()->as_register_lo(); 727 Register rhi = op->in_opr()->as_register_hi(); 728 Register rdst = dst->as_register(); 729 #ifdef _LP64 730 __ sra(rlo, 0, rdst); 731 #else 732 __ mov(rlo, rdst); 733 #endif 734 break; 735 } 736 case Bytecodes::_d2f: 737 case Bytecodes::_f2d: { 738 bool is_double = (code == Bytecodes::_f2d); 739 assert((!is_double && dst->is_single_fpu()) || (is_double && dst->is_double_fpu()), "check"); 740 LIR_Opr val = op->in_opr(); 741 FloatRegister rval = (code == Bytecodes::_d2f) ? val->as_double_reg() : val->as_float_reg(); 742 FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg(); 743 FloatRegisterImpl::Width vw = is_double ? FloatRegisterImpl::S : FloatRegisterImpl::D; 744 FloatRegisterImpl::Width dw = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S; 745 __ ftof(vw, dw, rval, rdst); 746 break; 747 } 748 case Bytecodes::_i2s: 749 case Bytecodes::_i2b: { 750 Register rval = op->in_opr()->as_register(); 751 Register rdst = dst->as_register(); 752 int shift = (code == Bytecodes::_i2b) ? (BitsPerInt - T_BYTE_aelem_bytes * BitsPerByte) : (BitsPerInt - BitsPerShort); 753 __ sll (rval, shift, rdst); 754 __ sra (rdst, shift, rdst); 755 break; 756 } 757 case Bytecodes::_i2c: { 758 Register rval = op->in_opr()->as_register(); 759 Register rdst = dst->as_register(); 760 int shift = BitsPerInt - T_CHAR_aelem_bytes * BitsPerByte; 761 __ sll (rval, shift, rdst); 762 __ srl (rdst, shift, rdst); 763 break; 764 } 765 766 default: ShouldNotReachHere(); 767 } 768 } 769 770 771 void LIR_Assembler::align_call(LIR_Code) { 772 // do nothing since all instructions are word aligned on sparc 773 } 774 775 776 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) { 777 __ call(op->addr(), rtype); 778 // The peephole pass fills the delay slot, add_call_info is done in 779 // LIR_Assembler::emit_delay. 780 } 781 782 783 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) { 784 __ ic_call(op->addr(), false); 785 // The peephole pass fills the delay slot, add_call_info is done in 786 // LIR_Assembler::emit_delay. 787 } 788 789 790 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) { 791 add_debug_info_for_null_check_here(op->info()); 792 __ load_klass(O0, G3_scratch); 793 if (Assembler::is_simm13(op->vtable_offset())) { 794 __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method); 795 } else { 796 // This will generate 2 instructions 797 __ set(op->vtable_offset(), G5_method); 798 // ld_ptr, set_hi, set 799 __ ld_ptr(G3_scratch, G5_method, G5_method); 800 } 801 __ ld_ptr(G5_method, Method::from_compiled_offset(), G3_scratch); 802 __ callr(G3_scratch, G0); 803 // the peephole pass fills the delay slot 804 } 805 806 int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) { 807 int store_offset; 808 if (!Assembler::is_simm13(offset + (type == T_LONG) ? wordSize : 0)) { 809 assert(!unaligned, "can't handle this"); 810 // for offsets larger than a simm13 we setup the offset in O7 811 __ set(offset, O7); 812 store_offset = store(from_reg, base, O7, type, wide); 813 } else { 814 if (type == T_ARRAY || type == T_OBJECT) { 815 __ verify_oop(from_reg->as_register()); 816 } 817 store_offset = code_offset(); 818 switch (type) { 819 case T_BOOLEAN: // fall through 820 case T_BYTE : __ stb(from_reg->as_register(), base, offset); break; 821 case T_CHAR : __ sth(from_reg->as_register(), base, offset); break; 822 case T_SHORT : __ sth(from_reg->as_register(), base, offset); break; 823 case T_INT : __ stw(from_reg->as_register(), base, offset); break; 824 case T_LONG : 825 #ifdef _LP64 826 if (unaligned || PatchALot) { 827 __ srax(from_reg->as_register_lo(), 32, O7); 828 __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes); 829 __ stw(O7, base, offset + hi_word_offset_in_bytes); 830 } else { 831 __ stx(from_reg->as_register_lo(), base, offset); 832 } 833 #else 834 assert(Assembler::is_simm13(offset + 4), "must be"); 835 __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes); 836 __ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes); 837 #endif 838 break; 839 case T_ADDRESS: 840 case T_METADATA: 841 __ st_ptr(from_reg->as_register(), base, offset); 842 break; 843 case T_ARRAY : // fall through 844 case T_OBJECT: 845 { 846 if (UseCompressedOops && !wide) { 847 __ encode_heap_oop(from_reg->as_register(), G3_scratch); 848 store_offset = code_offset(); 849 __ stw(G3_scratch, base, offset); 850 } else { 851 __ st_ptr(from_reg->as_register(), base, offset); 852 } 853 break; 854 } 855 856 case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break; 857 case T_DOUBLE: 858 { 859 FloatRegister reg = from_reg->as_double_reg(); 860 // split unaligned stores 861 if (unaligned || PatchALot) { 862 assert(Assembler::is_simm13(offset + 4), "must be"); 863 __ stf(FloatRegisterImpl::S, reg->successor(), base, offset + 4); 864 __ stf(FloatRegisterImpl::S, reg, base, offset); 865 } else { 866 __ stf(FloatRegisterImpl::D, reg, base, offset); 867 } 868 break; 869 } 870 default : ShouldNotReachHere(); 871 } 872 } 873 return store_offset; 874 } 875 876 877 int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) { 878 if (type == T_ARRAY || type == T_OBJECT) { 879 __ verify_oop(from_reg->as_register()); 880 } 881 int store_offset = code_offset(); 882 switch (type) { 883 case T_BOOLEAN: // fall through 884 case T_BYTE : __ stb(from_reg->as_register(), base, disp); break; 885 case T_CHAR : __ sth(from_reg->as_register(), base, disp); break; 886 case T_SHORT : __ sth(from_reg->as_register(), base, disp); break; 887 case T_INT : __ stw(from_reg->as_register(), base, disp); break; 888 case T_LONG : 889 #ifdef _LP64 890 __ stx(from_reg->as_register_lo(), base, disp); 891 #else 892 assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match"); 893 __ std(from_reg->as_register_hi(), base, disp); 894 #endif 895 break; 896 case T_ADDRESS: 897 __ st_ptr(from_reg->as_register(), base, disp); 898 break; 899 case T_ARRAY : // fall through 900 case T_OBJECT: 901 { 902 if (UseCompressedOops && !wide) { 903 __ encode_heap_oop(from_reg->as_register(), G3_scratch); 904 store_offset = code_offset(); 905 __ stw(G3_scratch, base, disp); 906 } else { 907 __ st_ptr(from_reg->as_register(), base, disp); 908 } 909 break; 910 } 911 case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, disp); break; 912 case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break; 913 default : ShouldNotReachHere(); 914 } 915 return store_offset; 916 } 917 918 919 int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) { 920 int load_offset; 921 if (!Assembler::is_simm13(offset + (type == T_LONG) ? wordSize : 0)) { 922 assert(base != O7, "destroying register"); 923 assert(!unaligned, "can't handle this"); 924 // for offsets larger than a simm13 we setup the offset in O7 925 __ set(offset, O7); 926 load_offset = load(base, O7, to_reg, type, wide); 927 } else { 928 load_offset = code_offset(); 929 switch(type) { 930 case T_BOOLEAN: // fall through 931 case T_BYTE : __ ldsb(base, offset, to_reg->as_register()); break; 932 case T_CHAR : __ lduh(base, offset, to_reg->as_register()); break; 933 case T_SHORT : __ ldsh(base, offset, to_reg->as_register()); break; 934 case T_INT : __ ld(base, offset, to_reg->as_register()); break; 935 case T_LONG : 936 if (!unaligned) { 937 #ifdef _LP64 938 __ ldx(base, offset, to_reg->as_register_lo()); 939 #else 940 assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(), 941 "must be sequential"); 942 __ ldd(base, offset, to_reg->as_register_hi()); 943 #endif 944 } else { 945 #ifdef _LP64 946 assert(base != to_reg->as_register_lo(), "can't handle this"); 947 assert(O7 != to_reg->as_register_lo(), "can't handle this"); 948 __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo()); 949 __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last 950 __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo()); 951 __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo()); 952 #else 953 if (base == to_reg->as_register_lo()) { 954 __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi()); 955 __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo()); 956 } else { 957 __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo()); 958 __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi()); 959 } 960 #endif 961 } 962 break; 963 case T_METADATA: __ ld_ptr(base, offset, to_reg->as_register()); break; 964 case T_ADDRESS: 965 #ifdef _LP64 966 if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) { 967 __ lduw(base, offset, to_reg->as_register()); 968 __ decode_klass_not_null(to_reg->as_register()); 969 } else 970 #endif 971 { 972 __ ld_ptr(base, offset, to_reg->as_register()); 973 } 974 break; 975 case T_ARRAY : // fall through 976 case T_OBJECT: 977 { 978 if (UseCompressedOops && !wide) { 979 __ lduw(base, offset, to_reg->as_register()); 980 __ decode_heap_oop(to_reg->as_register()); 981 } else { 982 __ ld_ptr(base, offset, to_reg->as_register()); 983 } 984 break; 985 } 986 case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break; 987 case T_DOUBLE: 988 { 989 FloatRegister reg = to_reg->as_double_reg(); 990 // split unaligned loads 991 if (unaligned || PatchALot) { 992 __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor()); 993 __ ldf(FloatRegisterImpl::S, base, offset, reg); 994 } else { 995 __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg()); 996 } 997 break; 998 } 999 default : ShouldNotReachHere(); 1000 } 1001 if (type == T_ARRAY || type == T_OBJECT) { 1002 __ verify_oop(to_reg->as_register()); 1003 } 1004 } 1005 return load_offset; 1006 } 1007 1008 1009 int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) { 1010 int load_offset = code_offset(); 1011 switch(type) { 1012 case T_BOOLEAN: // fall through 1013 case T_BYTE : __ ldsb(base, disp, to_reg->as_register()); break; 1014 case T_CHAR : __ lduh(base, disp, to_reg->as_register()); break; 1015 case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break; 1016 case T_INT : __ ld(base, disp, to_reg->as_register()); break; 1017 case T_ADDRESS: __ ld_ptr(base, disp, to_reg->as_register()); break; 1018 case T_ARRAY : // fall through 1019 case T_OBJECT: 1020 { 1021 if (UseCompressedOops && !wide) { 1022 __ lduw(base, disp, to_reg->as_register()); 1023 __ decode_heap_oop(to_reg->as_register()); 1024 } else { 1025 __ ld_ptr(base, disp, to_reg->as_register()); 1026 } 1027 break; 1028 } 1029 case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break; 1030 case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break; 1031 case T_LONG : 1032 #ifdef _LP64 1033 __ ldx(base, disp, to_reg->as_register_lo()); 1034 #else 1035 assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(), 1036 "must be sequential"); 1037 __ ldd(base, disp, to_reg->as_register_hi()); 1038 #endif 1039 break; 1040 default : ShouldNotReachHere(); 1041 } 1042 if (type == T_ARRAY || type == T_OBJECT) { 1043 __ verify_oop(to_reg->as_register()); 1044 } 1045 return load_offset; 1046 } 1047 1048 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) { 1049 LIR_Const* c = src->as_constant_ptr(); 1050 switch (c->type()) { 1051 case T_INT: 1052 case T_FLOAT: { 1053 Register src_reg = O7; 1054 int value = c->as_jint_bits(); 1055 if (value == 0) { 1056 src_reg = G0; 1057 } else { 1058 __ set(value, O7); 1059 } 1060 Address addr = frame_map()->address_for_slot(dest->single_stack_ix()); 1061 __ stw(src_reg, addr.base(), addr.disp()); 1062 break; 1063 } 1064 case T_ADDRESS: { 1065 Register src_reg = O7; 1066 int value = c->as_jint_bits(); 1067 if (value == 0) { 1068 src_reg = G0; 1069 } else { 1070 __ set(value, O7); 1071 } 1072 Address addr = frame_map()->address_for_slot(dest->single_stack_ix()); 1073 __ st_ptr(src_reg, addr.base(), addr.disp()); 1074 break; 1075 } 1076 case T_OBJECT: { 1077 Register src_reg = O7; 1078 jobject2reg(c->as_jobject(), src_reg); 1079 Address addr = frame_map()->address_for_slot(dest->single_stack_ix()); 1080 __ st_ptr(src_reg, addr.base(), addr.disp()); 1081 break; 1082 } 1083 case T_LONG: 1084 case T_DOUBLE: { 1085 Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix()); 1086 1087 Register tmp = O7; 1088 int value_lo = c->as_jint_lo_bits(); 1089 if (value_lo == 0) { 1090 tmp = G0; 1091 } else { 1092 __ set(value_lo, O7); 1093 } 1094 __ stw(tmp, addr.base(), addr.disp() + lo_word_offset_in_bytes); 1095 int value_hi = c->as_jint_hi_bits(); 1096 if (value_hi == 0) { 1097 tmp = G0; 1098 } else { 1099 __ set(value_hi, O7); 1100 } 1101 __ stw(tmp, addr.base(), addr.disp() + hi_word_offset_in_bytes); 1102 break; 1103 } 1104 default: 1105 Unimplemented(); 1106 } 1107 } 1108 1109 1110 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) { 1111 LIR_Const* c = src->as_constant_ptr(); 1112 LIR_Address* addr = dest->as_address_ptr(); 1113 Register base = addr->base()->as_pointer_register(); 1114 int offset = -1; 1115 1116 switch (c->type()) { 1117 case T_INT: 1118 case T_FLOAT: 1119 case T_ADDRESS: { 1120 LIR_Opr tmp = FrameMap::O7_opr; 1121 int value = c->as_jint_bits(); 1122 if (value == 0) { 1123 tmp = FrameMap::G0_opr; 1124 } else if (Assembler::is_simm13(value)) { 1125 __ set(value, O7); 1126 } 1127 if (addr->index()->is_valid()) { 1128 assert(addr->disp() == 0, "must be zero"); 1129 offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide); 1130 } else { 1131 assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses"); 1132 offset = store(tmp, base, addr->disp(), type, wide, false); 1133 } 1134 break; 1135 } 1136 case T_LONG: 1137 case T_DOUBLE: { 1138 assert(!addr->index()->is_valid(), "can't handle reg reg address here"); 1139 assert(Assembler::is_simm13(addr->disp()) && 1140 Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses"); 1141 1142 LIR_Opr tmp = FrameMap::O7_opr; 1143 int value_lo = c->as_jint_lo_bits(); 1144 if (value_lo == 0) { 1145 tmp = FrameMap::G0_opr; 1146 } else { 1147 __ set(value_lo, O7); 1148 } 1149 offset = store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT, wide, false); 1150 int value_hi = c->as_jint_hi_bits(); 1151 if (value_hi == 0) { 1152 tmp = FrameMap::G0_opr; 1153 } else { 1154 __ set(value_hi, O7); 1155 } 1156 store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT, wide, false); 1157 break; 1158 } 1159 case T_OBJECT: { 1160 jobject obj = c->as_jobject(); 1161 LIR_Opr tmp; 1162 if (obj == NULL) { 1163 tmp = FrameMap::G0_opr; 1164 } else { 1165 tmp = FrameMap::O7_opr; 1166 jobject2reg(c->as_jobject(), O7); 1167 } 1168 // handle either reg+reg or reg+disp address 1169 if (addr->index()->is_valid()) { 1170 assert(addr->disp() == 0, "must be zero"); 1171 offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide); 1172 } else { 1173 assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses"); 1174 offset = store(tmp, base, addr->disp(), type, wide, false); 1175 } 1176 1177 break; 1178 } 1179 default: 1180 Unimplemented(); 1181 } 1182 if (info != NULL) { 1183 assert(offset != -1, "offset should've been set"); 1184 add_debug_info_for_null_check(offset, info); 1185 } 1186 } 1187 1188 1189 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 1190 LIR_Const* c = src->as_constant_ptr(); 1191 LIR_Opr to_reg = dest; 1192 1193 switch (c->type()) { 1194 case T_INT: 1195 case T_ADDRESS: 1196 { 1197 jint con = c->as_jint(); 1198 if (to_reg->is_single_cpu()) { 1199 assert(patch_code == lir_patch_none, "no patching handled here"); 1200 __ set(con, to_reg->as_register()); 1201 } else { 1202 ShouldNotReachHere(); 1203 assert(to_reg->is_single_fpu(), "wrong register kind"); 1204 1205 __ set(con, O7); 1206 Address temp_slot(SP, (frame::register_save_words * wordSize) + STACK_BIAS); 1207 __ st(O7, temp_slot); 1208 __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg()); 1209 } 1210 } 1211 break; 1212 1213 case T_LONG: 1214 { 1215 jlong con = c->as_jlong(); 1216 1217 if (to_reg->is_double_cpu()) { 1218 #ifdef _LP64 1219 __ set(con, to_reg->as_register_lo()); 1220 #else 1221 __ set(low(con), to_reg->as_register_lo()); 1222 __ set(high(con), to_reg->as_register_hi()); 1223 #endif 1224 #ifdef _LP64 1225 } else if (to_reg->is_single_cpu()) { 1226 __ set(con, to_reg->as_register()); 1227 #endif 1228 } else { 1229 ShouldNotReachHere(); 1230 assert(to_reg->is_double_fpu(), "wrong register kind"); 1231 Address temp_slot_lo(SP, ((frame::register_save_words ) * wordSize) + STACK_BIAS); 1232 Address temp_slot_hi(SP, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS); 1233 __ set(low(con), O7); 1234 __ st(O7, temp_slot_lo); 1235 __ set(high(con), O7); 1236 __ st(O7, temp_slot_hi); 1237 __ ldf(FloatRegisterImpl::D, temp_slot_lo, to_reg->as_double_reg()); 1238 } 1239 } 1240 break; 1241 1242 case T_OBJECT: 1243 { 1244 if (patch_code == lir_patch_none) { 1245 jobject2reg(c->as_jobject(), to_reg->as_register()); 1246 } else { 1247 jobject2reg_with_patching(to_reg->as_register(), info); 1248 } 1249 } 1250 break; 1251 1252 case T_METADATA: 1253 { 1254 if (patch_code == lir_patch_none) { 1255 metadata2reg(c->as_metadata(), to_reg->as_register()); 1256 } else { 1257 klass2reg_with_patching(to_reg->as_register(), info); 1258 } 1259 } 1260 break; 1261 1262 case T_FLOAT: 1263 { 1264 address const_addr = __ float_constant(c->as_jfloat()); 1265 if (const_addr == NULL) { 1266 bailout("const section overflow"); 1267 break; 1268 } 1269 RelocationHolder rspec = internal_word_Relocation::spec(const_addr); 1270 AddressLiteral const_addrlit(const_addr, rspec); 1271 if (to_reg->is_single_fpu()) { 1272 __ patchable_sethi(const_addrlit, O7); 1273 __ relocate(rspec); 1274 __ ldf(FloatRegisterImpl::S, O7, const_addrlit.low10(), to_reg->as_float_reg()); 1275 1276 } else { 1277 assert(to_reg->is_single_cpu(), "Must be a cpu register."); 1278 1279 __ set(const_addrlit, O7); 1280 __ ld(O7, 0, to_reg->as_register()); 1281 } 1282 } 1283 break; 1284 1285 case T_DOUBLE: 1286 { 1287 address const_addr = __ double_constant(c->as_jdouble()); 1288 if (const_addr == NULL) { 1289 bailout("const section overflow"); 1290 break; 1291 } 1292 RelocationHolder rspec = internal_word_Relocation::spec(const_addr); 1293 1294 if (to_reg->is_double_fpu()) { 1295 AddressLiteral const_addrlit(const_addr, rspec); 1296 __ patchable_sethi(const_addrlit, O7); 1297 __ relocate(rspec); 1298 __ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg()); 1299 } else { 1300 assert(to_reg->is_double_cpu(), "Must be a long register."); 1301 #ifdef _LP64 1302 __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo()); 1303 #else 1304 __ set(low(jlong_cast(c->as_jdouble())), to_reg->as_register_lo()); 1305 __ set(high(jlong_cast(c->as_jdouble())), to_reg->as_register_hi()); 1306 #endif 1307 } 1308 1309 } 1310 break; 1311 1312 default: 1313 ShouldNotReachHere(); 1314 } 1315 } 1316 1317 Address LIR_Assembler::as_Address(LIR_Address* addr) { 1318 Register reg = addr->base()->as_pointer_register(); 1319 LIR_Opr index = addr->index(); 1320 if (index->is_illegal()) { 1321 return Address(reg, addr->disp()); 1322 } else { 1323 assert (addr->disp() == 0, "unsupported address mode"); 1324 return Address(reg, index->as_pointer_register()); 1325 } 1326 } 1327 1328 1329 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) { 1330 switch (type) { 1331 case T_INT: 1332 case T_FLOAT: { 1333 Register tmp = O7; 1334 Address from = frame_map()->address_for_slot(src->single_stack_ix()); 1335 Address to = frame_map()->address_for_slot(dest->single_stack_ix()); 1336 __ lduw(from.base(), from.disp(), tmp); 1337 __ stw(tmp, to.base(), to.disp()); 1338 break; 1339 } 1340 case T_OBJECT: { 1341 Register tmp = O7; 1342 Address from = frame_map()->address_for_slot(src->single_stack_ix()); 1343 Address to = frame_map()->address_for_slot(dest->single_stack_ix()); 1344 __ ld_ptr(from.base(), from.disp(), tmp); 1345 __ st_ptr(tmp, to.base(), to.disp()); 1346 break; 1347 } 1348 case T_LONG: 1349 case T_DOUBLE: { 1350 Register tmp = O7; 1351 Address from = frame_map()->address_for_double_slot(src->double_stack_ix()); 1352 Address to = frame_map()->address_for_double_slot(dest->double_stack_ix()); 1353 __ lduw(from.base(), from.disp(), tmp); 1354 __ stw(tmp, to.base(), to.disp()); 1355 __ lduw(from.base(), from.disp() + 4, tmp); 1356 __ stw(tmp, to.base(), to.disp() + 4); 1357 break; 1358 } 1359 1360 default: 1361 ShouldNotReachHere(); 1362 } 1363 } 1364 1365 1366 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) { 1367 Address base = as_Address(addr); 1368 return Address(base.base(), base.disp() + hi_word_offset_in_bytes); 1369 } 1370 1371 1372 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) { 1373 Address base = as_Address(addr); 1374 return Address(base.base(), base.disp() + lo_word_offset_in_bytes); 1375 } 1376 1377 1378 void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type, 1379 LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) { 1380 1381 assert(type != T_METADATA, "load of metadata ptr not supported"); 1382 LIR_Address* addr = src_opr->as_address_ptr(); 1383 LIR_Opr to_reg = dest; 1384 1385 Register src = addr->base()->as_pointer_register(); 1386 Register disp_reg = noreg; 1387 int disp_value = addr->disp(); 1388 bool needs_patching = (patch_code != lir_patch_none); 1389 1390 if (addr->base()->type() == T_OBJECT) { 1391 __ verify_oop(src); 1392 } 1393 1394 PatchingStub* patch = NULL; 1395 if (needs_patching) { 1396 patch = new PatchingStub(_masm, PatchingStub::access_field_id); 1397 assert(!to_reg->is_double_cpu() || 1398 patch_code == lir_patch_none || 1399 patch_code == lir_patch_normal, "patching doesn't match register"); 1400 } 1401 1402 if (addr->index()->is_illegal()) { 1403 if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) { 1404 if (needs_patching) { 1405 __ patchable_set(0, O7); 1406 } else { 1407 __ set(disp_value, O7); 1408 } 1409 disp_reg = O7; 1410 } 1411 } else if (unaligned || PatchALot) { 1412 __ add(src, addr->index()->as_register(), O7); 1413 src = O7; 1414 } else { 1415 disp_reg = addr->index()->as_pointer_register(); 1416 assert(disp_value == 0, "can't handle 3 operand addresses"); 1417 } 1418 1419 // remember the offset of the load. The patching_epilog must be done 1420 // before the call to add_debug_info, otherwise the PcDescs don't get 1421 // entered in increasing order. 1422 int offset = code_offset(); 1423 1424 assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up"); 1425 if (disp_reg == noreg) { 1426 offset = load(src, disp_value, to_reg, type, wide, unaligned); 1427 } else { 1428 assert(!unaligned, "can't handle this"); 1429 offset = load(src, disp_reg, to_reg, type, wide); 1430 } 1431 1432 if (patch != NULL) { 1433 patching_epilog(patch, patch_code, src, info); 1434 } 1435 if (info != NULL) add_debug_info_for_null_check(offset, info); 1436 } 1437 1438 1439 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) { 1440 Address addr; 1441 if (src->is_single_word()) { 1442 addr = frame_map()->address_for_slot(src->single_stack_ix()); 1443 } else if (src->is_double_word()) { 1444 addr = frame_map()->address_for_double_slot(src->double_stack_ix()); 1445 } 1446 1447 bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0; 1448 load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned); 1449 } 1450 1451 1452 void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) { 1453 Address addr; 1454 if (dest->is_single_word()) { 1455 addr = frame_map()->address_for_slot(dest->single_stack_ix()); 1456 } else if (dest->is_double_word()) { 1457 addr = frame_map()->address_for_slot(dest->double_stack_ix()); 1458 } 1459 bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0; 1460 store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned); 1461 } 1462 1463 1464 void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) { 1465 if (from_reg->is_float_kind() && to_reg->is_float_kind()) { 1466 if (from_reg->is_double_fpu()) { 1467 // double to double moves 1468 assert(to_reg->is_double_fpu(), "should match"); 1469 __ fmov(FloatRegisterImpl::D, from_reg->as_double_reg(), to_reg->as_double_reg()); 1470 } else { 1471 // float to float moves 1472 assert(to_reg->is_single_fpu(), "should match"); 1473 __ fmov(FloatRegisterImpl::S, from_reg->as_float_reg(), to_reg->as_float_reg()); 1474 } 1475 } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) { 1476 if (from_reg->is_double_cpu()) { 1477 #ifdef _LP64 1478 __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register()); 1479 #else 1480 assert(to_reg->is_double_cpu() && 1481 from_reg->as_register_hi() != to_reg->as_register_lo() && 1482 from_reg->as_register_lo() != to_reg->as_register_hi(), 1483 "should both be long and not overlap"); 1484 // long to long moves 1485 __ mov(from_reg->as_register_hi(), to_reg->as_register_hi()); 1486 __ mov(from_reg->as_register_lo(), to_reg->as_register_lo()); 1487 #endif 1488 #ifdef _LP64 1489 } else if (to_reg->is_double_cpu()) { 1490 // int to int moves 1491 __ mov(from_reg->as_register(), to_reg->as_register_lo()); 1492 #endif 1493 } else { 1494 // int to int moves 1495 __ mov(from_reg->as_register(), to_reg->as_register()); 1496 } 1497 } else { 1498 ShouldNotReachHere(); 1499 } 1500 if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) { 1501 __ verify_oop(to_reg->as_register()); 1502 } 1503 } 1504 1505 1506 void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type, 1507 LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, 1508 bool wide, bool unaligned) { 1509 assert(type != T_METADATA, "store of metadata ptr not supported"); 1510 LIR_Address* addr = dest->as_address_ptr(); 1511 1512 Register src = addr->base()->as_pointer_register(); 1513 Register disp_reg = noreg; 1514 int disp_value = addr->disp(); 1515 bool needs_patching = (patch_code != lir_patch_none); 1516 1517 if (addr->base()->is_oop_register()) { 1518 __ verify_oop(src); 1519 } 1520 1521 PatchingStub* patch = NULL; 1522 if (needs_patching) { 1523 patch = new PatchingStub(_masm, PatchingStub::access_field_id); 1524 assert(!from_reg->is_double_cpu() || 1525 patch_code == lir_patch_none || 1526 patch_code == lir_patch_normal, "patching doesn't match register"); 1527 } 1528 1529 if (addr->index()->is_illegal()) { 1530 if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) { 1531 if (needs_patching) { 1532 __ patchable_set(0, O7); 1533 } else { 1534 __ set(disp_value, O7); 1535 } 1536 disp_reg = O7; 1537 } 1538 } else if (unaligned || PatchALot) { 1539 __ add(src, addr->index()->as_register(), O7); 1540 src = O7; 1541 } else { 1542 disp_reg = addr->index()->as_pointer_register(); 1543 assert(disp_value == 0, "can't handle 3 operand addresses"); 1544 } 1545 1546 // remember the offset of the store. The patching_epilog must be done 1547 // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get 1548 // entered in increasing order. 1549 int offset; 1550 1551 assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up"); 1552 if (disp_reg == noreg) { 1553 offset = store(from_reg, src, disp_value, type, wide, unaligned); 1554 } else { 1555 assert(!unaligned, "can't handle this"); 1556 offset = store(from_reg, src, disp_reg, type, wide); 1557 } 1558 1559 if (patch != NULL) { 1560 patching_epilog(patch, patch_code, src, info); 1561 } 1562 1563 if (info != NULL) add_debug_info_for_null_check(offset, info); 1564 } 1565 1566 1567 void LIR_Assembler::return_op(LIR_Opr result) { 1568 // the poll may need a register so just pick one that isn't the return register 1569 #if defined(TIERED) && !defined(_LP64) 1570 if (result->type_field() == LIR_OprDesc::long_type) { 1571 // Must move the result to G1 1572 // Must leave proper result in O0,O1 and G1 (TIERED only) 1573 __ sllx(I0, 32, G1); // Shift bits into high G1 1574 __ srl (I1, 0, I1); // Zero extend O1 (harmless?) 1575 __ or3 (I1, G1, G1); // OR 64 bits into G1 1576 #ifdef ASSERT 1577 // mangle it so any problems will show up 1578 __ set(0xdeadbeef, I0); 1579 __ set(0xdeadbeef, I1); 1580 #endif 1581 } 1582 #endif // TIERED 1583 __ set((intptr_t)os::get_polling_page(), L0); 1584 __ relocate(relocInfo::poll_return_type); 1585 __ ld_ptr(L0, 0, G0); 1586 __ ret(); 1587 __ delayed()->restore(); 1588 } 1589 1590 1591 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) { 1592 __ set((intptr_t)os::get_polling_page(), tmp->as_register()); 1593 if (info != NULL) { 1594 add_debug_info_for_branch(info); 1595 } 1596 int offset = __ offset(); 1597 __ relocate(relocInfo::poll_type); 1598 __ ld_ptr(tmp->as_register(), 0, G0); 1599 return offset; 1600 } 1601 1602 1603 void LIR_Assembler::emit_static_call_stub() { 1604 address call_pc = __ pc(); 1605 address stub = __ start_a_stub(call_stub_size); 1606 if (stub == NULL) { 1607 bailout("static call stub overflow"); 1608 return; 1609 } 1610 1611 int start = __ offset(); 1612 __ relocate(static_stub_Relocation::spec(call_pc)); 1613 1614 __ set_metadata(NULL, G5); 1615 // must be set to -1 at code generation time 1616 AddressLiteral addrlit(-1); 1617 __ jump_to(addrlit, G3); 1618 __ delayed()->nop(); 1619 1620 assert(__ offset() - start <= call_stub_size, "stub too big"); 1621 __ end_a_stub(); 1622 } 1623 1624 1625 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) { 1626 if (opr1->is_single_fpu()) { 1627 __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, opr1->as_float_reg(), opr2->as_float_reg()); 1628 } else if (opr1->is_double_fpu()) { 1629 __ fcmp(FloatRegisterImpl::D, Assembler::fcc0, opr1->as_double_reg(), opr2->as_double_reg()); 1630 } else if (opr1->is_single_cpu()) { 1631 if (opr2->is_constant()) { 1632 switch (opr2->as_constant_ptr()->type()) { 1633 case T_INT: 1634 { jint con = opr2->as_constant_ptr()->as_jint(); 1635 if (Assembler::is_simm13(con)) { 1636 __ cmp(opr1->as_register(), con); 1637 } else { 1638 __ set(con, O7); 1639 __ cmp(opr1->as_register(), O7); 1640 } 1641 } 1642 break; 1643 1644 case T_OBJECT: 1645 // there are only equal/notequal comparisions on objects 1646 { jobject con = opr2->as_constant_ptr()->as_jobject(); 1647 if (con == NULL) { 1648 __ cmp(opr1->as_register(), 0); 1649 } else { 1650 jobject2reg(con, O7); 1651 __ cmp(opr1->as_register(), O7); 1652 } 1653 } 1654 break; 1655 1656 default: 1657 ShouldNotReachHere(); 1658 break; 1659 } 1660 } else { 1661 if (opr2->is_address()) { 1662 LIR_Address * addr = opr2->as_address_ptr(); 1663 BasicType type = addr->type(); 1664 if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7); 1665 else __ ld(as_Address(addr), O7); 1666 __ cmp(opr1->as_register(), O7); 1667 } else { 1668 __ cmp(opr1->as_register(), opr2->as_register()); 1669 } 1670 } 1671 } else if (opr1->is_double_cpu()) { 1672 Register xlo = opr1->as_register_lo(); 1673 Register xhi = opr1->as_register_hi(); 1674 if (opr2->is_constant() && opr2->as_jlong() == 0) { 1675 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases"); 1676 #ifdef _LP64 1677 __ orcc(xhi, G0, G0); 1678 #else 1679 __ orcc(xhi, xlo, G0); 1680 #endif 1681 } else if (opr2->is_register()) { 1682 Register ylo = opr2->as_register_lo(); 1683 Register yhi = opr2->as_register_hi(); 1684 #ifdef _LP64 1685 __ cmp(xlo, ylo); 1686 #else 1687 __ subcc(xlo, ylo, xlo); 1688 __ subccc(xhi, yhi, xhi); 1689 if (condition == lir_cond_equal || condition == lir_cond_notEqual) { 1690 __ orcc(xhi, xlo, G0); 1691 } 1692 #endif 1693 } else { 1694 ShouldNotReachHere(); 1695 } 1696 } else if (opr1->is_address()) { 1697 LIR_Address * addr = opr1->as_address_ptr(); 1698 BasicType type = addr->type(); 1699 assert (opr2->is_constant(), "Checking"); 1700 if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7); 1701 else __ ld(as_Address(addr), O7); 1702 __ cmp(O7, opr2->as_constant_ptr()->as_jint()); 1703 } else { 1704 ShouldNotReachHere(); 1705 } 1706 } 1707 1708 1709 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){ 1710 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) { 1711 bool is_unordered_less = (code == lir_ucmp_fd2i); 1712 if (left->is_single_fpu()) { 1713 __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register()); 1714 } else if (left->is_double_fpu()) { 1715 __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register()); 1716 } else { 1717 ShouldNotReachHere(); 1718 } 1719 } else if (code == lir_cmp_l2i) { 1720 #ifdef _LP64 1721 __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register()); 1722 #else 1723 __ lcmp(left->as_register_hi(), left->as_register_lo(), 1724 right->as_register_hi(), right->as_register_lo(), 1725 dst->as_register()); 1726 #endif 1727 } else { 1728 ShouldNotReachHere(); 1729 } 1730 } 1731 1732 1733 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) { 1734 Assembler::Condition acond; 1735 switch (condition) { 1736 case lir_cond_equal: acond = Assembler::equal; break; 1737 case lir_cond_notEqual: acond = Assembler::notEqual; break; 1738 case lir_cond_less: acond = Assembler::less; break; 1739 case lir_cond_lessEqual: acond = Assembler::lessEqual; break; 1740 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break; 1741 case lir_cond_greater: acond = Assembler::greater; break; 1742 case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break; 1743 case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break; 1744 default: ShouldNotReachHere(); 1745 }; 1746 1747 if (opr1->is_constant() && opr1->type() == T_INT) { 1748 Register dest = result->as_register(); 1749 // load up first part of constant before branch 1750 // and do the rest in the delay slot. 1751 if (!Assembler::is_simm13(opr1->as_jint())) { 1752 __ sethi(opr1->as_jint(), dest); 1753 } 1754 } else if (opr1->is_constant()) { 1755 const2reg(opr1, result, lir_patch_none, NULL); 1756 } else if (opr1->is_register()) { 1757 reg2reg(opr1, result); 1758 } else if (opr1->is_stack()) { 1759 stack2reg(opr1, result, result->type()); 1760 } else { 1761 ShouldNotReachHere(); 1762 } 1763 Label skip; 1764 #ifdef _LP64 1765 if (type == T_INT) { 1766 __ br(acond, false, Assembler::pt, skip); 1767 } else 1768 #endif 1769 __ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit 1770 if (opr1->is_constant() && opr1->type() == T_INT) { 1771 Register dest = result->as_register(); 1772 if (Assembler::is_simm13(opr1->as_jint())) { 1773 __ delayed()->or3(G0, opr1->as_jint(), dest); 1774 } else { 1775 // the sethi has been done above, so just put in the low 10 bits 1776 __ delayed()->or3(dest, opr1->as_jint() & 0x3ff, dest); 1777 } 1778 } else { 1779 // can't do anything useful in the delay slot 1780 __ delayed()->nop(); 1781 } 1782 if (opr2->is_constant()) { 1783 const2reg(opr2, result, lir_patch_none, NULL); 1784 } else if (opr2->is_register()) { 1785 reg2reg(opr2, result); 1786 } else if (opr2->is_stack()) { 1787 stack2reg(opr2, result, result->type()); 1788 } else { 1789 ShouldNotReachHere(); 1790 } 1791 __ bind(skip); 1792 } 1793 1794 1795 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) { 1796 assert(info == NULL, "unused on this code path"); 1797 assert(left->is_register(), "wrong items state"); 1798 assert(dest->is_register(), "wrong items state"); 1799 1800 if (right->is_register()) { 1801 if (dest->is_float_kind()) { 1802 1803 FloatRegister lreg, rreg, res; 1804 FloatRegisterImpl::Width w; 1805 if (right->is_single_fpu()) { 1806 w = FloatRegisterImpl::S; 1807 lreg = left->as_float_reg(); 1808 rreg = right->as_float_reg(); 1809 res = dest->as_float_reg(); 1810 } else { 1811 w = FloatRegisterImpl::D; 1812 lreg = left->as_double_reg(); 1813 rreg = right->as_double_reg(); 1814 res = dest->as_double_reg(); 1815 } 1816 1817 switch (code) { 1818 case lir_add: __ fadd(w, lreg, rreg, res); break; 1819 case lir_sub: __ fsub(w, lreg, rreg, res); break; 1820 case lir_mul: // fall through 1821 case lir_mul_strictfp: __ fmul(w, lreg, rreg, res); break; 1822 case lir_div: // fall through 1823 case lir_div_strictfp: __ fdiv(w, lreg, rreg, res); break; 1824 default: ShouldNotReachHere(); 1825 } 1826 1827 } else if (dest->is_double_cpu()) { 1828 #ifdef _LP64 1829 Register dst_lo = dest->as_register_lo(); 1830 Register op1_lo = left->as_pointer_register(); 1831 Register op2_lo = right->as_pointer_register(); 1832 1833 switch (code) { 1834 case lir_add: 1835 __ add(op1_lo, op2_lo, dst_lo); 1836 break; 1837 1838 case lir_sub: 1839 __ sub(op1_lo, op2_lo, dst_lo); 1840 break; 1841 1842 default: ShouldNotReachHere(); 1843 } 1844 #else 1845 Register op1_lo = left->as_register_lo(); 1846 Register op1_hi = left->as_register_hi(); 1847 Register op2_lo = right->as_register_lo(); 1848 Register op2_hi = right->as_register_hi(); 1849 Register dst_lo = dest->as_register_lo(); 1850 Register dst_hi = dest->as_register_hi(); 1851 1852 switch (code) { 1853 case lir_add: 1854 __ addcc(op1_lo, op2_lo, dst_lo); 1855 __ addc (op1_hi, op2_hi, dst_hi); 1856 break; 1857 1858 case lir_sub: 1859 __ subcc(op1_lo, op2_lo, dst_lo); 1860 __ subc (op1_hi, op2_hi, dst_hi); 1861 break; 1862 1863 default: ShouldNotReachHere(); 1864 } 1865 #endif 1866 } else { 1867 assert (right->is_single_cpu(), "Just Checking"); 1868 1869 Register lreg = left->as_register(); 1870 Register res = dest->as_register(); 1871 Register rreg = right->as_register(); 1872 switch (code) { 1873 case lir_add: __ add (lreg, rreg, res); break; 1874 case lir_sub: __ sub (lreg, rreg, res); break; 1875 case lir_mul: __ mulx (lreg, rreg, res); break; 1876 default: ShouldNotReachHere(); 1877 } 1878 } 1879 } else { 1880 assert (right->is_constant(), "must be constant"); 1881 1882 if (dest->is_single_cpu()) { 1883 Register lreg = left->as_register(); 1884 Register res = dest->as_register(); 1885 int simm13 = right->as_constant_ptr()->as_jint(); 1886 1887 switch (code) { 1888 case lir_add: __ add (lreg, simm13, res); break; 1889 case lir_sub: __ sub (lreg, simm13, res); break; 1890 case lir_mul: __ mulx (lreg, simm13, res); break; 1891 default: ShouldNotReachHere(); 1892 } 1893 } else { 1894 Register lreg = left->as_pointer_register(); 1895 Register res = dest->as_register_lo(); 1896 long con = right->as_constant_ptr()->as_jlong(); 1897 assert(Assembler::is_simm13(con), "must be simm13"); 1898 1899 switch (code) { 1900 case lir_add: __ add (lreg, (int)con, res); break; 1901 case lir_sub: __ sub (lreg, (int)con, res); break; 1902 case lir_mul: __ mulx (lreg, (int)con, res); break; 1903 default: ShouldNotReachHere(); 1904 } 1905 } 1906 } 1907 } 1908 1909 1910 void LIR_Assembler::fpop() { 1911 // do nothing 1912 } 1913 1914 1915 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) { 1916 switch (code) { 1917 case lir_sin: 1918 case lir_tan: 1919 case lir_cos: { 1920 assert(thread->is_valid(), "preserve the thread object for performance reasons"); 1921 assert(dest->as_double_reg() == F0, "the result will be in f0/f1"); 1922 break; 1923 } 1924 case lir_sqrt: { 1925 assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt"); 1926 FloatRegister src_reg = value->as_double_reg(); 1927 FloatRegister dst_reg = dest->as_double_reg(); 1928 __ fsqrt(FloatRegisterImpl::D, src_reg, dst_reg); 1929 break; 1930 } 1931 case lir_abs: { 1932 assert(!thread->is_valid(), "there is no need for a thread_reg for fabs"); 1933 FloatRegister src_reg = value->as_double_reg(); 1934 FloatRegister dst_reg = dest->as_double_reg(); 1935 __ fabs(FloatRegisterImpl::D, src_reg, dst_reg); 1936 break; 1937 } 1938 default: { 1939 ShouldNotReachHere(); 1940 break; 1941 } 1942 } 1943 } 1944 1945 1946 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) { 1947 if (right->is_constant()) { 1948 if (dest->is_single_cpu()) { 1949 int simm13 = right->as_constant_ptr()->as_jint(); 1950 switch (code) { 1951 case lir_logic_and: __ and3 (left->as_register(), simm13, dest->as_register()); break; 1952 case lir_logic_or: __ or3 (left->as_register(), simm13, dest->as_register()); break; 1953 case lir_logic_xor: __ xor3 (left->as_register(), simm13, dest->as_register()); break; 1954 default: ShouldNotReachHere(); 1955 } 1956 } else { 1957 long c = right->as_constant_ptr()->as_jlong(); 1958 assert(c == (int)c && Assembler::is_simm13(c), "out of range"); 1959 int simm13 = (int)c; 1960 switch (code) { 1961 case lir_logic_and: 1962 #ifndef _LP64 1963 __ and3 (left->as_register_hi(), 0, dest->as_register_hi()); 1964 #endif 1965 __ and3 (left->as_register_lo(), simm13, dest->as_register_lo()); 1966 break; 1967 1968 case lir_logic_or: 1969 #ifndef _LP64 1970 __ or3 (left->as_register_hi(), 0, dest->as_register_hi()); 1971 #endif 1972 __ or3 (left->as_register_lo(), simm13, dest->as_register_lo()); 1973 break; 1974 1975 case lir_logic_xor: 1976 #ifndef _LP64 1977 __ xor3 (left->as_register_hi(), 0, dest->as_register_hi()); 1978 #endif 1979 __ xor3 (left->as_register_lo(), simm13, dest->as_register_lo()); 1980 break; 1981 1982 default: ShouldNotReachHere(); 1983 } 1984 } 1985 } else { 1986 assert(right->is_register(), "right should be in register"); 1987 1988 if (dest->is_single_cpu()) { 1989 switch (code) { 1990 case lir_logic_and: __ and3 (left->as_register(), right->as_register(), dest->as_register()); break; 1991 case lir_logic_or: __ or3 (left->as_register(), right->as_register(), dest->as_register()); break; 1992 case lir_logic_xor: __ xor3 (left->as_register(), right->as_register(), dest->as_register()); break; 1993 default: ShouldNotReachHere(); 1994 } 1995 } else { 1996 #ifdef _LP64 1997 Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() : 1998 left->as_register_lo(); 1999 Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() : 2000 right->as_register_lo(); 2001 2002 switch (code) { 2003 case lir_logic_and: __ and3 (l, r, dest->as_register_lo()); break; 2004 case lir_logic_or: __ or3 (l, r, dest->as_register_lo()); break; 2005 case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break; 2006 default: ShouldNotReachHere(); 2007 } 2008 #else 2009 switch (code) { 2010 case lir_logic_and: 2011 __ and3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi()); 2012 __ and3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo()); 2013 break; 2014 2015 case lir_logic_or: 2016 __ or3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi()); 2017 __ or3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo()); 2018 break; 2019 2020 case lir_logic_xor: 2021 __ xor3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi()); 2022 __ xor3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo()); 2023 break; 2024 2025 default: ShouldNotReachHere(); 2026 } 2027 #endif 2028 } 2029 } 2030 } 2031 2032 2033 int LIR_Assembler::shift_amount(BasicType t) { 2034 int elem_size = type2aelembytes(t); 2035 switch (elem_size) { 2036 case 1 : return 0; 2037 case 2 : return 1; 2038 case 4 : return 2; 2039 case 8 : return 3; 2040 } 2041 ShouldNotReachHere(); 2042 return -1; 2043 } 2044 2045 2046 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { 2047 assert(exceptionOop->as_register() == Oexception, "should match"); 2048 assert(exceptionPC->as_register() == Oissuing_pc, "should match"); 2049 2050 info->add_register_oop(exceptionOop); 2051 2052 // reuse the debug info from the safepoint poll for the throw op itself 2053 address pc_for_athrow = __ pc(); 2054 int pc_for_athrow_offset = __ offset(); 2055 RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow); 2056 __ set(pc_for_athrow, Oissuing_pc, rspec); 2057 add_call_info(pc_for_athrow_offset, info); // for exception handler 2058 2059 __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type); 2060 __ delayed()->nop(); 2061 } 2062 2063 2064 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { 2065 assert(exceptionOop->as_register() == Oexception, "should match"); 2066 2067 __ br(Assembler::always, false, Assembler::pt, _unwind_handler_entry); 2068 __ delayed()->nop(); 2069 } 2070 2071 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { 2072 Register src = op->src()->as_register(); 2073 Register dst = op->dst()->as_register(); 2074 Register src_pos = op->src_pos()->as_register(); 2075 Register dst_pos = op->dst_pos()->as_register(); 2076 Register length = op->length()->as_register(); 2077 Register tmp = op->tmp()->as_register(); 2078 Register tmp2 = O7; 2079 2080 int flags = op->flags(); 2081 ciArrayKlass* default_type = op->expected_type(); 2082 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL; 2083 if (basic_type == T_ARRAY) basic_type = T_OBJECT; 2084 2085 #ifdef _LP64 2086 // higher 32bits must be null 2087 __ sra(dst_pos, 0, dst_pos); 2088 __ sra(src_pos, 0, src_pos); 2089 __ sra(length, 0, length); 2090 #endif 2091 2092 // set up the arraycopy stub information 2093 ArrayCopyStub* stub = op->stub(); 2094 2095 // always do stub if no type information is available. it's ok if 2096 // the known type isn't loaded since the code sanity checks 2097 // in debug mode and the type isn't required when we know the exact type 2098 // also check that the type is an array type. 2099 if (op->expected_type() == NULL) { 2100 __ mov(src, O0); 2101 __ mov(src_pos, O1); 2102 __ mov(dst, O2); 2103 __ mov(dst_pos, O3); 2104 __ mov(length, O4); 2105 address copyfunc_addr = StubRoutines::generic_arraycopy(); 2106 2107 if (copyfunc_addr == NULL) { // Use C version if stub was not generated 2108 __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy)); 2109 } else { 2110 #ifndef PRODUCT 2111 if (PrintC1Statistics) { 2112 address counter = (address)&Runtime1::_generic_arraycopystub_cnt; 2113 __ inc_counter(counter, G1, G3); 2114 } 2115 #endif 2116 __ call_VM_leaf(tmp, copyfunc_addr); 2117 } 2118 2119 if (copyfunc_addr != NULL) { 2120 __ xor3(O0, -1, tmp); 2121 __ sub(length, tmp, length); 2122 __ add(src_pos, tmp, src_pos); 2123 __ cmp_zero_and_br(Assembler::less, O0, *stub->entry()); 2124 __ delayed()->add(dst_pos, tmp, dst_pos); 2125 } else { 2126 __ cmp_zero_and_br(Assembler::less, O0, *stub->entry()); 2127 __ delayed()->nop(); 2128 } 2129 __ bind(*stub->continuation()); 2130 return; 2131 } 2132 2133 assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point"); 2134 2135 // make sure src and dst are non-null and load array length 2136 if (flags & LIR_OpArrayCopy::src_null_check) { 2137 __ tst(src); 2138 __ brx(Assembler::equal, false, Assembler::pn, *stub->entry()); 2139 __ delayed()->nop(); 2140 } 2141 2142 if (flags & LIR_OpArrayCopy::dst_null_check) { 2143 __ tst(dst); 2144 __ brx(Assembler::equal, false, Assembler::pn, *stub->entry()); 2145 __ delayed()->nop(); 2146 } 2147 2148 if (flags & LIR_OpArrayCopy::src_pos_positive_check) { 2149 // test src_pos register 2150 __ cmp_zero_and_br(Assembler::less, src_pos, *stub->entry()); 2151 __ delayed()->nop(); 2152 } 2153 2154 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) { 2155 // test dst_pos register 2156 __ cmp_zero_and_br(Assembler::less, dst_pos, *stub->entry()); 2157 __ delayed()->nop(); 2158 } 2159 2160 if (flags & LIR_OpArrayCopy::length_positive_check) { 2161 // make sure length isn't negative 2162 __ cmp_zero_and_br(Assembler::less, length, *stub->entry()); 2163 __ delayed()->nop(); 2164 } 2165 2166 if (flags & LIR_OpArrayCopy::src_range_check) { 2167 __ ld(src, arrayOopDesc::length_offset_in_bytes(), tmp2); 2168 __ add(length, src_pos, tmp); 2169 __ cmp(tmp2, tmp); 2170 __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry()); 2171 __ delayed()->nop(); 2172 } 2173 2174 if (flags & LIR_OpArrayCopy::dst_range_check) { 2175 __ ld(dst, arrayOopDesc::length_offset_in_bytes(), tmp2); 2176 __ add(length, dst_pos, tmp); 2177 __ cmp(tmp2, tmp); 2178 __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry()); 2179 __ delayed()->nop(); 2180 } 2181 2182 int shift = shift_amount(basic_type); 2183 2184 if (flags & LIR_OpArrayCopy::type_check) { 2185 // We don't know the array types are compatible 2186 if (basic_type != T_OBJECT) { 2187 // Simple test for basic type arrays 2188 if (UseCompressedClassPointers) { 2189 // We don't need decode because we just need to compare 2190 __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp); 2191 __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2); 2192 __ cmp(tmp, tmp2); 2193 __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry()); 2194 } else { 2195 __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp); 2196 __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2); 2197 __ cmp(tmp, tmp2); 2198 __ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry()); 2199 } 2200 __ delayed()->nop(); 2201 } else { 2202 // For object arrays, if src is a sub class of dst then we can 2203 // safely do the copy. 2204 address copyfunc_addr = StubRoutines::checkcast_arraycopy(); 2205 2206 Label cont, slow; 2207 assert_different_registers(tmp, tmp2, G3, G1); 2208 2209 __ load_klass(src, G3); 2210 __ load_klass(dst, G1); 2211 2212 __ check_klass_subtype_fast_path(G3, G1, tmp, tmp2, &cont, copyfunc_addr == NULL ? stub->entry() : &slow, NULL); 2213 2214 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); 2215 __ delayed()->nop(); 2216 2217 __ cmp(G3, 0); 2218 if (copyfunc_addr != NULL) { // use stub if available 2219 // src is not a sub class of dst so we have to do a 2220 // per-element check. 2221 __ br(Assembler::notEqual, false, Assembler::pt, cont); 2222 __ delayed()->nop(); 2223 2224 __ bind(slow); 2225 2226 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray; 2227 if ((flags & mask) != mask) { 2228 // Check that at least both of them object arrays. 2229 assert(flags & mask, "one of the two should be known to be an object array"); 2230 2231 if (!(flags & LIR_OpArrayCopy::src_objarray)) { 2232 __ load_klass(src, tmp); 2233 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) { 2234 __ load_klass(dst, tmp); 2235 } 2236 int lh_offset = in_bytes(Klass::layout_helper_offset()); 2237 2238 __ lduw(tmp, lh_offset, tmp2); 2239 2240 jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2241 __ set(objArray_lh, tmp); 2242 __ cmp(tmp, tmp2); 2243 __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry()); 2244 __ delayed()->nop(); 2245 } 2246 2247 Register src_ptr = O0; 2248 Register dst_ptr = O1; 2249 Register len = O2; 2250 Register chk_off = O3; 2251 Register super_k = O4; 2252 2253 __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr); 2254 if (shift == 0) { 2255 __ add(src_ptr, src_pos, src_ptr); 2256 } else { 2257 __ sll(src_pos, shift, tmp); 2258 __ add(src_ptr, tmp, src_ptr); 2259 } 2260 2261 __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr); 2262 if (shift == 0) { 2263 __ add(dst_ptr, dst_pos, dst_ptr); 2264 } else { 2265 __ sll(dst_pos, shift, tmp); 2266 __ add(dst_ptr, tmp, dst_ptr); 2267 } 2268 __ mov(length, len); 2269 __ load_klass(dst, tmp); 2270 2271 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 2272 __ ld_ptr(tmp, ek_offset, super_k); 2273 2274 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2275 __ lduw(super_k, sco_offset, chk_off); 2276 2277 __ call_VM_leaf(tmp, copyfunc_addr); 2278 2279 #ifndef PRODUCT 2280 if (PrintC1Statistics) { 2281 Label failed; 2282 __ br_notnull_short(O0, Assembler::pn, failed); 2283 __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, G1, G3); 2284 __ bind(failed); 2285 } 2286 #endif 2287 2288 __ br_null(O0, false, Assembler::pt, *stub->continuation()); 2289 __ delayed()->xor3(O0, -1, tmp); 2290 2291 #ifndef PRODUCT 2292 if (PrintC1Statistics) { 2293 __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, G1, G3); 2294 } 2295 #endif 2296 2297 __ sub(length, tmp, length); 2298 __ add(src_pos, tmp, src_pos); 2299 __ br(Assembler::always, false, Assembler::pt, *stub->entry()); 2300 __ delayed()->add(dst_pos, tmp, dst_pos); 2301 2302 __ bind(cont); 2303 } else { 2304 __ br(Assembler::equal, false, Assembler::pn, *stub->entry()); 2305 __ delayed()->nop(); 2306 __ bind(cont); 2307 } 2308 } 2309 } 2310 2311 #ifdef ASSERT 2312 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) { 2313 // Sanity check the known type with the incoming class. For the 2314 // primitive case the types must match exactly with src.klass and 2315 // dst.klass each exactly matching the default type. For the 2316 // object array case, if no type check is needed then either the 2317 // dst type is exactly the expected type and the src type is a 2318 // subtype which we can't check or src is the same array as dst 2319 // but not necessarily exactly of type default_type. 2320 Label known_ok, halt; 2321 metadata2reg(op->expected_type()->constant_encoding(), tmp); 2322 if (UseCompressedClassPointers) { 2323 // tmp holds the default type. It currently comes uncompressed after the 2324 // load of a constant, so encode it. 2325 __ encode_klass_not_null(tmp); 2326 // load the raw value of the dst klass, since we will be comparing 2327 // uncompressed values directly. 2328 __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2); 2329 if (basic_type != T_OBJECT) { 2330 __ cmp(tmp, tmp2); 2331 __ br(Assembler::notEqual, false, Assembler::pn, halt); 2332 // load the raw value of the src klass. 2333 __ delayed()->lduw(src, oopDesc::klass_offset_in_bytes(), tmp2); 2334 __ cmp_and_br_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok); 2335 } else { 2336 __ cmp(tmp, tmp2); 2337 __ br(Assembler::equal, false, Assembler::pn, known_ok); 2338 __ delayed()->cmp(src, dst); 2339 __ brx(Assembler::equal, false, Assembler::pn, known_ok); 2340 __ delayed()->nop(); 2341 } 2342 } else { 2343 __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2); 2344 if (basic_type != T_OBJECT) { 2345 __ cmp(tmp, tmp2); 2346 __ brx(Assembler::notEqual, false, Assembler::pn, halt); 2347 __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2); 2348 __ cmp_and_brx_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok); 2349 } else { 2350 __ cmp(tmp, tmp2); 2351 __ brx(Assembler::equal, false, Assembler::pn, known_ok); 2352 __ delayed()->cmp(src, dst); 2353 __ brx(Assembler::equal, false, Assembler::pn, known_ok); 2354 __ delayed()->nop(); 2355 } 2356 } 2357 __ bind(halt); 2358 __ stop("incorrect type information in arraycopy"); 2359 __ bind(known_ok); 2360 } 2361 #endif 2362 2363 #ifndef PRODUCT 2364 if (PrintC1Statistics) { 2365 address counter = Runtime1::arraycopy_count_address(basic_type); 2366 __ inc_counter(counter, G1, G3); 2367 } 2368 #endif 2369 2370 Register src_ptr = O0; 2371 Register dst_ptr = O1; 2372 Register len = O2; 2373 2374 __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr); 2375 if (shift == 0) { 2376 __ add(src_ptr, src_pos, src_ptr); 2377 } else { 2378 __ sll(src_pos, shift, tmp); 2379 __ add(src_ptr, tmp, src_ptr); 2380 } 2381 2382 __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr); 2383 if (shift == 0) { 2384 __ add(dst_ptr, dst_pos, dst_ptr); 2385 } else { 2386 __ sll(dst_pos, shift, tmp); 2387 __ add(dst_ptr, tmp, dst_ptr); 2388 } 2389 2390 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0; 2391 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0; 2392 const char *name; 2393 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false); 2394 2395 // arraycopy stubs takes a length in number of elements, so don't scale it. 2396 __ mov(length, len); 2397 __ call_VM_leaf(tmp, entry); 2398 2399 __ bind(*stub->continuation()); 2400 } 2401 2402 2403 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { 2404 if (dest->is_single_cpu()) { 2405 #ifdef _LP64 2406 if (left->type() == T_OBJECT) { 2407 switch (code) { 2408 case lir_shl: __ sllx (left->as_register(), count->as_register(), dest->as_register()); break; 2409 case lir_shr: __ srax (left->as_register(), count->as_register(), dest->as_register()); break; 2410 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break; 2411 default: ShouldNotReachHere(); 2412 } 2413 } else 2414 #endif 2415 switch (code) { 2416 case lir_shl: __ sll (left->as_register(), count->as_register(), dest->as_register()); break; 2417 case lir_shr: __ sra (left->as_register(), count->as_register(), dest->as_register()); break; 2418 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break; 2419 default: ShouldNotReachHere(); 2420 } 2421 } else { 2422 #ifdef _LP64 2423 switch (code) { 2424 case lir_shl: __ sllx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break; 2425 case lir_shr: __ srax (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break; 2426 case lir_ushr: __ srlx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break; 2427 default: ShouldNotReachHere(); 2428 } 2429 #else 2430 switch (code) { 2431 case lir_shl: __ lshl (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break; 2432 case lir_shr: __ lshr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break; 2433 case lir_ushr: __ lushr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break; 2434 default: ShouldNotReachHere(); 2435 } 2436 #endif 2437 } 2438 } 2439 2440 2441 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { 2442 #ifdef _LP64 2443 if (left->type() == T_OBJECT) { 2444 count = count & 63; // shouldn't shift by more than sizeof(intptr_t) 2445 Register l = left->as_register(); 2446 Register d = dest->as_register_lo(); 2447 switch (code) { 2448 case lir_shl: __ sllx (l, count, d); break; 2449 case lir_shr: __ srax (l, count, d); break; 2450 case lir_ushr: __ srlx (l, count, d); break; 2451 default: ShouldNotReachHere(); 2452 } 2453 return; 2454 } 2455 #endif 2456 2457 if (dest->is_single_cpu()) { 2458 count = count & 0x1F; // Java spec 2459 switch (code) { 2460 case lir_shl: __ sll (left->as_register(), count, dest->as_register()); break; 2461 case lir_shr: __ sra (left->as_register(), count, dest->as_register()); break; 2462 case lir_ushr: __ srl (left->as_register(), count, dest->as_register()); break; 2463 default: ShouldNotReachHere(); 2464 } 2465 } else if (dest->is_double_cpu()) { 2466 count = count & 63; // Java spec 2467 switch (code) { 2468 case lir_shl: __ sllx (left->as_pointer_register(), count, dest->as_pointer_register()); break; 2469 case lir_shr: __ srax (left->as_pointer_register(), count, dest->as_pointer_register()); break; 2470 case lir_ushr: __ srlx (left->as_pointer_register(), count, dest->as_pointer_register()); break; 2471 default: ShouldNotReachHere(); 2472 } 2473 } else { 2474 ShouldNotReachHere(); 2475 } 2476 } 2477 2478 2479 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) { 2480 assert(op->tmp1()->as_register() == G1 && 2481 op->tmp2()->as_register() == G3 && 2482 op->tmp3()->as_register() == G4 && 2483 op->obj()->as_register() == O0 && 2484 op->klass()->as_register() == G5, "must be"); 2485 if (op->init_check()) { 2486 __ ldub(op->klass()->as_register(), 2487 in_bytes(InstanceKlass::init_state_offset()), 2488 op->tmp1()->as_register()); 2489 add_debug_info_for_null_check_here(op->stub()->info()); 2490 __ cmp(op->tmp1()->as_register(), InstanceKlass::fully_initialized); 2491 __ br(Assembler::notEqual, false, Assembler::pn, *op->stub()->entry()); 2492 __ delayed()->nop(); 2493 } 2494 __ allocate_object(op->obj()->as_register(), 2495 op->tmp1()->as_register(), 2496 op->tmp2()->as_register(), 2497 op->tmp3()->as_register(), 2498 op->header_size(), 2499 op->object_size(), 2500 op->klass()->as_register(), 2501 *op->stub()->entry()); 2502 __ bind(*op->stub()->continuation()); 2503 __ verify_oop(op->obj()->as_register()); 2504 } 2505 2506 2507 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { 2508 assert(op->tmp1()->as_register() == G1 && 2509 op->tmp2()->as_register() == G3 && 2510 op->tmp3()->as_register() == G4 && 2511 op->tmp4()->as_register() == O1 && 2512 op->klass()->as_register() == G5, "must be"); 2513 2514 LP64_ONLY( __ signx(op->len()->as_register()); ) 2515 if (UseSlowPath || 2516 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) || 2517 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) { 2518 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry()); 2519 __ delayed()->nop(); 2520 } else { 2521 __ allocate_array(op->obj()->as_register(), 2522 op->len()->as_register(), 2523 op->tmp1()->as_register(), 2524 op->tmp2()->as_register(), 2525 op->tmp3()->as_register(), 2526 arrayOopDesc::header_size(op->type()), 2527 type2aelembytes(op->type()), 2528 op->klass()->as_register(), 2529 *op->stub()->entry()); 2530 } 2531 __ bind(*op->stub()->continuation()); 2532 } 2533 2534 2535 void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias, 2536 ciMethodData *md, ciProfileData *data, 2537 Register recv, Register tmp1, Label* update_done) { 2538 uint i; 2539 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2540 Label next_test; 2541 // See if the receiver is receiver[n]. 2542 Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - 2543 mdo_offset_bias); 2544 __ ld_ptr(receiver_addr, tmp1); 2545 __ verify_klass_ptr(tmp1); 2546 __ cmp_and_brx_short(recv, tmp1, Assembler::notEqual, Assembler::pt, next_test); 2547 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - 2548 mdo_offset_bias); 2549 __ ld_ptr(data_addr, tmp1); 2550 __ add(tmp1, DataLayout::counter_increment, tmp1); 2551 __ st_ptr(tmp1, data_addr); 2552 __ ba(*update_done); 2553 __ delayed()->nop(); 2554 __ bind(next_test); 2555 } 2556 2557 // Didn't find receiver; find next empty slot and fill it in 2558 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2559 Label next_test; 2560 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - 2561 mdo_offset_bias); 2562 __ ld_ptr(recv_addr, tmp1); 2563 __ br_notnull_short(tmp1, Assembler::pt, next_test); 2564 __ st_ptr(recv, recv_addr); 2565 __ set(DataLayout::counter_increment, tmp1); 2566 __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - 2567 mdo_offset_bias); 2568 __ ba(*update_done); 2569 __ delayed()->nop(); 2570 __ bind(next_test); 2571 } 2572 } 2573 2574 2575 void LIR_Assembler::setup_md_access(ciMethod* method, int bci, 2576 ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) { 2577 md = method->method_data_or_null(); 2578 assert(md != NULL, "Sanity"); 2579 data = md->bci_to_data(bci); 2580 assert(data != NULL, "need data for checkcast"); 2581 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 2582 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) { 2583 // The offset is large so bias the mdo by the base of the slot so 2584 // that the ld can use simm13s to reference the slots of the data 2585 mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset()); 2586 } 2587 } 2588 2589 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) { 2590 // we always need a stub for the failure case. 2591 CodeStub* stub = op->stub(); 2592 Register obj = op->object()->as_register(); 2593 Register k_RInfo = op->tmp1()->as_register(); 2594 Register klass_RInfo = op->tmp2()->as_register(); 2595 Register dst = op->result_opr()->as_register(); 2596 Register Rtmp1 = op->tmp3()->as_register(); 2597 ciKlass* k = op->klass(); 2598 2599 2600 if (obj == k_RInfo) { 2601 k_RInfo = klass_RInfo; 2602 klass_RInfo = obj; 2603 } 2604 2605 ciMethodData* md; 2606 ciProfileData* data; 2607 int mdo_offset_bias = 0; 2608 if (op->should_profile()) { 2609 ciMethod* method = op->profiled_method(); 2610 assert(method != NULL, "Should have method"); 2611 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias); 2612 2613 Label not_null; 2614 __ br_notnull_short(obj, Assembler::pn, not_null); 2615 Register mdo = k_RInfo; 2616 Register data_val = Rtmp1; 2617 metadata2reg(md->constant_encoding(), mdo); 2618 if (mdo_offset_bias > 0) { 2619 __ set(mdo_offset_bias, data_val); 2620 __ add(mdo, data_val, mdo); 2621 } 2622 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias); 2623 __ ldub(flags_addr, data_val); 2624 __ or3(data_val, BitData::null_seen_byte_constant(), data_val); 2625 __ stb(data_val, flags_addr); 2626 __ ba(*obj_is_null); 2627 __ delayed()->nop(); 2628 __ bind(not_null); 2629 } else { 2630 __ br_null(obj, false, Assembler::pn, *obj_is_null); 2631 __ delayed()->nop(); 2632 } 2633 2634 Label profile_cast_failure, profile_cast_success; 2635 Label *failure_target = op->should_profile() ? &profile_cast_failure : failure; 2636 Label *success_target = op->should_profile() ? &profile_cast_success : success; 2637 2638 // patching may screw with our temporaries on sparc, 2639 // so let's do it before loading the class 2640 if (k->is_loaded()) { 2641 metadata2reg(k->constant_encoding(), k_RInfo); 2642 } else { 2643 klass2reg_with_patching(k_RInfo, op->info_for_patch()); 2644 } 2645 assert(obj != k_RInfo, "must be different"); 2646 2647 // get object class 2648 // not a safepoint as obj null check happens earlier 2649 __ load_klass(obj, klass_RInfo); 2650 if (op->fast_check()) { 2651 assert_different_registers(klass_RInfo, k_RInfo); 2652 __ cmp(k_RInfo, klass_RInfo); 2653 __ brx(Assembler::notEqual, false, Assembler::pt, *failure_target); 2654 __ delayed()->nop(); 2655 } else { 2656 bool need_slow_path = true; 2657 if (k->is_loaded()) { 2658 if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset())) 2659 need_slow_path = false; 2660 // perform the fast part of the checking logic 2661 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg, 2662 (need_slow_path ? success_target : NULL), 2663 failure_target, NULL, 2664 RegisterOrConstant(k->super_check_offset())); 2665 } else { 2666 // perform the fast part of the checking logic 2667 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, 2668 failure_target, NULL); 2669 } 2670 if (need_slow_path) { 2671 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 2672 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup"); 2673 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); 2674 __ delayed()->nop(); 2675 __ cmp(G3, 0); 2676 __ br(Assembler::equal, false, Assembler::pn, *failure_target); 2677 __ delayed()->nop(); 2678 // Fall through to success case 2679 } 2680 } 2681 2682 if (op->should_profile()) { 2683 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1; 2684 assert_different_registers(obj, mdo, recv, tmp1); 2685 __ bind(profile_cast_success); 2686 metadata2reg(md->constant_encoding(), mdo); 2687 if (mdo_offset_bias > 0) { 2688 __ set(mdo_offset_bias, tmp1); 2689 __ add(mdo, tmp1, mdo); 2690 } 2691 __ load_klass(obj, recv); 2692 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success); 2693 // Jump over the failure case 2694 __ ba(*success); 2695 __ delayed()->nop(); 2696 // Cast failure case 2697 __ bind(profile_cast_failure); 2698 metadata2reg(md->constant_encoding(), mdo); 2699 if (mdo_offset_bias > 0) { 2700 __ set(mdo_offset_bias, tmp1); 2701 __ add(mdo, tmp1, mdo); 2702 } 2703 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); 2704 __ ld_ptr(data_addr, tmp1); 2705 __ sub(tmp1, DataLayout::counter_increment, tmp1); 2706 __ st_ptr(tmp1, data_addr); 2707 __ ba(*failure); 2708 __ delayed()->nop(); 2709 } 2710 __ ba(*success); 2711 __ delayed()->nop(); 2712 } 2713 2714 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { 2715 LIR_Code code = op->code(); 2716 if (code == lir_store_check) { 2717 Register value = op->object()->as_register(); 2718 Register array = op->array()->as_register(); 2719 Register k_RInfo = op->tmp1()->as_register(); 2720 Register klass_RInfo = op->tmp2()->as_register(); 2721 Register Rtmp1 = op->tmp3()->as_register(); 2722 2723 __ verify_oop(value); 2724 CodeStub* stub = op->stub(); 2725 // check if it needs to be profiled 2726 ciMethodData* md; 2727 ciProfileData* data; 2728 int mdo_offset_bias = 0; 2729 if (op->should_profile()) { 2730 ciMethod* method = op->profiled_method(); 2731 assert(method != NULL, "Should have method"); 2732 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias); 2733 } 2734 Label profile_cast_success, profile_cast_failure, done; 2735 Label *success_target = op->should_profile() ? &profile_cast_success : &done; 2736 Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry(); 2737 2738 if (op->should_profile()) { 2739 Label not_null; 2740 __ br_notnull_short(value, Assembler::pn, not_null); 2741 Register mdo = k_RInfo; 2742 Register data_val = Rtmp1; 2743 metadata2reg(md->constant_encoding(), mdo); 2744 if (mdo_offset_bias > 0) { 2745 __ set(mdo_offset_bias, data_val); 2746 __ add(mdo, data_val, mdo); 2747 } 2748 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias); 2749 __ ldub(flags_addr, data_val); 2750 __ or3(data_val, BitData::null_seen_byte_constant(), data_val); 2751 __ stb(data_val, flags_addr); 2752 __ ba_short(done); 2753 __ bind(not_null); 2754 } else { 2755 __ br_null_short(value, Assembler::pn, done); 2756 } 2757 add_debug_info_for_null_check_here(op->info_for_exception()); 2758 __ load_klass(array, k_RInfo); 2759 __ load_klass(value, klass_RInfo); 2760 2761 // get instance klass 2762 __ ld_ptr(Address(k_RInfo, ObjArrayKlass::element_klass_offset()), k_RInfo); 2763 // perform the fast part of the checking logic 2764 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL); 2765 2766 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 2767 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup"); 2768 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); 2769 __ delayed()->nop(); 2770 __ cmp(G3, 0); 2771 __ br(Assembler::equal, false, Assembler::pn, *failure_target); 2772 __ delayed()->nop(); 2773 // fall through to the success case 2774 2775 if (op->should_profile()) { 2776 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1; 2777 assert_different_registers(value, mdo, recv, tmp1); 2778 __ bind(profile_cast_success); 2779 metadata2reg(md->constant_encoding(), mdo); 2780 if (mdo_offset_bias > 0) { 2781 __ set(mdo_offset_bias, tmp1); 2782 __ add(mdo, tmp1, mdo); 2783 } 2784 __ load_klass(value, recv); 2785 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done); 2786 __ ba_short(done); 2787 // Cast failure case 2788 __ bind(profile_cast_failure); 2789 metadata2reg(md->constant_encoding(), mdo); 2790 if (mdo_offset_bias > 0) { 2791 __ set(mdo_offset_bias, tmp1); 2792 __ add(mdo, tmp1, mdo); 2793 } 2794 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); 2795 __ ld_ptr(data_addr, tmp1); 2796 __ sub(tmp1, DataLayout::counter_increment, tmp1); 2797 __ st_ptr(tmp1, data_addr); 2798 __ ba(*stub->entry()); 2799 __ delayed()->nop(); 2800 } 2801 __ bind(done); 2802 } else if (code == lir_checkcast) { 2803 Register obj = op->object()->as_register(); 2804 Register dst = op->result_opr()->as_register(); 2805 Label success; 2806 emit_typecheck_helper(op, &success, op->stub()->entry(), &success); 2807 __ bind(success); 2808 __ mov(obj, dst); 2809 } else if (code == lir_instanceof) { 2810 Register obj = op->object()->as_register(); 2811 Register dst = op->result_opr()->as_register(); 2812 Label success, failure, done; 2813 emit_typecheck_helper(op, &success, &failure, &failure); 2814 __ bind(failure); 2815 __ set(0, dst); 2816 __ ba_short(done); 2817 __ bind(success); 2818 __ set(1, dst); 2819 __ bind(done); 2820 } else { 2821 ShouldNotReachHere(); 2822 } 2823 2824 } 2825 2826 2827 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { 2828 if (op->code() == lir_cas_long) { 2829 assert(VM_Version::supports_cx8(), "wrong machine"); 2830 Register addr = op->addr()->as_pointer_register(); 2831 Register cmp_value_lo = op->cmp_value()->as_register_lo(); 2832 Register cmp_value_hi = op->cmp_value()->as_register_hi(); 2833 Register new_value_lo = op->new_value()->as_register_lo(); 2834 Register new_value_hi = op->new_value()->as_register_hi(); 2835 Register t1 = op->tmp1()->as_register(); 2836 Register t2 = op->tmp2()->as_register(); 2837 #ifdef _LP64 2838 __ mov(cmp_value_lo, t1); 2839 __ mov(new_value_lo, t2); 2840 // perform the compare and swap operation 2841 __ casx(addr, t1, t2); 2842 // generate condition code - if the swap succeeded, t2 ("new value" reg) was 2843 // overwritten with the original value in "addr" and will be equal to t1. 2844 __ cmp(t1, t2); 2845 #else 2846 // move high and low halves of long values into single registers 2847 __ sllx(cmp_value_hi, 32, t1); // shift high half into temp reg 2848 __ srl(cmp_value_lo, 0, cmp_value_lo); // clear upper 32 bits of low half 2849 __ or3(t1, cmp_value_lo, t1); // t1 holds 64-bit compare value 2850 __ sllx(new_value_hi, 32, t2); 2851 __ srl(new_value_lo, 0, new_value_lo); 2852 __ or3(t2, new_value_lo, t2); // t2 holds 64-bit value to swap 2853 // perform the compare and swap operation 2854 __ casx(addr, t1, t2); 2855 // generate condition code - if the swap succeeded, t2 ("new value" reg) was 2856 // overwritten with the original value in "addr" and will be equal to t1. 2857 // Produce icc flag for 32bit. 2858 __ sub(t1, t2, t2); 2859 __ srlx(t2, 32, t1); 2860 __ orcc(t2, t1, G0); 2861 #endif 2862 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) { 2863 Register addr = op->addr()->as_pointer_register(); 2864 Register cmp_value = op->cmp_value()->as_register(); 2865 Register new_value = op->new_value()->as_register(); 2866 Register t1 = op->tmp1()->as_register(); 2867 Register t2 = op->tmp2()->as_register(); 2868 __ mov(cmp_value, t1); 2869 __ mov(new_value, t2); 2870 if (op->code() == lir_cas_obj) { 2871 if (UseCompressedOops) { 2872 __ encode_heap_oop(t1); 2873 __ encode_heap_oop(t2); 2874 __ cas(addr, t1, t2); 2875 } else { 2876 __ cas_ptr(addr, t1, t2); 2877 } 2878 } else { 2879 __ cas(addr, t1, t2); 2880 } 2881 __ cmp(t1, t2); 2882 } else { 2883 Unimplemented(); 2884 } 2885 } 2886 2887 void LIR_Assembler::set_24bit_FPU() { 2888 Unimplemented(); 2889 } 2890 2891 2892 void LIR_Assembler::reset_FPU() { 2893 Unimplemented(); 2894 } 2895 2896 2897 void LIR_Assembler::breakpoint() { 2898 __ breakpoint_trap(); 2899 } 2900 2901 2902 void LIR_Assembler::push(LIR_Opr opr) { 2903 Unimplemented(); 2904 } 2905 2906 2907 void LIR_Assembler::pop(LIR_Opr opr) { 2908 Unimplemented(); 2909 } 2910 2911 2912 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) { 2913 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no); 2914 Register dst = dst_opr->as_register(); 2915 Register reg = mon_addr.base(); 2916 int offset = mon_addr.disp(); 2917 // compute pointer to BasicLock 2918 if (mon_addr.is_simm13()) { 2919 __ add(reg, offset, dst); 2920 } else { 2921 __ set(offset, dst); 2922 __ add(dst, reg, dst); 2923 } 2924 } 2925 2926 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { 2927 fatal("CRC32 intrinsic is not implemented on this platform"); 2928 } 2929 2930 void LIR_Assembler::emit_lock(LIR_OpLock* op) { 2931 Register obj = op->obj_opr()->as_register(); 2932 Register hdr = op->hdr_opr()->as_register(); 2933 Register lock = op->lock_opr()->as_register(); 2934 2935 // obj may not be an oop 2936 if (op->code() == lir_lock) { 2937 MonitorEnterStub* stub = (MonitorEnterStub*)op->stub(); 2938 if (UseFastLocking) { 2939 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2940 // add debug info for NullPointerException only if one is possible 2941 if (op->info() != NULL) { 2942 add_debug_info_for_null_check_here(op->info()); 2943 } 2944 __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry()); 2945 } else { 2946 // always do slow locking 2947 // note: the slow locking code could be inlined here, however if we use 2948 // slow locking, speed doesn't matter anyway and this solution is 2949 // simpler and requires less duplicated code - additionally, the 2950 // slow locking code is the same in either case which simplifies 2951 // debugging 2952 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry()); 2953 __ delayed()->nop(); 2954 } 2955 } else { 2956 assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock"); 2957 if (UseFastLocking) { 2958 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2959 __ unlock_object(hdr, obj, lock, *op->stub()->entry()); 2960 } else { 2961 // always do slow unlocking 2962 // note: the slow unlocking code could be inlined here, however if we use 2963 // slow unlocking, speed doesn't matter anyway and this solution is 2964 // simpler and requires less duplicated code - additionally, the 2965 // slow unlocking code is the same in either case which simplifies 2966 // debugging 2967 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry()); 2968 __ delayed()->nop(); 2969 } 2970 } 2971 __ bind(*op->stub()->continuation()); 2972 } 2973 2974 2975 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { 2976 ciMethod* method = op->profiled_method(); 2977 int bci = op->profiled_bci(); 2978 ciMethod* callee = op->profiled_callee(); 2979 2980 // Update counter for all call types 2981 ciMethodData* md = method->method_data_or_null(); 2982 assert(md != NULL, "Sanity"); 2983 ciProfileData* data = md->bci_to_data(bci); 2984 assert(data->is_CounterData(), "need CounterData for calls"); 2985 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 2986 Register mdo = op->mdo()->as_register(); 2987 #ifdef _LP64 2988 assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated"); 2989 Register tmp1 = op->tmp1()->as_register_lo(); 2990 #else 2991 assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated"); 2992 Register tmp1 = op->tmp1()->as_register(); 2993 #endif 2994 metadata2reg(md->constant_encoding(), mdo); 2995 int mdo_offset_bias = 0; 2996 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) + 2997 data->size_in_bytes())) { 2998 // The offset is large so bias the mdo by the base of the slot so 2999 // that the ld can use simm13s to reference the slots of the data 3000 mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset()); 3001 __ set(mdo_offset_bias, O7); 3002 __ add(mdo, O7, mdo); 3003 } 3004 3005 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); 3006 Bytecodes::Code bc = method->java_code_at_bci(bci); 3007 const bool callee_is_static = callee->is_loaded() && callee->is_static(); 3008 // Perform additional virtual call profiling for invokevirtual and 3009 // invokeinterface bytecodes 3010 if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) && 3011 !callee_is_static && // required for optimized MH invokes 3012 C1ProfileVirtualCalls) { 3013 assert(op->recv()->is_single_cpu(), "recv must be allocated"); 3014 Register recv = op->recv()->as_register(); 3015 assert_different_registers(mdo, tmp1, recv); 3016 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); 3017 ciKlass* known_klass = op->known_holder(); 3018 if (C1OptimizeVirtualCallProfiling && known_klass != NULL) { 3019 // We know the type that will be seen at this call site; we can 3020 // statically update the MethodData* rather than needing to do 3021 // dynamic tests on the receiver type 3022 3023 // NOTE: we should probably put a lock around this search to 3024 // avoid collisions by concurrent compilations 3025 ciVirtualCallData* vc_data = (ciVirtualCallData*) data; 3026 uint i; 3027 for (i = 0; i < VirtualCallData::row_limit(); i++) { 3028 ciKlass* receiver = vc_data->receiver(i); 3029 if (known_klass->equals(receiver)) { 3030 Address data_addr(mdo, md->byte_offset_of_slot(data, 3031 VirtualCallData::receiver_count_offset(i)) - 3032 mdo_offset_bias); 3033 __ ld_ptr(data_addr, tmp1); 3034 __ add(tmp1, DataLayout::counter_increment, tmp1); 3035 __ st_ptr(tmp1, data_addr); 3036 return; 3037 } 3038 } 3039 3040 // Receiver type not found in profile data; select an empty slot 3041 3042 // Note that this is less efficient than it should be because it 3043 // always does a write to the receiver part of the 3044 // VirtualCallData rather than just the first time 3045 for (i = 0; i < VirtualCallData::row_limit(); i++) { 3046 ciKlass* receiver = vc_data->receiver(i); 3047 if (receiver == NULL) { 3048 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - 3049 mdo_offset_bias); 3050 metadata2reg(known_klass->constant_encoding(), tmp1); 3051 __ st_ptr(tmp1, recv_addr); 3052 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - 3053 mdo_offset_bias); 3054 __ ld_ptr(data_addr, tmp1); 3055 __ add(tmp1, DataLayout::counter_increment, tmp1); 3056 __ st_ptr(tmp1, data_addr); 3057 return; 3058 } 3059 } 3060 } else { 3061 __ load_klass(recv, recv); 3062 Label update_done; 3063 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done); 3064 // Receiver did not match any saved receiver and there is no empty row for it. 3065 // Increment total counter to indicate polymorphic case. 3066 __ ld_ptr(counter_addr, tmp1); 3067 __ add(tmp1, DataLayout::counter_increment, tmp1); 3068 __ st_ptr(tmp1, counter_addr); 3069 3070 __ bind(update_done); 3071 } 3072 } else { 3073 // Static call 3074 __ ld_ptr(counter_addr, tmp1); 3075 __ add(tmp1, DataLayout::counter_increment, tmp1); 3076 __ st_ptr(tmp1, counter_addr); 3077 } 3078 } 3079 3080 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { 3081 Register obj = op->obj()->as_register(); 3082 Register tmp1 = op->tmp()->as_pointer_register(); 3083 Register tmp2 = G1; 3084 Address mdo_addr = as_Address(op->mdp()->as_address_ptr()); 3085 ciKlass* exact_klass = op->exact_klass(); 3086 intptr_t current_klass = op->current_klass(); 3087 bool not_null = op->not_null(); 3088 bool no_conflict = op->no_conflict(); 3089 3090 Label update, next, none; 3091 3092 bool do_null = !not_null; 3093 bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass; 3094 bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set; 3095 3096 assert(do_null || do_update, "why are we here?"); 3097 assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?"); 3098 3099 __ verify_oop(obj); 3100 3101 if (tmp1 != obj) { 3102 __ mov(obj, tmp1); 3103 } 3104 if (do_null) { 3105 __ br_notnull_short(tmp1, Assembler::pt, update); 3106 if (!TypeEntries::was_null_seen(current_klass)) { 3107 __ ld_ptr(mdo_addr, tmp1); 3108 __ or3(tmp1, TypeEntries::null_seen, tmp1); 3109 __ st_ptr(tmp1, mdo_addr); 3110 } 3111 if (do_update) { 3112 __ ba(next); 3113 __ delayed()->nop(); 3114 } 3115 #ifdef ASSERT 3116 } else { 3117 __ br_notnull_short(tmp1, Assembler::pt, update); 3118 __ stop("unexpect null obj"); 3119 #endif 3120 } 3121 3122 __ bind(update); 3123 3124 if (do_update) { 3125 #ifdef ASSERT 3126 if (exact_klass != NULL) { 3127 Label ok; 3128 __ load_klass(tmp1, tmp1); 3129 metadata2reg(exact_klass->constant_encoding(), tmp2); 3130 __ cmp_and_br_short(tmp1, tmp2, Assembler::equal, Assembler::pt, ok); 3131 __ stop("exact klass and actual klass differ"); 3132 __ bind(ok); 3133 } 3134 #endif 3135 3136 Label do_update; 3137 __ ld_ptr(mdo_addr, tmp2); 3138 3139 if (!no_conflict) { 3140 if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) { 3141 if (exact_klass != NULL) { 3142 metadata2reg(exact_klass->constant_encoding(), tmp1); 3143 } else { 3144 __ load_klass(tmp1, tmp1); 3145 } 3146 3147 __ xor3(tmp1, tmp2, tmp1); 3148 __ btst(TypeEntries::type_klass_mask, tmp1); 3149 // klass seen before, nothing to do. The unknown bit may have been 3150 // set already but no need to check. 3151 __ brx(Assembler::zero, false, Assembler::pt, next); 3152 __ delayed()-> 3153 3154 btst(TypeEntries::type_unknown, tmp1); 3155 // already unknown. Nothing to do anymore. 3156 __ brx(Assembler::notZero, false, Assembler::pt, next); 3157 3158 if (TypeEntries::is_type_none(current_klass)) { 3159 __ delayed()->btst(TypeEntries::type_mask, tmp2); 3160 __ brx(Assembler::zero, true, Assembler::pt, do_update); 3161 // first time here. Set profile type. 3162 __ delayed()->or3(tmp2, tmp1, tmp2); 3163 } else { 3164 __ delayed()->nop(); 3165 } 3166 } else { 3167 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && 3168 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only"); 3169 3170 __ btst(TypeEntries::type_unknown, tmp2); 3171 // already unknown. Nothing to do anymore. 3172 __ brx(Assembler::notZero, false, Assembler::pt, next); 3173 __ delayed()->nop(); 3174 } 3175 3176 // different than before. Cannot keep accurate profile. 3177 __ or3(tmp2, TypeEntries::type_unknown, tmp2); 3178 } else { 3179 // There's a single possible klass at this profile point 3180 assert(exact_klass != NULL, "should be"); 3181 if (TypeEntries::is_type_none(current_klass)) { 3182 metadata2reg(exact_klass->constant_encoding(), tmp1); 3183 __ xor3(tmp1, tmp2, tmp1); 3184 __ btst(TypeEntries::type_klass_mask, tmp1); 3185 __ brx(Assembler::zero, false, Assembler::pt, next); 3186 #ifdef ASSERT 3187 3188 { 3189 Label ok; 3190 __ delayed()->btst(TypeEntries::type_mask, tmp2); 3191 __ brx(Assembler::zero, true, Assembler::pt, ok); 3192 __ delayed()->nop(); 3193 3194 __ stop("unexpected profiling mismatch"); 3195 __ bind(ok); 3196 } 3197 // first time here. Set profile type. 3198 __ or3(tmp2, tmp1, tmp2); 3199 #else 3200 // first time here. Set profile type. 3201 __ delayed()->or3(tmp2, tmp1, tmp2); 3202 #endif 3203 3204 } else { 3205 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && 3206 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent"); 3207 3208 // already unknown. Nothing to do anymore. 3209 __ btst(TypeEntries::type_unknown, tmp2); 3210 __ brx(Assembler::notZero, false, Assembler::pt, next); 3211 __ delayed()->or3(tmp2, TypeEntries::type_unknown, tmp2); 3212 } 3213 } 3214 3215 __ bind(do_update); 3216 __ st_ptr(tmp2, mdo_addr); 3217 3218 __ bind(next); 3219 } 3220 } 3221 3222 void LIR_Assembler::align_backward_branch_target() { 3223 __ align(OptoLoopAlignment); 3224 } 3225 3226 3227 void LIR_Assembler::emit_delay(LIR_OpDelay* op) { 3228 // make sure we are expecting a delay 3229 // this has the side effect of clearing the delay state 3230 // so we can use _masm instead of _masm->delayed() to do the 3231 // code generation. 3232 __ delayed(); 3233 3234 // make sure we only emit one instruction 3235 int offset = code_offset(); 3236 op->delay_op()->emit_code(this); 3237 #ifdef ASSERT 3238 if (code_offset() - offset != NativeInstruction::nop_instruction_size) { 3239 op->delay_op()->print(); 3240 } 3241 assert(code_offset() - offset == NativeInstruction::nop_instruction_size, 3242 "only one instruction can go in a delay slot"); 3243 #endif 3244 3245 // we may also be emitting the call info for the instruction 3246 // which we are the delay slot of. 3247 CodeEmitInfo* call_info = op->call_info(); 3248 if (call_info) { 3249 add_call_info(code_offset(), call_info); 3250 } 3251 3252 if (VerifyStackAtCalls) { 3253 _masm->sub(FP, SP, O7); 3254 _masm->cmp(O7, initial_frame_size_in_bytes()); 3255 _masm->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2 ); 3256 } 3257 } 3258 3259 3260 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) { 3261 assert(left->is_register(), "can only handle registers"); 3262 3263 if (left->is_single_cpu()) { 3264 __ neg(left->as_register(), dest->as_register()); 3265 } else if (left->is_single_fpu()) { 3266 __ fneg(FloatRegisterImpl::S, left->as_float_reg(), dest->as_float_reg()); 3267 } else if (left->is_double_fpu()) { 3268 __ fneg(FloatRegisterImpl::D, left->as_double_reg(), dest->as_double_reg()); 3269 } else { 3270 assert (left->is_double_cpu(), "Must be a long"); 3271 Register Rlow = left->as_register_lo(); 3272 Register Rhi = left->as_register_hi(); 3273 #ifdef _LP64 3274 __ sub(G0, Rlow, dest->as_register_lo()); 3275 #else 3276 __ subcc(G0, Rlow, dest->as_register_lo()); 3277 __ subc (G0, Rhi, dest->as_register_hi()); 3278 #endif 3279 } 3280 } 3281 3282 3283 void LIR_Assembler::fxch(int i) { 3284 Unimplemented(); 3285 } 3286 3287 void LIR_Assembler::fld(int i) { 3288 Unimplemented(); 3289 } 3290 3291 void LIR_Assembler::ffree(int i) { 3292 Unimplemented(); 3293 } 3294 3295 void LIR_Assembler::rt_call(LIR_Opr result, address dest, 3296 const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { 3297 3298 // if tmp is invalid, then the function being called doesn't destroy the thread 3299 if (tmp->is_valid()) { 3300 __ save_thread(tmp->as_pointer_register()); 3301 } 3302 __ call(dest, relocInfo::runtime_call_type); 3303 __ delayed()->nop(); 3304 if (info != NULL) { 3305 add_call_info_here(info); 3306 } 3307 if (tmp->is_valid()) { 3308 __ restore_thread(tmp->as_pointer_register()); 3309 } 3310 3311 #ifdef ASSERT 3312 __ verify_thread(); 3313 #endif // ASSERT 3314 } 3315 3316 3317 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { 3318 #ifdef _LP64 3319 ShouldNotReachHere(); 3320 #endif 3321 3322 NEEDS_CLEANUP; 3323 if (type == T_LONG) { 3324 LIR_Address* mem_addr = dest->is_address() ? dest->as_address_ptr() : src->as_address_ptr(); 3325 3326 // (extended to allow indexed as well as constant displaced for JSR-166) 3327 Register idx = noreg; // contains either constant offset or index 3328 3329 int disp = mem_addr->disp(); 3330 if (mem_addr->index() == LIR_OprFact::illegalOpr) { 3331 if (!Assembler::is_simm13(disp)) { 3332 idx = O7; 3333 __ set(disp, idx); 3334 } 3335 } else { 3336 assert(disp == 0, "not both indexed and disp"); 3337 idx = mem_addr->index()->as_register(); 3338 } 3339 3340 int null_check_offset = -1; 3341 3342 Register base = mem_addr->base()->as_register(); 3343 if (src->is_register() && dest->is_address()) { 3344 // G4 is high half, G5 is low half 3345 // clear the top bits of G5, and scale up G4 3346 __ srl (src->as_register_lo(), 0, G5); 3347 __ sllx(src->as_register_hi(), 32, G4); 3348 // combine the two halves into the 64 bits of G4 3349 __ or3(G4, G5, G4); 3350 null_check_offset = __ offset(); 3351 if (idx == noreg) { 3352 __ stx(G4, base, disp); 3353 } else { 3354 __ stx(G4, base, idx); 3355 } 3356 } else if (src->is_address() && dest->is_register()) { 3357 null_check_offset = __ offset(); 3358 if (idx == noreg) { 3359 __ ldx(base, disp, G5); 3360 } else { 3361 __ ldx(base, idx, G5); 3362 } 3363 __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi 3364 __ mov (G5, dest->as_register_lo()); // copy low half into lo 3365 } else { 3366 Unimplemented(); 3367 } 3368 if (info != NULL) { 3369 add_debug_info_for_null_check(null_check_offset, info); 3370 } 3371 3372 } else { 3373 // use normal move for all other volatiles since they don't need 3374 // special handling to remain atomic. 3375 move_op(src, dest, type, lir_patch_none, info, false, false, false); 3376 } 3377 } 3378 3379 void LIR_Assembler::membar() { 3380 // only StoreLoad membars are ever explicitly needed on sparcs in TSO mode 3381 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) ); 3382 } 3383 3384 void LIR_Assembler::membar_acquire() { 3385 // no-op on TSO 3386 } 3387 3388 void LIR_Assembler::membar_release() { 3389 // no-op on TSO 3390 } 3391 3392 void LIR_Assembler::membar_loadload() { 3393 // no-op 3394 //__ membar(Assembler::Membar_mask_bits(Assembler::loadload)); 3395 } 3396 3397 void LIR_Assembler::membar_storestore() { 3398 // no-op 3399 //__ membar(Assembler::Membar_mask_bits(Assembler::storestore)); 3400 } 3401 3402 void LIR_Assembler::membar_loadstore() { 3403 // no-op 3404 //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore)); 3405 } 3406 3407 void LIR_Assembler::membar_storeload() { 3408 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); 3409 } 3410 3411 3412 // Pack two sequential registers containing 32 bit values 3413 // into a single 64 bit register. 3414 // src and src->successor() are packed into dst 3415 // src and dst may be the same register. 3416 // Note: src is destroyed 3417 void LIR_Assembler::pack64(LIR_Opr src, LIR_Opr dst) { 3418 Register rs = src->as_register(); 3419 Register rd = dst->as_register_lo(); 3420 __ sllx(rs, 32, rs); 3421 __ srl(rs->successor(), 0, rs->successor()); 3422 __ or3(rs, rs->successor(), rd); 3423 } 3424 3425 // Unpack a 64 bit value in a register into 3426 // two sequential registers. 3427 // src is unpacked into dst and dst->successor() 3428 void LIR_Assembler::unpack64(LIR_Opr src, LIR_Opr dst) { 3429 Register rs = src->as_register_lo(); 3430 Register rd = dst->as_register_hi(); 3431 assert_different_registers(rs, rd, rd->successor()); 3432 __ srlx(rs, 32, rd); 3433 __ srl (rs, 0, rd->successor()); 3434 } 3435 3436 3437 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) { 3438 LIR_Address* addr = addr_opr->as_address_ptr(); 3439 assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1, "can't handle complex addresses yet"); 3440 3441 if (Assembler::is_simm13(addr->disp())) { 3442 __ add(addr->base()->as_pointer_register(), addr->disp(), dest->as_pointer_register()); 3443 } else { 3444 __ set(addr->disp(), G3_scratch); 3445 __ add(addr->base()->as_pointer_register(), G3_scratch, dest->as_pointer_register()); 3446 } 3447 } 3448 3449 3450 void LIR_Assembler::get_thread(LIR_Opr result_reg) { 3451 assert(result_reg->is_register(), "check"); 3452 __ mov(G2_thread, result_reg->as_register()); 3453 } 3454 3455 #ifdef ASSERT 3456 // emit run-time assertion 3457 void LIR_Assembler::emit_assert(LIR_OpAssert* op) { 3458 assert(op->code() == lir_assert, "must be"); 3459 3460 if (op->in_opr1()->is_valid()) { 3461 assert(op->in_opr2()->is_valid(), "both operands must be valid"); 3462 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op); 3463 } else { 3464 assert(op->in_opr2()->is_illegal(), "both operands must be illegal"); 3465 assert(op->condition() == lir_cond_always, "no other conditions allowed"); 3466 } 3467 3468 Label ok; 3469 if (op->condition() != lir_cond_always) { 3470 Assembler::Condition acond; 3471 switch (op->condition()) { 3472 case lir_cond_equal: acond = Assembler::equal; break; 3473 case lir_cond_notEqual: acond = Assembler::notEqual; break; 3474 case lir_cond_less: acond = Assembler::less; break; 3475 case lir_cond_lessEqual: acond = Assembler::lessEqual; break; 3476 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break; 3477 case lir_cond_greater: acond = Assembler::greater; break; 3478 case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break; 3479 case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break; 3480 default: ShouldNotReachHere(); 3481 }; 3482 __ br(acond, false, Assembler::pt, ok); 3483 __ delayed()->nop(); 3484 } 3485 if (op->halt()) { 3486 const char* str = __ code_string(op->msg()); 3487 __ stop(str); 3488 } else { 3489 breakpoint(); 3490 } 3491 __ bind(ok); 3492 } 3493 #endif 3494 3495 void LIR_Assembler::peephole(LIR_List* lir) { 3496 LIR_OpList* inst = lir->instructions_list(); 3497 for (int i = 0; i < inst->length(); i++) { 3498 LIR_Op* op = inst->at(i); 3499 switch (op->code()) { 3500 case lir_cond_float_branch: 3501 case lir_branch: { 3502 LIR_OpBranch* branch = op->as_OpBranch(); 3503 assert(branch->info() == NULL, "shouldn't be state on branches anymore"); 3504 LIR_Op* delay_op = NULL; 3505 // we'd like to be able to pull following instructions into 3506 // this slot but we don't know enough to do it safely yet so 3507 // only optimize block to block control flow. 3508 if (LIRFillDelaySlots && branch->block()) { 3509 LIR_Op* prev = inst->at(i - 1); 3510 if (prev && LIR_Assembler::is_single_instruction(prev) && prev->info() == NULL) { 3511 // swap previous instruction into delay slot 3512 inst->at_put(i - 1, op); 3513 inst->at_put(i, new LIR_OpDelay(prev, op->info())); 3514 #ifndef PRODUCT 3515 if (LIRTracePeephole) { 3516 tty->print_cr("delayed"); 3517 inst->at(i - 1)->print(); 3518 inst->at(i)->print(); 3519 tty->cr(); 3520 } 3521 #endif 3522 continue; 3523 } 3524 } 3525 3526 if (!delay_op) { 3527 delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), NULL); 3528 } 3529 inst->insert_before(i + 1, delay_op); 3530 break; 3531 } 3532 case lir_static_call: 3533 case lir_virtual_call: 3534 case lir_icvirtual_call: 3535 case lir_optvirtual_call: 3536 case lir_dynamic_call: { 3537 LIR_Op* prev = inst->at(i - 1); 3538 if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL && 3539 (op->code() != lir_virtual_call || 3540 !prev->result_opr()->is_single_cpu() || 3541 prev->result_opr()->as_register() != O0) && 3542 LIR_Assembler::is_single_instruction(prev)) { 3543 // Only moves without info can be put into the delay slot. 3544 // Also don't allow the setup of the receiver in the delay 3545 // slot for vtable calls. 3546 inst->at_put(i - 1, op); 3547 inst->at_put(i, new LIR_OpDelay(prev, op->info())); 3548 #ifndef PRODUCT 3549 if (LIRTracePeephole) { 3550 tty->print_cr("delayed"); 3551 inst->at(i - 1)->print(); 3552 inst->at(i)->print(); 3553 tty->cr(); 3554 } 3555 #endif 3556 } else { 3557 LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info()); 3558 inst->insert_before(i + 1, delay_op); 3559 i++; 3560 } 3561 3562 #if defined(TIERED) && !defined(_LP64) 3563 // fixup the return value from G1 to O0/O1 for long returns. 3564 // It's done here instead of in LIRGenerator because there's 3565 // such a mismatch between the single reg and double reg 3566 // calling convention. 3567 LIR_OpJavaCall* callop = op->as_OpJavaCall(); 3568 if (callop->result_opr() == FrameMap::out_long_opr) { 3569 LIR_OpJavaCall* call; 3570 LIR_OprList* arguments = new LIR_OprList(callop->arguments()->length()); 3571 for (int a = 0; a < arguments->length(); a++) { 3572 arguments[a] = callop->arguments()[a]; 3573 } 3574 if (op->code() == lir_virtual_call) { 3575 call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr, 3576 callop->vtable_offset(), arguments, callop->info()); 3577 } else { 3578 call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr, 3579 callop->addr(), arguments, callop->info()); 3580 } 3581 inst->at_put(i - 1, call); 3582 inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(), 3583 T_LONG, lir_patch_none, NULL)); 3584 } 3585 #endif 3586 break; 3587 } 3588 } 3589 } 3590 } 3591 3592 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) { 3593 LIR_Address* addr = src->as_address_ptr(); 3594 3595 assert(data == dest, "swap uses only 2 operands"); 3596 assert (code == lir_xchg, "no xadd on sparc"); 3597 3598 if (data->type() == T_INT) { 3599 __ swap(as_Address(addr), data->as_register()); 3600 } else if (data->is_oop()) { 3601 Register obj = data->as_register(); 3602 Register narrow = tmp->as_register(); 3603 #ifdef _LP64 3604 assert(UseCompressedOops, "swap is 32bit only"); 3605 __ encode_heap_oop(obj, narrow); 3606 __ swap(as_Address(addr), narrow); 3607 __ decode_heap_oop(narrow, obj); 3608 #else 3609 __ swap(as_Address(addr), obj); 3610 #endif 3611 } else { 3612 ShouldNotReachHere(); 3613 } 3614 } 3615 3616 #undef __