1 /* 2 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "asm/macroAssembler.hpp" 27 #include "asm/macroAssembler.inline.hpp" 28 #include "c1/c1_Compilation.hpp" 29 #include "c1/c1_LIRAssembler.hpp" 30 #include "c1/c1_MacroAssembler.hpp" 31 #include "c1/c1_Runtime1.hpp" 32 #include "c1/c1_ValueStack.hpp" 33 #include "ci/ciArrayKlass.hpp" 34 #include "ci/ciCompilationCache.hpp" 35 #include "ci/ciInstance.hpp" 36 #include "gc_interface/collectedHeap.hpp" 37 #include "memory/barrierSet.hpp" 38 #include "memory/cardTableModRefBS.hpp" 39 #include "nativeInst_x86.hpp" 40 #include "oops/objArrayKlass.hpp" 41 #include "runtime/sharedRuntime.hpp" 42 #include "vmreg_x86.inline.hpp" 43 44 45 // These masks are used to provide 128-bit aligned bitmasks to the XMM 46 // instructions, to allow sign-masking or sign-bit flipping. They allow 47 // fast versions of NegF/NegD and AbsF/AbsD. 48 49 // Note: 'double' and 'long long' have 32-bits alignment on x86. 50 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) { 51 // Use the expression (adr)&(~0xF) to provide 128-bits aligned address 52 // of 128-bits operands for SSE instructions. 53 jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF))); 54 // Store the value to a 128-bits operand. 55 operand[0] = lo; 56 operand[1] = hi; 57 return operand; 58 } 59 60 // Buffer for 128-bits masks used by SSE instructions. 61 static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment) 62 63 // Static initialization during VM startup. 64 jlong *LIR_Assembler::float_signmask_pool = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF)); 65 jlong *LIR_Assembler::double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF)); 66 jlong *LIR_Assembler::float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000)); 67 jlong *LIR_Assembler::double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000)); 68 69 70 71 NEEDS_CLEANUP // remove this definitions ? 72 const Register IC_Klass = rax; // where the IC klass is cached 73 const Register SYNC_header = rax; // synchronization header 74 const Register SHIFT_count = rcx; // where count for shift operations must be 75 76 #define __ _masm-> 77 78 79 static void select_different_registers(Register preserve, 80 Register extra, 81 Register &tmp1, 82 Register &tmp2) { 83 if (tmp1 == preserve) { 84 assert_different_registers(tmp1, tmp2, extra); 85 tmp1 = extra; 86 } else if (tmp2 == preserve) { 87 assert_different_registers(tmp1, tmp2, extra); 88 tmp2 = extra; 89 } 90 assert_different_registers(preserve, tmp1, tmp2); 91 } 92 93 94 95 static void select_different_registers(Register preserve, 96 Register extra, 97 Register &tmp1, 98 Register &tmp2, 99 Register &tmp3) { 100 if (tmp1 == preserve) { 101 assert_different_registers(tmp1, tmp2, tmp3, extra); 102 tmp1 = extra; 103 } else if (tmp2 == preserve) { 104 assert_different_registers(tmp1, tmp2, tmp3, extra); 105 tmp2 = extra; 106 } else if (tmp3 == preserve) { 107 assert_different_registers(tmp1, tmp2, tmp3, extra); 108 tmp3 = extra; 109 } 110 assert_different_registers(preserve, tmp1, tmp2, tmp3); 111 } 112 113 114 115 bool LIR_Assembler::is_small_constant(LIR_Opr opr) { 116 if (opr->is_constant()) { 117 LIR_Const* constant = opr->as_constant_ptr(); 118 switch (constant->type()) { 119 case T_INT: { 120 return true; 121 } 122 123 default: 124 return false; 125 } 126 } 127 return false; 128 } 129 130 131 LIR_Opr LIR_Assembler::receiverOpr() { 132 return FrameMap::receiver_opr; 133 } 134 135 LIR_Opr LIR_Assembler::osrBufferPointer() { 136 return FrameMap::as_pointer_opr(receiverOpr()->as_register()); 137 } 138 139 //--------------fpu register translations----------------------- 140 141 142 address LIR_Assembler::float_constant(float f) { 143 address const_addr = __ float_constant(f); 144 if (const_addr == NULL) { 145 bailout("const section overflow"); 146 return __ code()->consts()->start(); 147 } else { 148 return const_addr; 149 } 150 } 151 152 153 address LIR_Assembler::double_constant(double d) { 154 address const_addr = __ double_constant(d); 155 if (const_addr == NULL) { 156 bailout("const section overflow"); 157 return __ code()->consts()->start(); 158 } else { 159 return const_addr; 160 } 161 } 162 163 164 void LIR_Assembler::set_24bit_FPU() { 165 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24())); 166 } 167 168 void LIR_Assembler::reset_FPU() { 169 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std())); 170 } 171 172 void LIR_Assembler::fpop() { 173 __ fpop(); 174 } 175 176 void LIR_Assembler::fxch(int i) { 177 __ fxch(i); 178 } 179 180 void LIR_Assembler::fld(int i) { 181 __ fld_s(i); 182 } 183 184 void LIR_Assembler::ffree(int i) { 185 __ ffree(i); 186 } 187 188 void LIR_Assembler::breakpoint() { 189 __ int3(); 190 } 191 192 void LIR_Assembler::push(LIR_Opr opr) { 193 if (opr->is_single_cpu()) { 194 __ push_reg(opr->as_register()); 195 } else if (opr->is_double_cpu()) { 196 NOT_LP64(__ push_reg(opr->as_register_hi())); 197 __ push_reg(opr->as_register_lo()); 198 } else if (opr->is_stack()) { 199 __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix())); 200 } else if (opr->is_constant()) { 201 LIR_Const* const_opr = opr->as_constant_ptr(); 202 if (const_opr->type() == T_OBJECT) { 203 __ push_oop(const_opr->as_jobject()); 204 } else if (const_opr->type() == T_INT) { 205 __ push_jint(const_opr->as_jint()); 206 } else { 207 ShouldNotReachHere(); 208 } 209 210 } else { 211 ShouldNotReachHere(); 212 } 213 } 214 215 void LIR_Assembler::pop(LIR_Opr opr) { 216 if (opr->is_single_cpu()) { 217 __ pop_reg(opr->as_register()); 218 } else { 219 ShouldNotReachHere(); 220 } 221 } 222 223 bool LIR_Assembler::is_literal_address(LIR_Address* addr) { 224 return addr->base()->is_illegal() && addr->index()->is_illegal(); 225 } 226 227 //------------------------------------------- 228 229 Address LIR_Assembler::as_Address(LIR_Address* addr) { 230 return as_Address(addr, rscratch1); 231 } 232 233 Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) { 234 if (addr->base()->is_illegal()) { 235 assert(addr->index()->is_illegal(), "must be illegal too"); 236 AddressLiteral laddr((address)addr->disp(), relocInfo::none); 237 if (! __ reachable(laddr)) { 238 __ movptr(tmp, laddr.addr()); 239 Address res(tmp, 0); 240 return res; 241 } else { 242 return __ as_Address(laddr); 243 } 244 } 245 246 Register base = addr->base()->as_pointer_register(); 247 248 if (addr->index()->is_illegal()) { 249 BarrierSet* bs = Universe::heap()->barrier_set(); 250 if (CacheCompiledCode && (bs->kind() == BarrierSet::CardTableModRef)) { 251 // This code may not longer be needed. Shared code generating 252 // card_table_base access without symbolic_relocation information was 253 // replaced by code which reads it from the Thread stucture. Keeping the 254 // code in case inlined access is reactivated (e.g. if more efficient 255 // than Thread access). 256 CardTableModRefBS* ct = (CardTableModRefBS*)bs; 257 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); 258 if ((address)addr->disp() == (address)ct->byte_map_base) { 259 AddressLiteral al((address)ct->byte_map_base, symbolic_Relocation::spec(symbolic_Relocation::card_table_reference)); 260 return __ as_Address(al).plus_disp(RegisterOrConstant(addr->base()->as_pointer_register())); 261 } 262 } 263 264 return Address( base, addr->disp()); 265 } else if (addr->index()->is_cpu_register()) { 266 Register index = addr->index()->as_pointer_register(); 267 return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp()); 268 } else if (addr->index()->is_constant()) { 269 intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp(); 270 assert(Assembler::is_simm32(addr_offset), "must be"); 271 272 return Address(base, addr_offset); 273 } else { 274 Unimplemented(); 275 return Address(); 276 } 277 } 278 279 280 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) { 281 Address base = as_Address(addr); 282 return Address(base._base, base._index, base._scale, base._disp + BytesPerWord); 283 } 284 285 286 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) { 287 return as_Address(addr); 288 } 289 290 291 void LIR_Assembler::osr_entry() { 292 offsets()->set_value(CodeOffsets::OSR_Entry, code_offset()); 293 BlockBegin* osr_entry = compilation()->hir()->osr_entry(); 294 ValueStack* entry_state = osr_entry->state(); 295 int number_of_locks = entry_state->locks_size(); 296 297 // we jump here if osr happens with the interpreter 298 // state set up to continue at the beginning of the 299 // loop that triggered osr - in particular, we have 300 // the following registers setup: 301 // 302 // rcx: osr buffer 303 // 304 305 // build frame 306 ciMethod* m = compilation()->method(); 307 __ build_frame(initial_frame_size_in_bytes()); 308 309 // OSR buffer is 310 // 311 // locals[nlocals-1..0] 312 // monitors[0..number_of_locks] 313 // 314 // locals is a direct copy of the interpreter frame so in the osr buffer 315 // so first slot in the local array is the last local from the interpreter 316 // and last slot is local[0] (receiver) from the interpreter 317 // 318 // Similarly with locks. The first lock slot in the osr buffer is the nth lock 319 // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock 320 // in the interpreter frame (the method lock if a sync method) 321 322 // Initialize monitors in the compiled activation. 323 // rcx: pointer to osr buffer 324 // 325 // All other registers are dead at this point and the locals will be 326 // copied into place by code emitted in the IR. 327 328 Register OSR_buf = osrBufferPointer()->as_pointer_register(); 329 { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below"); 330 int monitor_offset = BytesPerWord * method()->max_locals() + 331 (2 * BytesPerWord) * (number_of_locks - 1); 332 // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in 333 // the OSR buffer using 2 word entries: first the lock and then 334 // the oop. 335 for (int i = 0; i < number_of_locks; i++) { 336 int slot_offset = monitor_offset - ((i * 2) * BytesPerWord); 337 #ifdef ASSERT 338 // verify the interpreter's monitor has a non-null object 339 { 340 Label L; 341 __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD); 342 __ jcc(Assembler::notZero, L); 343 __ stop("locked object is NULL"); 344 __ bind(L); 345 } 346 #endif 347 __ movptr(rbx, Address(OSR_buf, slot_offset + 0)); 348 __ movptr(frame_map()->address_for_monitor_lock(i), rbx); 349 __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord)); 350 __ movptr(frame_map()->address_for_monitor_object(i), rbx); 351 } 352 } 353 } 354 355 356 // inline cache check; done before the frame is built. 357 int LIR_Assembler::check_icache() { 358 Register receiver = FrameMap::receiver_opr->as_register(); 359 Register ic_klass = IC_Klass; 360 const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9); 361 const bool do_post_padding = VerifyOops || UseCompressedClassPointers; 362 if (!do_post_padding) { 363 // insert some nops so that the verified entry point is aligned on CodeEntryAlignment 364 while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) { 365 __ nop(); 366 } 367 } 368 int offset = __ offset(); 369 __ inline_cache_check(receiver, IC_Klass); 370 assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct"); 371 if (do_post_padding) { 372 // force alignment after the cache check. 373 // It's been verified to be aligned if !VerifyOops 374 __ align(CodeEntryAlignment); 375 } 376 return offset; 377 } 378 379 380 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) { 381 #ifdef ASSERT 382 // check that we do not need an init_mirror_id 383 if (PatchALot || CacheCompiledCode) { 384 IRScope* scope = info->scope(); 385 Bytecodes::Code code = scope->method()->java_code_at_bci(info->stack()->bci()); 386 assert(code != Bytecodes::_new, "Need to add PatchingStub::init_mirror_id"); 387 } 388 #endif 389 jobject o = NULL; 390 PatchingStub* patch = new PatchingStub(_masm, patching_id(info)); 391 __ movoop(reg, o); 392 patching_epilog(patch, lir_patch_normal, reg, info); 393 } 394 395 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) { 396 Metadata* o = NULL; 397 PatchingStub::PatchID id = PatchingStub::load_klass_id; 398 if (PatchALot || CacheCompiledCode) { 399 IRScope* scope = info->scope(); 400 Bytecodes::Code code = scope->method()->java_code_at_bci(info->stack()->bci()); 401 switch (code) { 402 case Bytecodes::_new: 403 id = PatchingStub::init_klass_id; 404 break; 405 } 406 } 407 408 PatchingStub* patch = new PatchingStub(_masm, id); 409 __ mov_metadata(reg, o); 410 patching_epilog(patch, lir_patch_normal, reg, info); 411 } 412 413 // This specifies the rsp decrement needed to build the frame 414 int LIR_Assembler::initial_frame_size_in_bytes() { 415 // if rounding, must let FrameMap know! 416 417 // The frame_map records size in slots (32bit word) 418 419 // subtract two words to account for return address and link 420 return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size; 421 } 422 423 424 int LIR_Assembler::emit_exception_handler() { 425 // if the last instruction is a call (typically to do a throw which 426 // is coming at the end after block reordering) the return address 427 // must still point into the code area in order to avoid assertion 428 // failures when searching for the corresponding bci => add a nop 429 // (was bug 5/14/1999 - gri) 430 __ nop(); 431 432 // generate code for exception handler 433 address handler_base = __ start_a_stub(exception_handler_size); 434 if (handler_base == NULL) { 435 // not enough space left for the handler 436 bailout("exception handler overflow"); 437 return -1; 438 } 439 440 int offset = code_offset(); 441 442 // the exception oop and pc are in rax, and rdx 443 // no other registers need to be preserved, so invalidate them 444 __ invalidate_registers(false, true, true, false, true, true); 445 446 // check that there is really an exception 447 __ verify_not_null_oop(rax); 448 449 // search an exception handler (rax: exception oop, rdx: throwing pc) 450 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id))); 451 __ should_not_reach_here(); 452 guarantee(code_offset() - offset <= exception_handler_size, "overflow"); 453 __ end_a_stub(); 454 455 return offset; 456 } 457 458 459 // Emit the code to remove the frame from the stack in the exception 460 // unwind path. 461 int LIR_Assembler::emit_unwind_handler() { 462 #ifndef PRODUCT 463 if (CommentedAssembly) { 464 _masm->block_comment("Unwind handler"); 465 } 466 #endif 467 468 int offset = code_offset(); 469 470 // Fetch the exception from TLS and clear out exception related thread state 471 Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread); 472 NOT_LP64(__ get_thread(rsi)); 473 __ movptr(rax, Address(thread, JavaThread::exception_oop_offset())); 474 __ movptr(Address(thread, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD); 475 __ movptr(Address(thread, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD); 476 477 __ bind(_unwind_handler_entry); 478 __ verify_not_null_oop(rax); 479 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { 480 __ mov(rbx, rax); // Preserve the exception (rbx is always callee-saved) 481 } 482 483 // Preform needed unlocking 484 MonitorExitStub* stub = NULL; 485 if (method()->is_synchronized()) { 486 monitor_address(0, FrameMap::rax_opr); 487 stub = new MonitorExitStub(FrameMap::rax_opr, true, 0); 488 __ unlock_object(rdi, rsi, rax, *stub->entry()); 489 __ bind(*stub->continuation()); 490 } 491 492 if (compilation()->env()->dtrace_method_probes()) { 493 #ifdef _LP64 494 __ mov(rdi, r15_thread); 495 __ mov_metadata(rsi, method()->constant_encoding()); 496 #else 497 __ get_thread(rax); 498 __ movptr(Address(rsp, 0), rax); 499 __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding()); 500 #endif 501 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit))); 502 } 503 504 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { 505 __ mov(rax, rbx); // Restore the exception 506 } 507 508 // remove the activation and dispatch to the unwind handler 509 __ remove_frame(initial_frame_size_in_bytes()); 510 __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id))); 511 512 // Emit the slow path assembly 513 if (stub != NULL) { 514 stub->emit_code(this); 515 } 516 517 return offset; 518 } 519 520 521 int LIR_Assembler::emit_deopt_handler() { 522 // if the last instruction is a call (typically to do a throw which 523 // is coming at the end after block reordering) the return address 524 // must still point into the code area in order to avoid assertion 525 // failures when searching for the corresponding bci => add a nop 526 // (was bug 5/14/1999 - gri) 527 __ nop(); 528 529 // generate code for exception handler 530 address handler_base = __ start_a_stub(deopt_handler_size); 531 if (handler_base == NULL) { 532 // not enough space left for the handler 533 bailout("deopt handler overflow"); 534 return -1; 535 } 536 537 int offset = code_offset(); 538 InternalAddress here(__ pc()); 539 540 __ pushptr(here.addr()); 541 __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack())); 542 guarantee(code_offset() - offset <= deopt_handler_size, "overflow"); 543 __ end_a_stub(); 544 545 return offset; 546 } 547 548 549 // This is the fast version of java.lang.String.compare; it has not 550 // OSR-entry and therefore, we generate a slow version for OSR's 551 void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) { 552 __ movptr (rbx, rcx); // receiver is in rcx 553 __ movptr (rax, arg1->as_register()); 554 555 // Get addresses of first characters from both Strings 556 __ load_heap_oop(rsi, Address(rax, java_lang_String::value_offset_in_bytes())); 557 if (java_lang_String::has_offset_field()) { 558 __ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes())); 559 __ movl (rax, Address(rax, java_lang_String::count_offset_in_bytes())); 560 __ lea (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR))); 561 } else { 562 __ movl (rax, Address(rsi, arrayOopDesc::length_offset_in_bytes())); 563 __ lea (rsi, Address(rsi, arrayOopDesc::base_offset_in_bytes(T_CHAR))); 564 } 565 566 // rbx, may be NULL 567 add_debug_info_for_null_check_here(info); 568 __ load_heap_oop(rdi, Address(rbx, java_lang_String::value_offset_in_bytes())); 569 if (java_lang_String::has_offset_field()) { 570 __ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes())); 571 __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes())); 572 __ lea (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR))); 573 } else { 574 __ movl (rbx, Address(rdi, arrayOopDesc::length_offset_in_bytes())); 575 __ lea (rdi, Address(rdi, arrayOopDesc::base_offset_in_bytes(T_CHAR))); 576 } 577 578 // compute minimum length (in rax) and difference of lengths (on top of stack) 579 __ mov (rcx, rbx); 580 __ subptr(rbx, rax); // subtract lengths 581 __ push (rbx); // result 582 __ cmov (Assembler::lessEqual, rax, rcx); 583 584 // is minimum length 0? 585 Label noLoop, haveResult; 586 __ testptr (rax, rax); 587 __ jcc (Assembler::zero, noLoop); 588 589 // compare first characters 590 __ load_unsigned_short(rcx, Address(rdi, 0)); 591 __ load_unsigned_short(rbx, Address(rsi, 0)); 592 __ subl(rcx, rbx); 593 __ jcc(Assembler::notZero, haveResult); 594 // starting loop 595 __ decrement(rax); // we already tested index: skip one 596 __ jcc(Assembler::zero, noLoop); 597 598 // set rsi.edi to the end of the arrays (arrays have same length) 599 // negate the index 600 601 __ lea(rsi, Address(rsi, rax, Address::times_2, type2aelembytes(T_CHAR))); 602 __ lea(rdi, Address(rdi, rax, Address::times_2, type2aelembytes(T_CHAR))); 603 __ negptr(rax); 604 605 // compare the strings in a loop 606 607 Label loop; 608 __ align(wordSize); 609 __ bind(loop); 610 __ load_unsigned_short(rcx, Address(rdi, rax, Address::times_2, 0)); 611 __ load_unsigned_short(rbx, Address(rsi, rax, Address::times_2, 0)); 612 __ subl(rcx, rbx); 613 __ jcc(Assembler::notZero, haveResult); 614 __ increment(rax); 615 __ jcc(Assembler::notZero, loop); 616 617 // strings are equal up to min length 618 619 __ bind(noLoop); 620 __ pop(rax); 621 return_op(LIR_OprFact::illegalOpr); 622 623 __ bind(haveResult); 624 // leave instruction is going to discard the TOS value 625 __ mov (rax, rcx); // result of call is in rax, 626 } 627 628 629 void LIR_Assembler::return_op(LIR_Opr result) { 630 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,"); 631 if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) { 632 assert(result->fpu() == 0, "result must already be on TOS"); 633 } 634 635 // Pop the stack before the safepoint code 636 __ remove_frame(initial_frame_size_in_bytes()); 637 638 bool result_is_oop = result->is_valid() ? result->is_oop() : false; 639 640 // Note: we do not need to round double result; float result has the right precision 641 // the poll sets the condition code, but no data registers 642 address addr = os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()); 643 644 if (Assembler::is_polling_page_far()) { 645 AddressLiteral polling_page(addr, symbolic_Relocation::spec(symbolic_Relocation::polling_page_reference2)); 646 __ lea(rscratch1, polling_page); 647 __ relocate(relocInfo::poll_return_type); 648 __ testl(rax, Address(rscratch1, 0)); 649 } else { 650 // Note: polling_page_reference handled by correct_polling_page 651 AddressLiteral polling_page(addr, relocInfo::poll_return_type); 652 __ testl(rax, polling_page); 653 } 654 __ ret(0); 655 } 656 657 658 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) { 659 address addr = os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()); 660 guarantee(info != NULL, "Shouldn't be NULL"); 661 int offset = __ offset(); 662 if (Assembler::is_polling_page_far()) { 663 AddressLiteral polling_page(addr, symbolic_Relocation::spec(symbolic_Relocation::polling_page_reference2)); 664 __ lea(rscratch1, polling_page); 665 offset = __ offset(); 666 add_debug_info_for_branch(info); 667 __ testl(rax, Address(rscratch1, 0)); 668 } else { 669 // Note: polling_page_reference handled by correct_polling_page 670 AddressLiteral polling_page(addr, relocInfo::poll_type); 671 add_debug_info_for_branch(info); 672 __ testl(rax, polling_page); 673 } 674 return offset; 675 } 676 677 678 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) { 679 if (from_reg != to_reg) __ mov(to_reg, from_reg); 680 } 681 682 void LIR_Assembler::swap_reg(Register a, Register b) { 683 __ xchgptr(a, b); 684 } 685 686 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 687 assert(src->is_constant(), "should not call otherwise"); 688 assert(dest->is_register(), "should not call otherwise"); 689 LIR_Const* c = src->as_constant_ptr(); 690 691 switch (c->type()) { 692 case T_INT: { 693 assert(patch_code == lir_patch_none, "no patching handled here"); 694 __ movl(dest->as_register(), c->as_jint()); 695 break; 696 } 697 698 case T_ADDRESS: { 699 assert(patch_code == lir_patch_none, "no patching handled here"); 700 __ movptr(dest->as_register(), c->as_jint()); 701 break; 702 } 703 704 case T_LONG: { 705 assert(patch_code == lir_patch_none, "no patching handled here"); 706 #ifdef _LP64 707 __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong()); 708 #else 709 __ movptr(dest->as_register_lo(), c->as_jint_lo()); 710 __ movptr(dest->as_register_hi(), c->as_jint_hi()); 711 #endif // _LP64 712 break; 713 } 714 715 case T_OBJECT: { 716 if (patch_code != lir_patch_none) { 717 jobject2reg_with_patching(dest->as_register(), info); 718 } else { 719 __ movoop(dest->as_register(), c->as_jobject()); 720 } 721 break; 722 } 723 724 case T_METADATA: { 725 if (patch_code != lir_patch_none) { 726 klass2reg_with_patching(dest->as_register(), info); 727 } else { 728 __ mov_metadata(dest->as_register(), c->as_metadata()); 729 } 730 break; 731 } 732 733 case T_FLOAT: { 734 if (dest->is_single_xmm()) { 735 if (c->is_zero_float()) { 736 __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg()); 737 } else { 738 __ movflt(dest->as_xmm_float_reg(), 739 InternalAddress(float_constant(c->as_jfloat()))); 740 } 741 } else { 742 assert(dest->is_single_fpu(), "must be"); 743 assert(dest->fpu_regnr() == 0, "dest must be TOS"); 744 if (c->is_zero_float()) { 745 __ fldz(); 746 } else if (c->is_one_float()) { 747 __ fld1(); 748 } else { 749 __ fld_s (InternalAddress(float_constant(c->as_jfloat()))); 750 } 751 } 752 break; 753 } 754 755 case T_DOUBLE: { 756 if (dest->is_double_xmm()) { 757 if (c->is_zero_double()) { 758 __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg()); 759 } else { 760 __ movdbl(dest->as_xmm_double_reg(), 761 InternalAddress(double_constant(c->as_jdouble()))); 762 } 763 } else { 764 assert(dest->is_double_fpu(), "must be"); 765 assert(dest->fpu_regnrLo() == 0, "dest must be TOS"); 766 if (c->is_zero_double()) { 767 __ fldz(); 768 } else if (c->is_one_double()) { 769 __ fld1(); 770 } else { 771 __ fld_d (InternalAddress(double_constant(c->as_jdouble()))); 772 } 773 } 774 break; 775 } 776 777 default: 778 ShouldNotReachHere(); 779 } 780 } 781 782 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) { 783 assert(src->is_constant(), "should not call otherwise"); 784 assert(dest->is_stack(), "should not call otherwise"); 785 LIR_Const* c = src->as_constant_ptr(); 786 787 switch (c->type()) { 788 case T_INT: // fall through 789 case T_FLOAT: 790 __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits()); 791 break; 792 793 case T_ADDRESS: 794 __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits()); 795 break; 796 797 case T_OBJECT: 798 __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject()); 799 break; 800 801 case T_LONG: // fall through 802 case T_DOUBLE: 803 #ifdef _LP64 804 __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(), 805 lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits()); 806 #else 807 __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(), 808 lo_word_offset_in_bytes), c->as_jint_lo_bits()); 809 __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(), 810 hi_word_offset_in_bytes), c->as_jint_hi_bits()); 811 #endif // _LP64 812 break; 813 814 default: 815 ShouldNotReachHere(); 816 } 817 } 818 819 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) { 820 assert(src->is_constant(), "should not call otherwise"); 821 assert(dest->is_address(), "should not call otherwise"); 822 LIR_Const* c = src->as_constant_ptr(); 823 LIR_Address* addr = dest->as_address_ptr(); 824 825 int null_check_here = code_offset(); 826 switch (type) { 827 case T_INT: // fall through 828 case T_FLOAT: 829 __ movl(as_Address(addr), c->as_jint_bits()); 830 break; 831 832 case T_ADDRESS: 833 __ movptr(as_Address(addr), c->as_jint_bits()); 834 break; 835 836 case T_OBJECT: // fall through 837 case T_ARRAY: 838 if (c->as_jobject() == NULL) { 839 if (UseCompressedOops && !wide) { 840 __ movl(as_Address(addr), (int32_t)NULL_WORD); 841 } else { 842 __ movptr(as_Address(addr), NULL_WORD); 843 } 844 } else { 845 if (is_literal_address(addr)) { 846 ShouldNotReachHere(); 847 __ movoop(as_Address(addr, noreg), c->as_jobject()); 848 } else { 849 #ifdef _LP64 850 __ movoop(rscratch1, c->as_jobject()); 851 if (UseCompressedOops && !wide) { 852 __ encode_heap_oop(rscratch1); 853 null_check_here = code_offset(); 854 __ movl(as_Address_lo(addr), rscratch1); 855 } else { 856 null_check_here = code_offset(); 857 __ movptr(as_Address_lo(addr), rscratch1); 858 } 859 #else 860 __ movoop(as_Address(addr), c->as_jobject()); 861 #endif 862 } 863 } 864 break; 865 866 case T_LONG: // fall through 867 case T_DOUBLE: 868 #ifdef _LP64 869 if (is_literal_address(addr)) { 870 ShouldNotReachHere(); 871 __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits()); 872 } else { 873 __ movptr(r10, (intptr_t)c->as_jlong_bits()); 874 null_check_here = code_offset(); 875 __ movptr(as_Address_lo(addr), r10); 876 } 877 #else 878 // Always reachable in 32bit so this doesn't produce useless move literal 879 __ movptr(as_Address_hi(addr), c->as_jint_hi_bits()); 880 __ movptr(as_Address_lo(addr), c->as_jint_lo_bits()); 881 #endif // _LP64 882 break; 883 884 case T_BOOLEAN: // fall through 885 case T_BYTE: 886 __ movb(as_Address(addr), c->as_jint() & 0xFF); 887 break; 888 889 case T_CHAR: // fall through 890 case T_SHORT: 891 __ movw(as_Address(addr), c->as_jint() & 0xFFFF); 892 break; 893 894 default: 895 ShouldNotReachHere(); 896 }; 897 898 if (info != NULL) { 899 add_debug_info_for_null_check(null_check_here, info); 900 } 901 } 902 903 904 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) { 905 assert(src->is_register(), "should not call otherwise"); 906 assert(dest->is_register(), "should not call otherwise"); 907 908 // move between cpu-registers 909 if (dest->is_single_cpu()) { 910 #ifdef _LP64 911 if (src->type() == T_LONG) { 912 // Can do LONG -> OBJECT 913 move_regs(src->as_register_lo(), dest->as_register()); 914 return; 915 } 916 #endif 917 assert(src->is_single_cpu(), "must match"); 918 if (src->type() == T_OBJECT) { 919 __ verify_oop(src->as_register()); 920 } 921 move_regs(src->as_register(), dest->as_register()); 922 923 } else if (dest->is_double_cpu()) { 924 #ifdef _LP64 925 if (src->type() == T_OBJECT || src->type() == T_ARRAY) { 926 // Surprising to me but we can see move of a long to t_object 927 __ verify_oop(src->as_register()); 928 move_regs(src->as_register(), dest->as_register_lo()); 929 return; 930 } 931 #endif 932 assert(src->is_double_cpu(), "must match"); 933 Register f_lo = src->as_register_lo(); 934 Register f_hi = src->as_register_hi(); 935 Register t_lo = dest->as_register_lo(); 936 Register t_hi = dest->as_register_hi(); 937 #ifdef _LP64 938 assert(f_hi == f_lo, "must be same"); 939 assert(t_hi == t_lo, "must be same"); 940 move_regs(f_lo, t_lo); 941 #else 942 assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation"); 943 944 945 if (f_lo == t_hi && f_hi == t_lo) { 946 swap_reg(f_lo, f_hi); 947 } else if (f_hi == t_lo) { 948 assert(f_lo != t_hi, "overwriting register"); 949 move_regs(f_hi, t_hi); 950 move_regs(f_lo, t_lo); 951 } else { 952 assert(f_hi != t_lo, "overwriting register"); 953 move_regs(f_lo, t_lo); 954 move_regs(f_hi, t_hi); 955 } 956 #endif // LP64 957 958 // special moves from fpu-register to xmm-register 959 // necessary for method results 960 } else if (src->is_single_xmm() && !dest->is_single_xmm()) { 961 __ movflt(Address(rsp, 0), src->as_xmm_float_reg()); 962 __ fld_s(Address(rsp, 0)); 963 } else if (src->is_double_xmm() && !dest->is_double_xmm()) { 964 __ movdbl(Address(rsp, 0), src->as_xmm_double_reg()); 965 __ fld_d(Address(rsp, 0)); 966 } else if (dest->is_single_xmm() && !src->is_single_xmm()) { 967 __ fstp_s(Address(rsp, 0)); 968 __ movflt(dest->as_xmm_float_reg(), Address(rsp, 0)); 969 } else if (dest->is_double_xmm() && !src->is_double_xmm()) { 970 __ fstp_d(Address(rsp, 0)); 971 __ movdbl(dest->as_xmm_double_reg(), Address(rsp, 0)); 972 973 // move between xmm-registers 974 } else if (dest->is_single_xmm()) { 975 assert(src->is_single_xmm(), "must match"); 976 __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg()); 977 } else if (dest->is_double_xmm()) { 978 assert(src->is_double_xmm(), "must match"); 979 __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg()); 980 981 // move between fpu-registers (no instruction necessary because of fpu-stack) 982 } else if (dest->is_single_fpu() || dest->is_double_fpu()) { 983 assert(src->is_single_fpu() || src->is_double_fpu(), "must match"); 984 assert(src->fpu() == dest->fpu(), "currently should be nothing to do"); 985 } else { 986 ShouldNotReachHere(); 987 } 988 } 989 990 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) { 991 assert(src->is_register(), "should not call otherwise"); 992 assert(dest->is_stack(), "should not call otherwise"); 993 994 if (src->is_single_cpu()) { 995 Address dst = frame_map()->address_for_slot(dest->single_stack_ix()); 996 if (type == T_OBJECT || type == T_ARRAY) { 997 __ verify_oop(src->as_register()); 998 __ movptr (dst, src->as_register()); 999 } else if (type == T_METADATA) { 1000 __ movptr (dst, src->as_register()); 1001 } else { 1002 __ movl (dst, src->as_register()); 1003 } 1004 1005 } else if (src->is_double_cpu()) { 1006 Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes); 1007 Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes); 1008 __ movptr (dstLO, src->as_register_lo()); 1009 NOT_LP64(__ movptr (dstHI, src->as_register_hi())); 1010 1011 } else if (src->is_single_xmm()) { 1012 Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix()); 1013 __ movflt(dst_addr, src->as_xmm_float_reg()); 1014 1015 } else if (src->is_double_xmm()) { 1016 Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix()); 1017 __ movdbl(dst_addr, src->as_xmm_double_reg()); 1018 1019 } else if (src->is_single_fpu()) { 1020 assert(src->fpu_regnr() == 0, "argument must be on TOS"); 1021 Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix()); 1022 if (pop_fpu_stack) __ fstp_s (dst_addr); 1023 else __ fst_s (dst_addr); 1024 1025 } else if (src->is_double_fpu()) { 1026 assert(src->fpu_regnrLo() == 0, "argument must be on TOS"); 1027 Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix()); 1028 if (pop_fpu_stack) __ fstp_d (dst_addr); 1029 else __ fst_d (dst_addr); 1030 1031 } else { 1032 ShouldNotReachHere(); 1033 } 1034 } 1035 1036 1037 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) { 1038 LIR_Address* to_addr = dest->as_address_ptr(); 1039 PatchingStub* patch = NULL; 1040 Register compressed_src = rscratch1; 1041 1042 if (type == T_ARRAY || type == T_OBJECT) { 1043 __ verify_oop(src->as_register()); 1044 #ifdef _LP64 1045 if (UseCompressedOops && !wide) { 1046 __ movptr(compressed_src, src->as_register()); 1047 __ encode_heap_oop(compressed_src); 1048 if (patch_code != lir_patch_none) { 1049 info->oop_map()->set_narrowoop(compressed_src->as_VMReg()); 1050 } 1051 } 1052 #endif 1053 } 1054 1055 PatchingStub::PatchID patch_id = PatchingStub::access_field_id; 1056 if (patch_code != lir_patch_none) { 1057 if (patch_code == lir_patch_volatile_normal) { 1058 patch_id = PatchingStub::access_volatile_field_id; 1059 } 1060 patch = new PatchingStub(_masm, patch_id); 1061 Address toa = as_Address(to_addr); 1062 assert(toa.disp() != 0, "must have"); 1063 } 1064 1065 int null_check_here = code_offset(); 1066 switch (type) { 1067 case T_FLOAT: { 1068 if (src->is_single_xmm()) { 1069 __ movflt(as_Address(to_addr), src->as_xmm_float_reg()); 1070 } else { 1071 assert(src->is_single_fpu(), "must be"); 1072 assert(src->fpu_regnr() == 0, "argument must be on TOS"); 1073 if (pop_fpu_stack) __ fstp_s(as_Address(to_addr)); 1074 else __ fst_s (as_Address(to_addr)); 1075 } 1076 break; 1077 } 1078 1079 case T_DOUBLE: { 1080 if (src->is_double_xmm()) { 1081 __ movdbl(as_Address(to_addr), src->as_xmm_double_reg()); 1082 } else { 1083 assert(src->is_double_fpu(), "must be"); 1084 assert(src->fpu_regnrLo() == 0, "argument must be on TOS"); 1085 if (pop_fpu_stack) __ fstp_d(as_Address(to_addr)); 1086 else __ fst_d (as_Address(to_addr)); 1087 } 1088 break; 1089 } 1090 1091 case T_ARRAY: // fall through 1092 case T_OBJECT: // fall through 1093 if (UseCompressedOops && !wide) { 1094 __ movl(as_Address(to_addr), compressed_src); 1095 } else { 1096 __ movptr(as_Address(to_addr), src->as_register()); 1097 } 1098 break; 1099 case T_METADATA: 1100 // We get here to store a method pointer to the stack to pass to 1101 // a dtrace runtime call. This can't work on 64 bit with 1102 // compressed klass ptrs: T_METADATA can be a compressed klass 1103 // ptr or a 64 bit method pointer. 1104 LP64_ONLY(ShouldNotReachHere()); 1105 __ movptr(as_Address(to_addr), src->as_register()); 1106 break; 1107 case T_ADDRESS: 1108 __ movptr(as_Address(to_addr), src->as_register()); 1109 break; 1110 case T_INT: 1111 __ movl(as_Address(to_addr), src->as_register()); 1112 break; 1113 1114 case T_LONG: { 1115 Register from_lo = src->as_register_lo(); 1116 Register from_hi = src->as_register_hi(); 1117 #ifdef _LP64 1118 __ movptr(as_Address_lo(to_addr), from_lo); 1119 #else 1120 Register base = to_addr->base()->as_register(); 1121 Register index = noreg; 1122 if (to_addr->index()->is_register()) { 1123 index = to_addr->index()->as_register(); 1124 } 1125 if (base == from_lo || index == from_lo) { 1126 assert(base != from_hi, "can't be"); 1127 assert(index == noreg || (index != base && index != from_hi), "can't handle this"); 1128 __ movl(as_Address_hi(to_addr), from_hi); 1129 if (patch != NULL) { 1130 patching_epilog(patch, lir_patch_high, base, info); 1131 patch = new PatchingStub(_masm, patch_id); 1132 patch_code = lir_patch_low; 1133 } 1134 __ movl(as_Address_lo(to_addr), from_lo); 1135 } else { 1136 assert(index == noreg || (index != base && index != from_lo), "can't handle this"); 1137 __ movl(as_Address_lo(to_addr), from_lo); 1138 if (patch != NULL) { 1139 patching_epilog(patch, lir_patch_low, base, info); 1140 patch = new PatchingStub(_masm, patch_id); 1141 patch_code = lir_patch_high; 1142 } 1143 __ movl(as_Address_hi(to_addr), from_hi); 1144 } 1145 #endif // _LP64 1146 break; 1147 } 1148 1149 case T_BYTE: // fall through 1150 case T_BOOLEAN: { 1151 Register src_reg = src->as_register(); 1152 Address dst_addr = as_Address(to_addr); 1153 assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6"); 1154 __ movb(dst_addr, src_reg); 1155 break; 1156 } 1157 1158 case T_CHAR: // fall through 1159 case T_SHORT: 1160 __ movw(as_Address(to_addr), src->as_register()); 1161 break; 1162 1163 default: 1164 ShouldNotReachHere(); 1165 } 1166 if (info != NULL) { 1167 add_debug_info_for_null_check(null_check_here, info); 1168 } 1169 1170 if (patch_code != lir_patch_none) { 1171 patching_epilog(patch, patch_code, to_addr->base()->as_register(), info); 1172 } 1173 } 1174 1175 1176 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) { 1177 assert(src->is_stack(), "should not call otherwise"); 1178 assert(dest->is_register(), "should not call otherwise"); 1179 1180 if (dest->is_single_cpu()) { 1181 if (type == T_ARRAY || type == T_OBJECT) { 1182 __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); 1183 __ verify_oop(dest->as_register()); 1184 } else if (type == T_METADATA) { 1185 __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); 1186 } else { 1187 __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); 1188 } 1189 1190 } else if (dest->is_double_cpu()) { 1191 Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes); 1192 Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes); 1193 __ movptr(dest->as_register_lo(), src_addr_LO); 1194 NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI)); 1195 1196 } else if (dest->is_single_xmm()) { 1197 Address src_addr = frame_map()->address_for_slot(src->single_stack_ix()); 1198 __ movflt(dest->as_xmm_float_reg(), src_addr); 1199 1200 } else if (dest->is_double_xmm()) { 1201 Address src_addr = frame_map()->address_for_slot(src->double_stack_ix()); 1202 __ movdbl(dest->as_xmm_double_reg(), src_addr); 1203 1204 } else if (dest->is_single_fpu()) { 1205 assert(dest->fpu_regnr() == 0, "dest must be TOS"); 1206 Address src_addr = frame_map()->address_for_slot(src->single_stack_ix()); 1207 __ fld_s(src_addr); 1208 1209 } else if (dest->is_double_fpu()) { 1210 assert(dest->fpu_regnrLo() == 0, "dest must be TOS"); 1211 Address src_addr = frame_map()->address_for_slot(src->double_stack_ix()); 1212 __ fld_d(src_addr); 1213 1214 } else { 1215 ShouldNotReachHere(); 1216 } 1217 } 1218 1219 1220 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) { 1221 if (src->is_single_stack()) { 1222 if (type == T_OBJECT || type == T_ARRAY) { 1223 __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix())); 1224 __ popptr (frame_map()->address_for_slot(dest->single_stack_ix())); 1225 } else { 1226 #ifndef _LP64 1227 __ pushl(frame_map()->address_for_slot(src ->single_stack_ix())); 1228 __ popl (frame_map()->address_for_slot(dest->single_stack_ix())); 1229 #else 1230 //no pushl on 64bits 1231 __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix())); 1232 __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1); 1233 #endif 1234 } 1235 1236 } else if (src->is_double_stack()) { 1237 #ifdef _LP64 1238 __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix())); 1239 __ popptr (frame_map()->address_for_slot(dest->double_stack_ix())); 1240 #else 1241 __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0)); 1242 // push and pop the part at src + wordSize, adding wordSize for the previous push 1243 __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize)); 1244 __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize)); 1245 __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0)); 1246 #endif // _LP64 1247 1248 } else { 1249 ShouldNotReachHere(); 1250 } 1251 } 1252 1253 1254 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) { 1255 assert(src->is_address(), "should not call otherwise"); 1256 assert(dest->is_register(), "should not call otherwise"); 1257 1258 LIR_Address* addr = src->as_address_ptr(); 1259 Address from_addr = as_Address(addr); 1260 1261 if (addr->base()->type() == T_OBJECT) { 1262 __ verify_oop(addr->base()->as_pointer_register()); 1263 } 1264 1265 switch (type) { 1266 case T_BOOLEAN: // fall through 1267 case T_BYTE: // fall through 1268 case T_CHAR: // fall through 1269 case T_SHORT: 1270 if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) { 1271 // on pre P6 processors we may get partial register stalls 1272 // so blow away the value of to_rinfo before loading a 1273 // partial word into it. Do it here so that it precedes 1274 // the potential patch point below. 1275 __ xorptr(dest->as_register(), dest->as_register()); 1276 } 1277 break; 1278 } 1279 1280 PatchingStub* patch = NULL; 1281 PatchingStub::PatchID patch_id = PatchingStub::access_field_id; 1282 if (patch_code != lir_patch_none) { 1283 if (patch_code == lir_patch_volatile_normal) { 1284 patch_id = PatchingStub::access_volatile_field_id; 1285 } 1286 patch = new PatchingStub(_masm, patch_id); 1287 assert(from_addr.disp() != 0, "must have"); 1288 } 1289 if (info != NULL) { 1290 add_debug_info_for_null_check_here(info); 1291 } 1292 1293 switch (type) { 1294 case T_FLOAT: { 1295 if (dest->is_single_xmm()) { 1296 __ movflt(dest->as_xmm_float_reg(), from_addr); 1297 } else { 1298 assert(dest->is_single_fpu(), "must be"); 1299 assert(dest->fpu_regnr() == 0, "dest must be TOS"); 1300 __ fld_s(from_addr); 1301 } 1302 break; 1303 } 1304 1305 case T_DOUBLE: { 1306 if (dest->is_double_xmm()) { 1307 __ movdbl(dest->as_xmm_double_reg(), from_addr); 1308 } else { 1309 assert(dest->is_double_fpu(), "must be"); 1310 assert(dest->fpu_regnrLo() == 0, "dest must be TOS"); 1311 __ fld_d(from_addr); 1312 } 1313 break; 1314 } 1315 1316 case T_OBJECT: // fall through 1317 case T_ARRAY: // fall through 1318 if (UseCompressedOops && !wide) { 1319 __ movl(dest->as_register(), from_addr); 1320 } else { 1321 __ movptr(dest->as_register(), from_addr); 1322 } 1323 break; 1324 1325 case T_ADDRESS: 1326 if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) { 1327 __ movl(dest->as_register(), from_addr); 1328 } else { 1329 __ movptr(dest->as_register(), from_addr); 1330 } 1331 break; 1332 case T_INT: 1333 __ movl(dest->as_register(), from_addr); 1334 break; 1335 1336 case T_LONG: { 1337 Register to_lo = dest->as_register_lo(); 1338 Register to_hi = dest->as_register_hi(); 1339 #ifdef _LP64 1340 __ movptr(to_lo, as_Address_lo(addr)); 1341 #else 1342 Register base = addr->base()->as_register(); 1343 Register index = noreg; 1344 if (addr->index()->is_register()) { 1345 index = addr->index()->as_register(); 1346 } 1347 if ((base == to_lo && index == to_hi) || 1348 (base == to_hi && index == to_lo)) { 1349 // addresses with 2 registers are only formed as a result of 1350 // array access so this code will never have to deal with 1351 // patches or null checks. 1352 assert(info == NULL && patch == NULL, "must be"); 1353 __ lea(to_hi, as_Address(addr)); 1354 __ movl(to_lo, Address(to_hi, 0)); 1355 __ movl(to_hi, Address(to_hi, BytesPerWord)); 1356 } else if (base == to_lo || index == to_lo) { 1357 assert(base != to_hi, "can't be"); 1358 assert(index == noreg || (index != base && index != to_hi), "can't handle this"); 1359 __ movl(to_hi, as_Address_hi(addr)); 1360 if (patch != NULL) { 1361 patching_epilog(patch, lir_patch_high, base, info); 1362 patch = new PatchingStub(_masm, patch_id); 1363 patch_code = lir_patch_low; 1364 } 1365 __ movl(to_lo, as_Address_lo(addr)); 1366 } else { 1367 assert(index == noreg || (index != base && index != to_lo), "can't handle this"); 1368 __ movl(to_lo, as_Address_lo(addr)); 1369 if (patch != NULL) { 1370 patching_epilog(patch, lir_patch_low, base, info); 1371 patch = new PatchingStub(_masm, patch_id); 1372 patch_code = lir_patch_high; 1373 } 1374 __ movl(to_hi, as_Address_hi(addr)); 1375 } 1376 #endif // _LP64 1377 break; 1378 } 1379 1380 case T_BOOLEAN: // fall through 1381 case T_BYTE: { 1382 Register dest_reg = dest->as_register(); 1383 assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6"); 1384 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) { 1385 __ movsbl(dest_reg, from_addr); 1386 } else { 1387 __ movb(dest_reg, from_addr); 1388 __ shll(dest_reg, 24); 1389 __ sarl(dest_reg, 24); 1390 } 1391 break; 1392 } 1393 1394 case T_CHAR: { 1395 Register dest_reg = dest->as_register(); 1396 assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6"); 1397 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) { 1398 __ movzwl(dest_reg, from_addr); 1399 } else { 1400 __ movw(dest_reg, from_addr); 1401 } 1402 break; 1403 } 1404 1405 case T_SHORT: { 1406 Register dest_reg = dest->as_register(); 1407 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) { 1408 __ movswl(dest_reg, from_addr); 1409 } else { 1410 __ movw(dest_reg, from_addr); 1411 __ shll(dest_reg, 16); 1412 __ sarl(dest_reg, 16); 1413 } 1414 break; 1415 } 1416 1417 default: 1418 ShouldNotReachHere(); 1419 } 1420 1421 if (patch != NULL) { 1422 patching_epilog(patch, patch_code, addr->base()->as_register(), info); 1423 } 1424 1425 if (type == T_ARRAY || type == T_OBJECT) { 1426 #ifdef _LP64 1427 if (UseCompressedOops && !wide) { 1428 __ decode_heap_oop(dest->as_register()); 1429 } 1430 #endif 1431 __ verify_oop(dest->as_register()); 1432 } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) { 1433 #ifdef _LP64 1434 if (UseCompressedClassPointers) { 1435 __ decode_klass_not_null(dest->as_register()); 1436 } 1437 #endif 1438 } 1439 } 1440 1441 1442 void LIR_Assembler::prefetchr(LIR_Opr src) { 1443 LIR_Address* addr = src->as_address_ptr(); 1444 Address from_addr = as_Address(addr); 1445 1446 if (VM_Version::supports_sse()) { 1447 switch (ReadPrefetchInstr) { 1448 case 0: 1449 __ prefetchnta(from_addr); break; 1450 case 1: 1451 __ prefetcht0(from_addr); break; 1452 case 2: 1453 __ prefetcht2(from_addr); break; 1454 default: 1455 ShouldNotReachHere(); break; 1456 } 1457 } else if (VM_Version::supports_3dnow_prefetch()) { 1458 __ prefetchr(from_addr); 1459 } 1460 } 1461 1462 1463 void LIR_Assembler::prefetchw(LIR_Opr src) { 1464 LIR_Address* addr = src->as_address_ptr(); 1465 Address from_addr = as_Address(addr); 1466 1467 if (VM_Version::supports_sse()) { 1468 switch (AllocatePrefetchInstr) { 1469 case 0: 1470 __ prefetchnta(from_addr); break; 1471 case 1: 1472 __ prefetcht0(from_addr); break; 1473 case 2: 1474 __ prefetcht2(from_addr); break; 1475 case 3: 1476 __ prefetchw(from_addr); break; 1477 default: 1478 ShouldNotReachHere(); break; 1479 } 1480 } else if (VM_Version::supports_3dnow_prefetch()) { 1481 __ prefetchw(from_addr); 1482 } 1483 } 1484 1485 1486 NEEDS_CLEANUP; // This could be static? 1487 Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const { 1488 int elem_size = type2aelembytes(type); 1489 switch (elem_size) { 1490 case 1: return Address::times_1; 1491 case 2: return Address::times_2; 1492 case 4: return Address::times_4; 1493 case 8: return Address::times_8; 1494 } 1495 ShouldNotReachHere(); 1496 return Address::no_scale; 1497 } 1498 1499 1500 void LIR_Assembler::emit_op3(LIR_Op3* op) { 1501 switch (op->code()) { 1502 case lir_idiv: 1503 case lir_irem: 1504 arithmetic_idiv(op->code(), 1505 op->in_opr1(), 1506 op->in_opr2(), 1507 op->in_opr3(), 1508 op->result_opr(), 1509 op->info()); 1510 break; 1511 default: ShouldNotReachHere(); break; 1512 } 1513 } 1514 1515 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) { 1516 #ifdef ASSERT 1517 assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label"); 1518 if (op->block() != NULL) _branch_target_blocks.append(op->block()); 1519 if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock()); 1520 #endif 1521 1522 if (op->cond() == lir_cond_always) { 1523 if (op->info() != NULL) add_debug_info_for_branch(op->info()); 1524 __ jmp (*(op->label())); 1525 } else { 1526 Assembler::Condition acond = Assembler::zero; 1527 if (op->code() == lir_cond_float_branch) { 1528 assert(op->ublock() != NULL, "must have unordered successor"); 1529 __ jcc(Assembler::parity, *(op->ublock()->label())); 1530 switch(op->cond()) { 1531 case lir_cond_equal: acond = Assembler::equal; break; 1532 case lir_cond_notEqual: acond = Assembler::notEqual; break; 1533 case lir_cond_less: acond = Assembler::below; break; 1534 case lir_cond_lessEqual: acond = Assembler::belowEqual; break; 1535 case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break; 1536 case lir_cond_greater: acond = Assembler::above; break; 1537 default: ShouldNotReachHere(); 1538 } 1539 } else { 1540 switch (op->cond()) { 1541 case lir_cond_equal: acond = Assembler::equal; break; 1542 case lir_cond_notEqual: acond = Assembler::notEqual; break; 1543 case lir_cond_less: acond = Assembler::less; break; 1544 case lir_cond_lessEqual: acond = Assembler::lessEqual; break; 1545 case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break; 1546 case lir_cond_greater: acond = Assembler::greater; break; 1547 case lir_cond_belowEqual: acond = Assembler::belowEqual; break; 1548 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; break; 1549 default: ShouldNotReachHere(); 1550 } 1551 } 1552 __ jcc(acond,*(op->label())); 1553 } 1554 } 1555 1556 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) { 1557 LIR_Opr src = op->in_opr(); 1558 LIR_Opr dest = op->result_opr(); 1559 1560 switch (op->bytecode()) { 1561 case Bytecodes::_i2l: 1562 #ifdef _LP64 1563 __ movl2ptr(dest->as_register_lo(), src->as_register()); 1564 #else 1565 move_regs(src->as_register(), dest->as_register_lo()); 1566 move_regs(src->as_register(), dest->as_register_hi()); 1567 __ sarl(dest->as_register_hi(), 31); 1568 #endif // LP64 1569 break; 1570 1571 case Bytecodes::_l2i: 1572 #ifdef _LP64 1573 __ movl(dest->as_register(), src->as_register_lo()); 1574 #else 1575 move_regs(src->as_register_lo(), dest->as_register()); 1576 #endif 1577 break; 1578 1579 case Bytecodes::_i2b: 1580 move_regs(src->as_register(), dest->as_register()); 1581 __ sign_extend_byte(dest->as_register()); 1582 break; 1583 1584 case Bytecodes::_i2c: 1585 move_regs(src->as_register(), dest->as_register()); 1586 __ andl(dest->as_register(), 0xFFFF); 1587 break; 1588 1589 case Bytecodes::_i2s: 1590 move_regs(src->as_register(), dest->as_register()); 1591 __ sign_extend_short(dest->as_register()); 1592 break; 1593 1594 1595 case Bytecodes::_f2d: 1596 case Bytecodes::_d2f: 1597 if (dest->is_single_xmm()) { 1598 __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg()); 1599 } else if (dest->is_double_xmm()) { 1600 __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg()); 1601 } else { 1602 assert(src->fpu() == dest->fpu(), "register must be equal"); 1603 // do nothing (float result is rounded later through spilling) 1604 } 1605 break; 1606 1607 case Bytecodes::_i2f: 1608 case Bytecodes::_i2d: 1609 if (dest->is_single_xmm()) { 1610 __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register()); 1611 } else if (dest->is_double_xmm()) { 1612 __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register()); 1613 } else { 1614 assert(dest->fpu() == 0, "result must be on TOS"); 1615 __ movl(Address(rsp, 0), src->as_register()); 1616 __ fild_s(Address(rsp, 0)); 1617 } 1618 break; 1619 1620 case Bytecodes::_f2i: 1621 case Bytecodes::_d2i: 1622 if (src->is_single_xmm()) { 1623 __ cvttss2sil(dest->as_register(), src->as_xmm_float_reg()); 1624 } else if (src->is_double_xmm()) { 1625 __ cvttsd2sil(dest->as_register(), src->as_xmm_double_reg()); 1626 } else { 1627 assert(src->fpu() == 0, "input must be on TOS"); 1628 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_trunc())); 1629 __ fist_s(Address(rsp, 0)); 1630 __ movl(dest->as_register(), Address(rsp, 0)); 1631 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std())); 1632 } 1633 1634 // IA32 conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub 1635 assert(op->stub() != NULL, "stub required"); 1636 __ cmpl(dest->as_register(), 0x80000000); 1637 __ jcc(Assembler::equal, *op->stub()->entry()); 1638 __ bind(*op->stub()->continuation()); 1639 break; 1640 1641 case Bytecodes::_l2f: 1642 case Bytecodes::_l2d: 1643 assert(!dest->is_xmm_register(), "result in xmm register not supported (no SSE instruction present)"); 1644 assert(dest->fpu() == 0, "result must be on TOS"); 1645 1646 __ movptr(Address(rsp, 0), src->as_register_lo()); 1647 NOT_LP64(__ movl(Address(rsp, BytesPerWord), src->as_register_hi())); 1648 __ fild_d(Address(rsp, 0)); 1649 // float result is rounded later through spilling 1650 break; 1651 1652 case Bytecodes::_f2l: 1653 case Bytecodes::_d2l: 1654 assert(!src->is_xmm_register(), "input in xmm register not supported (no SSE instruction present)"); 1655 assert(src->fpu() == 0, "input must be on TOS"); 1656 assert(dest == FrameMap::long0_opr, "runtime stub places result in these registers"); 1657 1658 // instruction sequence too long to inline it here 1659 { 1660 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::fpu2long_stub_id))); 1661 } 1662 break; 1663 1664 default: ShouldNotReachHere(); 1665 } 1666 } 1667 1668 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) { 1669 if (op->init_check()) { 1670 __ cmpb(Address(op->klass()->as_register(), 1671 InstanceKlass::init_state_offset()), 1672 InstanceKlass::fully_initialized); 1673 add_debug_info_for_null_check_here(op->stub()->info()); 1674 __ jcc(Assembler::notEqual, *op->stub()->entry()); 1675 } 1676 __ allocate_object(op->obj()->as_register(), 1677 op->tmp1()->as_register(), 1678 op->tmp2()->as_register(), 1679 op->header_size(), 1680 op->object_size(), 1681 op->klass()->as_register(), 1682 *op->stub()->entry()); 1683 __ bind(*op->stub()->continuation()); 1684 } 1685 1686 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { 1687 Register len = op->len()->as_register(); 1688 LP64_ONLY( __ movslq(len, len); ) 1689 1690 if (UseSlowPath || 1691 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) || 1692 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) { 1693 __ jmp(*op->stub()->entry()); 1694 } else { 1695 Register tmp1 = op->tmp1()->as_register(); 1696 Register tmp2 = op->tmp2()->as_register(); 1697 Register tmp3 = op->tmp3()->as_register(); 1698 if (len == tmp1) { 1699 tmp1 = tmp3; 1700 } else if (len == tmp2) { 1701 tmp2 = tmp3; 1702 } else if (len == tmp3) { 1703 // everything is ok 1704 } else { 1705 __ mov(tmp3, len); 1706 } 1707 __ allocate_array(op->obj()->as_register(), 1708 len, 1709 tmp1, 1710 tmp2, 1711 arrayOopDesc::header_size(op->type()), 1712 array_element_size(op->type()), 1713 op->klass()->as_register(), 1714 *op->stub()->entry()); 1715 } 1716 __ bind(*op->stub()->continuation()); 1717 } 1718 1719 void LIR_Assembler::type_profile_helper(Register mdo, 1720 ciMethodData *md, ciProfileData *data, 1721 Register recv, Label* update_done) { 1722 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) { 1723 Label next_test; 1724 // See if the receiver is receiver[n]. 1725 __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)))); 1726 __ jccb(Assembler::notEqual, next_test); 1727 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))); 1728 __ addptr(data_addr, DataLayout::counter_increment); 1729 __ jmp(*update_done); 1730 __ bind(next_test); 1731 } 1732 1733 // Didn't find receiver; find next empty slot and fill it in 1734 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) { 1735 Label next_test; 1736 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))); 1737 __ cmpptr(recv_addr, (intptr_t)NULL_WORD); 1738 __ jccb(Assembler::notEqual, next_test); 1739 __ movptr(recv_addr, recv); 1740 __ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment); 1741 __ jmp(*update_done); 1742 __ bind(next_test); 1743 } 1744 } 1745 1746 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) { 1747 // we always need a stub for the failure case. 1748 CodeStub* stub = op->stub(); 1749 Register obj = op->object()->as_register(); 1750 Register k_RInfo = op->tmp1()->as_register(); 1751 Register klass_RInfo = op->tmp2()->as_register(); 1752 Register dst = op->result_opr()->as_register(); 1753 ciKlass* k = op->klass(); 1754 Register Rtmp1 = noreg; 1755 1756 // check if it needs to be profiled 1757 ciMethodData* md; 1758 ciProfileData* data; 1759 1760 if (op->should_profile()) { 1761 ciMethod* method = op->profiled_method(); 1762 assert(method != NULL, "Should have method"); 1763 int bci = op->profiled_bci(); 1764 md = method->method_data_or_null(); 1765 assert(md != NULL, "Sanity"); 1766 data = md->bci_to_data(bci); 1767 assert(data != NULL, "need data for type check"); 1768 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 1769 } 1770 Label profile_cast_success, profile_cast_failure; 1771 Label *success_target = op->should_profile() ? &profile_cast_success : success; 1772 Label *failure_target = op->should_profile() ? &profile_cast_failure : failure; 1773 1774 if (obj == k_RInfo) { 1775 k_RInfo = dst; 1776 } else if (obj == klass_RInfo) { 1777 klass_RInfo = dst; 1778 } 1779 if (k->is_loaded() && !UseCompressedClassPointers) { 1780 select_different_registers(obj, dst, k_RInfo, klass_RInfo); 1781 } else { 1782 Rtmp1 = op->tmp3()->as_register(); 1783 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1); 1784 } 1785 1786 assert_different_registers(obj, k_RInfo, klass_RInfo); 1787 1788 __ cmpptr(obj, (int32_t)NULL_WORD); 1789 if (op->should_profile()) { 1790 Label not_null; 1791 __ jccb(Assembler::notEqual, not_null); 1792 // Object is null; update MDO and exit 1793 Register mdo = klass_RInfo; 1794 __ mov_metadata(mdo, md->constant_encoding()); 1795 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset())); 1796 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant()); 1797 __ orl(data_addr, header_bits); 1798 __ jmp(*obj_is_null); 1799 __ bind(not_null); 1800 } else { 1801 __ jcc(Assembler::equal, *obj_is_null); 1802 } 1803 1804 if (!k->is_loaded()) { 1805 klass2reg_with_patching(k_RInfo, op->info_for_patch()); 1806 } else { 1807 #ifdef _LP64 1808 __ mov_metadata(k_RInfo, k->constant_encoding()); 1809 #endif // _LP64 1810 } 1811 __ verify_oop(obj); 1812 1813 if (op->fast_check()) { 1814 // get object class 1815 // not a safepoint as obj null check happens earlier 1816 #ifdef _LP64 1817 if (UseCompressedClassPointers) { 1818 __ load_klass(Rtmp1, obj); 1819 __ cmpptr(k_RInfo, Rtmp1); 1820 } else { 1821 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 1822 } 1823 #else 1824 if (k->is_loaded()) { 1825 __ cmpklass(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding()); 1826 } else { 1827 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 1828 } 1829 #endif 1830 __ jcc(Assembler::notEqual, *failure_target); 1831 // successful cast, fall through to profile or jump 1832 } else { 1833 // get object class 1834 // not a safepoint as obj null check happens earlier 1835 __ load_klass(klass_RInfo, obj); 1836 if (k->is_loaded()) { 1837 // See if we get an immediate positive hit 1838 #ifdef _LP64 1839 __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset())); 1840 #else 1841 __ cmpklass(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding()); 1842 #endif // _LP64 1843 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) { 1844 __ jcc(Assembler::notEqual, *failure_target); 1845 // successful cast, fall through to profile or jump 1846 } else { 1847 // See if we get an immediate positive hit 1848 __ jcc(Assembler::equal, *success_target); 1849 // check for self 1850 #ifdef _LP64 1851 __ cmpptr(klass_RInfo, k_RInfo); 1852 #else 1853 __ cmpklass(klass_RInfo, k->constant_encoding()); 1854 #endif // _LP64 1855 __ jcc(Assembler::equal, *success_target); 1856 1857 __ push(klass_RInfo); 1858 #ifdef _LP64 1859 __ push(k_RInfo); 1860 #else 1861 __ pushklass(k->constant_encoding()); 1862 #endif // _LP64 1863 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); 1864 __ pop(klass_RInfo); 1865 __ pop(klass_RInfo); 1866 // result is a boolean 1867 __ cmpl(klass_RInfo, 0); 1868 __ jcc(Assembler::equal, *failure_target); 1869 // successful cast, fall through to profile or jump 1870 } 1871 } else { 1872 // perform the fast part of the checking logic 1873 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL); 1874 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 1875 __ push(klass_RInfo); 1876 __ push(k_RInfo); 1877 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); 1878 __ pop(klass_RInfo); 1879 __ pop(k_RInfo); 1880 // result is a boolean 1881 __ cmpl(k_RInfo, 0); 1882 __ jcc(Assembler::equal, *failure_target); 1883 // successful cast, fall through to profile or jump 1884 } 1885 } 1886 if (op->should_profile()) { 1887 Register mdo = klass_RInfo, recv = k_RInfo; 1888 __ bind(profile_cast_success); 1889 __ mov_metadata(mdo, md->constant_encoding()); 1890 __ load_klass(recv, obj); 1891 Label update_done; 1892 type_profile_helper(mdo, md, data, recv, success); 1893 __ jmp(*success); 1894 1895 __ bind(profile_cast_failure); 1896 __ mov_metadata(mdo, md->constant_encoding()); 1897 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 1898 __ subptr(counter_addr, DataLayout::counter_increment); 1899 __ jmp(*failure); 1900 } 1901 __ jmp(*success); 1902 } 1903 1904 1905 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { 1906 LIR_Code code = op->code(); 1907 if (code == lir_store_check) { 1908 Register value = op->object()->as_register(); 1909 Register array = op->array()->as_register(); 1910 Register k_RInfo = op->tmp1()->as_register(); 1911 Register klass_RInfo = op->tmp2()->as_register(); 1912 Register Rtmp1 = op->tmp3()->as_register(); 1913 1914 CodeStub* stub = op->stub(); 1915 1916 // check if it needs to be profiled 1917 ciMethodData* md; 1918 ciProfileData* data; 1919 1920 if (op->should_profile()) { 1921 ciMethod* method = op->profiled_method(); 1922 assert(method != NULL, "Should have method"); 1923 int bci = op->profiled_bci(); 1924 md = method->method_data_or_null(); 1925 assert(md != NULL, "Sanity"); 1926 data = md->bci_to_data(bci); 1927 assert(data != NULL, "need data for type check"); 1928 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 1929 } 1930 Label profile_cast_success, profile_cast_failure, done; 1931 Label *success_target = op->should_profile() ? &profile_cast_success : &done; 1932 Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry(); 1933 1934 __ cmpptr(value, (int32_t)NULL_WORD); 1935 if (op->should_profile()) { 1936 Label not_null; 1937 __ jccb(Assembler::notEqual, not_null); 1938 // Object is null; update MDO and exit 1939 Register mdo = klass_RInfo; 1940 __ mov_metadata(mdo, md->constant_encoding()); 1941 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset())); 1942 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant()); 1943 __ orl(data_addr, header_bits); 1944 __ jmp(done); 1945 __ bind(not_null); 1946 } else { 1947 __ jcc(Assembler::equal, done); 1948 } 1949 1950 add_debug_info_for_null_check_here(op->info_for_exception()); 1951 __ load_klass(k_RInfo, array); 1952 __ load_klass(klass_RInfo, value); 1953 1954 // get instance klass (it's already uncompressed) 1955 __ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset())); 1956 // perform the fast part of the checking logic 1957 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL); 1958 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 1959 __ push(klass_RInfo); 1960 __ push(k_RInfo); 1961 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); 1962 __ pop(klass_RInfo); 1963 __ pop(k_RInfo); 1964 // result is a boolean 1965 __ cmpl(k_RInfo, 0); 1966 __ jcc(Assembler::equal, *failure_target); 1967 // fall through to the success case 1968 1969 if (op->should_profile()) { 1970 Register mdo = klass_RInfo, recv = k_RInfo; 1971 __ bind(profile_cast_success); 1972 __ mov_metadata(mdo, md->constant_encoding()); 1973 __ load_klass(recv, value); 1974 Label update_done; 1975 type_profile_helper(mdo, md, data, recv, &done); 1976 __ jmpb(done); 1977 1978 __ bind(profile_cast_failure); 1979 __ mov_metadata(mdo, md->constant_encoding()); 1980 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 1981 __ subptr(counter_addr, DataLayout::counter_increment); 1982 __ jmp(*stub->entry()); 1983 } 1984 1985 __ bind(done); 1986 } else 1987 if (code == lir_checkcast) { 1988 Register obj = op->object()->as_register(); 1989 Register dst = op->result_opr()->as_register(); 1990 Label success; 1991 emit_typecheck_helper(op, &success, op->stub()->entry(), &success); 1992 __ bind(success); 1993 if (dst != obj) { 1994 __ mov(dst, obj); 1995 } 1996 } else 1997 if (code == lir_instanceof) { 1998 Register obj = op->object()->as_register(); 1999 Register dst = op->result_opr()->as_register(); 2000 Label success, failure, done; 2001 emit_typecheck_helper(op, &success, &failure, &failure); 2002 __ bind(failure); 2003 __ xorptr(dst, dst); 2004 __ jmpb(done); 2005 __ bind(success); 2006 __ movptr(dst, 1); 2007 __ bind(done); 2008 } else { 2009 ShouldNotReachHere(); 2010 } 2011 2012 } 2013 2014 2015 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { 2016 if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) { 2017 assert(op->cmp_value()->as_register_lo() == rax, "wrong register"); 2018 assert(op->cmp_value()->as_register_hi() == rdx, "wrong register"); 2019 assert(op->new_value()->as_register_lo() == rbx, "wrong register"); 2020 assert(op->new_value()->as_register_hi() == rcx, "wrong register"); 2021 Register addr = op->addr()->as_register(); 2022 if (os::is_MP()) { 2023 __ lock(); 2024 } 2025 NOT_LP64(__ cmpxchg8(Address(addr, 0))); 2026 2027 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) { 2028 NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");) 2029 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo()); 2030 Register newval = op->new_value()->as_register(); 2031 Register cmpval = op->cmp_value()->as_register(); 2032 assert(cmpval == rax, "wrong register"); 2033 assert(newval != NULL, "new val must be register"); 2034 assert(cmpval != newval, "cmp and new values must be in different registers"); 2035 assert(cmpval != addr, "cmp and addr must be in different registers"); 2036 assert(newval != addr, "new value and addr must be in different registers"); 2037 2038 if ( op->code() == lir_cas_obj) { 2039 #ifdef _LP64 2040 if (UseCompressedOops) { 2041 __ encode_heap_oop(cmpval); 2042 __ mov(rscratch1, newval); 2043 __ encode_heap_oop(rscratch1); 2044 if (os::is_MP()) { 2045 __ lock(); 2046 } 2047 // cmpval (rax) is implicitly used by this instruction 2048 __ cmpxchgl(rscratch1, Address(addr, 0)); 2049 } else 2050 #endif 2051 { 2052 if (os::is_MP()) { 2053 __ lock(); 2054 } 2055 __ cmpxchgptr(newval, Address(addr, 0)); 2056 } 2057 } else { 2058 assert(op->code() == lir_cas_int, "lir_cas_int expected"); 2059 if (os::is_MP()) { 2060 __ lock(); 2061 } 2062 __ cmpxchgl(newval, Address(addr, 0)); 2063 } 2064 #ifdef _LP64 2065 } else if (op->code() == lir_cas_long) { 2066 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo()); 2067 Register newval = op->new_value()->as_register_lo(); 2068 Register cmpval = op->cmp_value()->as_register_lo(); 2069 assert(cmpval == rax, "wrong register"); 2070 assert(newval != NULL, "new val must be register"); 2071 assert(cmpval != newval, "cmp and new values must be in different registers"); 2072 assert(cmpval != addr, "cmp and addr must be in different registers"); 2073 assert(newval != addr, "new value and addr must be in different registers"); 2074 if (os::is_MP()) { 2075 __ lock(); 2076 } 2077 __ cmpxchgq(newval, Address(addr, 0)); 2078 #endif // _LP64 2079 } else { 2080 Unimplemented(); 2081 } 2082 } 2083 2084 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) { 2085 Assembler::Condition acond, ncond; 2086 switch (condition) { 2087 case lir_cond_equal: acond = Assembler::equal; ncond = Assembler::notEqual; break; 2088 case lir_cond_notEqual: acond = Assembler::notEqual; ncond = Assembler::equal; break; 2089 case lir_cond_less: acond = Assembler::less; ncond = Assembler::greaterEqual; break; 2090 case lir_cond_lessEqual: acond = Assembler::lessEqual; ncond = Assembler::greater; break; 2091 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less; break; 2092 case lir_cond_greater: acond = Assembler::greater; ncond = Assembler::lessEqual; break; 2093 case lir_cond_belowEqual: acond = Assembler::belowEqual; ncond = Assembler::above; break; 2094 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; ncond = Assembler::below; break; 2095 default: ShouldNotReachHere(); 2096 } 2097 2098 if (opr1->is_cpu_register()) { 2099 reg2reg(opr1, result); 2100 } else if (opr1->is_stack()) { 2101 stack2reg(opr1, result, result->type()); 2102 } else if (opr1->is_constant()) { 2103 const2reg(opr1, result, lir_patch_none, NULL); 2104 } else { 2105 ShouldNotReachHere(); 2106 } 2107 2108 if (VM_Version::supports_cmov() && !opr2->is_constant()) { 2109 // optimized version that does not require a branch 2110 if (opr2->is_single_cpu()) { 2111 assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move"); 2112 __ cmov(ncond, result->as_register(), opr2->as_register()); 2113 } else if (opr2->is_double_cpu()) { 2114 assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); 2115 assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); 2116 __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo()); 2117 NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), opr2->as_register_hi());) 2118 } else if (opr2->is_single_stack()) { 2119 __ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix())); 2120 } else if (opr2->is_double_stack()) { 2121 __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes)); 2122 NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));) 2123 } else { 2124 ShouldNotReachHere(); 2125 } 2126 2127 } else { 2128 Label skip; 2129 __ jcc (acond, skip); 2130 if (opr2->is_cpu_register()) { 2131 reg2reg(opr2, result); 2132 } else if (opr2->is_stack()) { 2133 stack2reg(opr2, result, result->type()); 2134 } else if (opr2->is_constant()) { 2135 const2reg(opr2, result, lir_patch_none, NULL); 2136 } else { 2137 ShouldNotReachHere(); 2138 } 2139 __ bind(skip); 2140 } 2141 } 2142 2143 2144 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) { 2145 assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method"); 2146 2147 if (left->is_single_cpu()) { 2148 assert(left == dest, "left and dest must be equal"); 2149 Register lreg = left->as_register(); 2150 2151 if (right->is_single_cpu()) { 2152 // cpu register - cpu register 2153 Register rreg = right->as_register(); 2154 switch (code) { 2155 case lir_add: __ addl (lreg, rreg); break; 2156 case lir_sub: __ subl (lreg, rreg); break; 2157 case lir_mul: __ imull(lreg, rreg); break; 2158 default: ShouldNotReachHere(); 2159 } 2160 2161 } else if (right->is_stack()) { 2162 // cpu register - stack 2163 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); 2164 switch (code) { 2165 case lir_add: __ addl(lreg, raddr); break; 2166 case lir_sub: __ subl(lreg, raddr); break; 2167 default: ShouldNotReachHere(); 2168 } 2169 2170 } else if (right->is_constant()) { 2171 // cpu register - constant 2172 jint c = right->as_constant_ptr()->as_jint(); 2173 switch (code) { 2174 case lir_add: { 2175 __ incrementl(lreg, c); 2176 break; 2177 } 2178 case lir_sub: { 2179 __ decrementl(lreg, c); 2180 break; 2181 } 2182 default: ShouldNotReachHere(); 2183 } 2184 2185 } else { 2186 ShouldNotReachHere(); 2187 } 2188 2189 } else if (left->is_double_cpu()) { 2190 assert(left == dest, "left and dest must be equal"); 2191 Register lreg_lo = left->as_register_lo(); 2192 Register lreg_hi = left->as_register_hi(); 2193 2194 if (right->is_double_cpu()) { 2195 // cpu register - cpu register 2196 Register rreg_lo = right->as_register_lo(); 2197 Register rreg_hi = right->as_register_hi(); 2198 NOT_LP64(assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi)); 2199 LP64_ONLY(assert_different_registers(lreg_lo, rreg_lo)); 2200 switch (code) { 2201 case lir_add: 2202 __ addptr(lreg_lo, rreg_lo); 2203 NOT_LP64(__ adcl(lreg_hi, rreg_hi)); 2204 break; 2205 case lir_sub: 2206 __ subptr(lreg_lo, rreg_lo); 2207 NOT_LP64(__ sbbl(lreg_hi, rreg_hi)); 2208 break; 2209 case lir_mul: 2210 #ifdef _LP64 2211 __ imulq(lreg_lo, rreg_lo); 2212 #else 2213 assert(lreg_lo == rax && lreg_hi == rdx, "must be"); 2214 __ imull(lreg_hi, rreg_lo); 2215 __ imull(rreg_hi, lreg_lo); 2216 __ addl (rreg_hi, lreg_hi); 2217 __ mull (rreg_lo); 2218 __ addl (lreg_hi, rreg_hi); 2219 #endif // _LP64 2220 break; 2221 default: 2222 ShouldNotReachHere(); 2223 } 2224 2225 } else if (right->is_constant()) { 2226 // cpu register - constant 2227 #ifdef _LP64 2228 jlong c = right->as_constant_ptr()->as_jlong_bits(); 2229 __ movptr(r10, (intptr_t) c); 2230 switch (code) { 2231 case lir_add: 2232 __ addptr(lreg_lo, r10); 2233 break; 2234 case lir_sub: 2235 __ subptr(lreg_lo, r10); 2236 break; 2237 default: 2238 ShouldNotReachHere(); 2239 } 2240 #else 2241 jint c_lo = right->as_constant_ptr()->as_jint_lo(); 2242 jint c_hi = right->as_constant_ptr()->as_jint_hi(); 2243 switch (code) { 2244 case lir_add: 2245 __ addptr(lreg_lo, c_lo); 2246 __ adcl(lreg_hi, c_hi); 2247 break; 2248 case lir_sub: 2249 __ subptr(lreg_lo, c_lo); 2250 __ sbbl(lreg_hi, c_hi); 2251 break; 2252 default: 2253 ShouldNotReachHere(); 2254 } 2255 #endif // _LP64 2256 2257 } else { 2258 ShouldNotReachHere(); 2259 } 2260 2261 } else if (left->is_single_xmm()) { 2262 assert(left == dest, "left and dest must be equal"); 2263 XMMRegister lreg = left->as_xmm_float_reg(); 2264 2265 if (right->is_single_xmm()) { 2266 XMMRegister rreg = right->as_xmm_float_reg(); 2267 switch (code) { 2268 case lir_add: __ addss(lreg, rreg); break; 2269 case lir_sub: __ subss(lreg, rreg); break; 2270 case lir_mul_strictfp: // fall through 2271 case lir_mul: __ mulss(lreg, rreg); break; 2272 case lir_div_strictfp: // fall through 2273 case lir_div: __ divss(lreg, rreg); break; 2274 default: ShouldNotReachHere(); 2275 } 2276 } else { 2277 Address raddr; 2278 if (right->is_single_stack()) { 2279 raddr = frame_map()->address_for_slot(right->single_stack_ix()); 2280 } else if (right->is_constant()) { 2281 // hack for now 2282 raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat()))); 2283 } else { 2284 ShouldNotReachHere(); 2285 } 2286 switch (code) { 2287 case lir_add: __ addss(lreg, raddr); break; 2288 case lir_sub: __ subss(lreg, raddr); break; 2289 case lir_mul_strictfp: // fall through 2290 case lir_mul: __ mulss(lreg, raddr); break; 2291 case lir_div_strictfp: // fall through 2292 case lir_div: __ divss(lreg, raddr); break; 2293 default: ShouldNotReachHere(); 2294 } 2295 } 2296 2297 } else if (left->is_double_xmm()) { 2298 assert(left == dest, "left and dest must be equal"); 2299 2300 XMMRegister lreg = left->as_xmm_double_reg(); 2301 if (right->is_double_xmm()) { 2302 XMMRegister rreg = right->as_xmm_double_reg(); 2303 switch (code) { 2304 case lir_add: __ addsd(lreg, rreg); break; 2305 case lir_sub: __ subsd(lreg, rreg); break; 2306 case lir_mul_strictfp: // fall through 2307 case lir_mul: __ mulsd(lreg, rreg); break; 2308 case lir_div_strictfp: // fall through 2309 case lir_div: __ divsd(lreg, rreg); break; 2310 default: ShouldNotReachHere(); 2311 } 2312 } else { 2313 Address raddr; 2314 if (right->is_double_stack()) { 2315 raddr = frame_map()->address_for_slot(right->double_stack_ix()); 2316 } else if (right->is_constant()) { 2317 // hack for now 2318 raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble()))); 2319 } else { 2320 ShouldNotReachHere(); 2321 } 2322 switch (code) { 2323 case lir_add: __ addsd(lreg, raddr); break; 2324 case lir_sub: __ subsd(lreg, raddr); break; 2325 case lir_mul_strictfp: // fall through 2326 case lir_mul: __ mulsd(lreg, raddr); break; 2327 case lir_div_strictfp: // fall through 2328 case lir_div: __ divsd(lreg, raddr); break; 2329 default: ShouldNotReachHere(); 2330 } 2331 } 2332 2333 } else if (left->is_single_fpu()) { 2334 assert(dest->is_single_fpu(), "fpu stack allocation required"); 2335 2336 if (right->is_single_fpu()) { 2337 arith_fpu_implementation(code, left->fpu_regnr(), right->fpu_regnr(), dest->fpu_regnr(), pop_fpu_stack); 2338 2339 } else { 2340 assert(left->fpu_regnr() == 0, "left must be on TOS"); 2341 assert(dest->fpu_regnr() == 0, "dest must be on TOS"); 2342 2343 Address raddr; 2344 if (right->is_single_stack()) { 2345 raddr = frame_map()->address_for_slot(right->single_stack_ix()); 2346 } else if (right->is_constant()) { 2347 address const_addr = float_constant(right->as_jfloat()); 2348 assert(const_addr != NULL, "incorrect float/double constant maintainance"); 2349 // hack for now 2350 raddr = __ as_Address(InternalAddress(const_addr)); 2351 } else { 2352 ShouldNotReachHere(); 2353 } 2354 2355 switch (code) { 2356 case lir_add: __ fadd_s(raddr); break; 2357 case lir_sub: __ fsub_s(raddr); break; 2358 case lir_mul_strictfp: // fall through 2359 case lir_mul: __ fmul_s(raddr); break; 2360 case lir_div_strictfp: // fall through 2361 case lir_div: __ fdiv_s(raddr); break; 2362 default: ShouldNotReachHere(); 2363 } 2364 } 2365 2366 } else if (left->is_double_fpu()) { 2367 assert(dest->is_double_fpu(), "fpu stack allocation required"); 2368 2369 if (code == lir_mul_strictfp || code == lir_div_strictfp) { 2370 // Double values require special handling for strictfp mul/div on x86 2371 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1())); 2372 __ fmulp(left->fpu_regnrLo() + 1); 2373 } 2374 2375 if (right->is_double_fpu()) { 2376 arith_fpu_implementation(code, left->fpu_regnrLo(), right->fpu_regnrLo(), dest->fpu_regnrLo(), pop_fpu_stack); 2377 2378 } else { 2379 assert(left->fpu_regnrLo() == 0, "left must be on TOS"); 2380 assert(dest->fpu_regnrLo() == 0, "dest must be on TOS"); 2381 2382 Address raddr; 2383 if (right->is_double_stack()) { 2384 raddr = frame_map()->address_for_slot(right->double_stack_ix()); 2385 } else if (right->is_constant()) { 2386 // hack for now 2387 raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble()))); 2388 } else { 2389 ShouldNotReachHere(); 2390 } 2391 2392 switch (code) { 2393 case lir_add: __ fadd_d(raddr); break; 2394 case lir_sub: __ fsub_d(raddr); break; 2395 case lir_mul_strictfp: // fall through 2396 case lir_mul: __ fmul_d(raddr); break; 2397 case lir_div_strictfp: // fall through 2398 case lir_div: __ fdiv_d(raddr); break; 2399 default: ShouldNotReachHere(); 2400 } 2401 } 2402 2403 if (code == lir_mul_strictfp || code == lir_div_strictfp) { 2404 // Double values require special handling for strictfp mul/div on x86 2405 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2())); 2406 __ fmulp(dest->fpu_regnrLo() + 1); 2407 } 2408 2409 } else if (left->is_single_stack() || left->is_address()) { 2410 assert(left == dest, "left and dest must be equal"); 2411 2412 Address laddr; 2413 if (left->is_single_stack()) { 2414 laddr = frame_map()->address_for_slot(left->single_stack_ix()); 2415 } else if (left->is_address()) { 2416 laddr = as_Address(left->as_address_ptr()); 2417 } else { 2418 ShouldNotReachHere(); 2419 } 2420 2421 if (right->is_single_cpu()) { 2422 Register rreg = right->as_register(); 2423 switch (code) { 2424 case lir_add: __ addl(laddr, rreg); break; 2425 case lir_sub: __ subl(laddr, rreg); break; 2426 default: ShouldNotReachHere(); 2427 } 2428 } else if (right->is_constant()) { 2429 jint c = right->as_constant_ptr()->as_jint(); 2430 switch (code) { 2431 case lir_add: { 2432 __ incrementl(laddr, c); 2433 break; 2434 } 2435 case lir_sub: { 2436 __ decrementl(laddr, c); 2437 break; 2438 } 2439 default: ShouldNotReachHere(); 2440 } 2441 } else { 2442 ShouldNotReachHere(); 2443 } 2444 2445 } else { 2446 ShouldNotReachHere(); 2447 } 2448 } 2449 2450 void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) { 2451 assert(pop_fpu_stack || (left_index == dest_index || right_index == dest_index), "invalid LIR"); 2452 assert(!pop_fpu_stack || (left_index - 1 == dest_index || right_index - 1 == dest_index), "invalid LIR"); 2453 assert(left_index == 0 || right_index == 0, "either must be on top of stack"); 2454 2455 bool left_is_tos = (left_index == 0); 2456 bool dest_is_tos = (dest_index == 0); 2457 int non_tos_index = (left_is_tos ? right_index : left_index); 2458 2459 switch (code) { 2460 case lir_add: 2461 if (pop_fpu_stack) __ faddp(non_tos_index); 2462 else if (dest_is_tos) __ fadd (non_tos_index); 2463 else __ fadda(non_tos_index); 2464 break; 2465 2466 case lir_sub: 2467 if (left_is_tos) { 2468 if (pop_fpu_stack) __ fsubrp(non_tos_index); 2469 else if (dest_is_tos) __ fsub (non_tos_index); 2470 else __ fsubra(non_tos_index); 2471 } else { 2472 if (pop_fpu_stack) __ fsubp (non_tos_index); 2473 else if (dest_is_tos) __ fsubr (non_tos_index); 2474 else __ fsuba (non_tos_index); 2475 } 2476 break; 2477 2478 case lir_mul_strictfp: // fall through 2479 case lir_mul: 2480 if (pop_fpu_stack) __ fmulp(non_tos_index); 2481 else if (dest_is_tos) __ fmul (non_tos_index); 2482 else __ fmula(non_tos_index); 2483 break; 2484 2485 case lir_div_strictfp: // fall through 2486 case lir_div: 2487 if (left_is_tos) { 2488 if (pop_fpu_stack) __ fdivrp(non_tos_index); 2489 else if (dest_is_tos) __ fdiv (non_tos_index); 2490 else __ fdivra(non_tos_index); 2491 } else { 2492 if (pop_fpu_stack) __ fdivp (non_tos_index); 2493 else if (dest_is_tos) __ fdivr (non_tos_index); 2494 else __ fdiva (non_tos_index); 2495 } 2496 break; 2497 2498 case lir_rem: 2499 assert(left_is_tos && dest_is_tos && right_index == 1, "must be guaranteed by FPU stack allocation"); 2500 __ fremr(noreg); 2501 break; 2502 2503 default: 2504 ShouldNotReachHere(); 2505 } 2506 } 2507 2508 2509 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) { 2510 if (value->is_double_xmm()) { 2511 switch(code) { 2512 case lir_abs : 2513 { 2514 if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) { 2515 __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); 2516 } 2517 __ andpd(dest->as_xmm_double_reg(), 2518 ExternalAddress((address)double_signmask_pool)); 2519 } 2520 break; 2521 2522 case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break; 2523 // all other intrinsics are not available in the SSE instruction set, so FPU is used 2524 default : ShouldNotReachHere(); 2525 } 2526 2527 } else if (value->is_double_fpu()) { 2528 assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS"); 2529 switch(code) { 2530 case lir_log : __ flog() ; break; 2531 case lir_log10 : __ flog10() ; break; 2532 case lir_abs : __ fabs() ; break; 2533 case lir_sqrt : __ fsqrt(); break; 2534 case lir_sin : 2535 // Should consider not saving rbx, if not necessary 2536 __ trigfunc('s', op->as_Op2()->fpu_stack_size()); 2537 break; 2538 case lir_cos : 2539 // Should consider not saving rbx, if not necessary 2540 assert(op->as_Op2()->fpu_stack_size() <= 6, "sin and cos need two free stack slots"); 2541 __ trigfunc('c', op->as_Op2()->fpu_stack_size()); 2542 break; 2543 case lir_tan : 2544 // Should consider not saving rbx, if not necessary 2545 __ trigfunc('t', op->as_Op2()->fpu_stack_size()); 2546 break; 2547 case lir_exp : 2548 __ exp_with_fallback(op->as_Op2()->fpu_stack_size()); 2549 break; 2550 case lir_pow : 2551 __ pow_with_fallback(op->as_Op2()->fpu_stack_size()); 2552 break; 2553 default : ShouldNotReachHere(); 2554 } 2555 } else { 2556 Unimplemented(); 2557 } 2558 } 2559 2560 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) { 2561 // assert(left->destroys_register(), "check"); 2562 if (left->is_single_cpu()) { 2563 Register reg = left->as_register(); 2564 if (right->is_constant()) { 2565 int val = right->as_constant_ptr()->as_jint(); 2566 switch (code) { 2567 case lir_logic_and: __ andl (reg, val); break; 2568 case lir_logic_or: __ orl (reg, val); break; 2569 case lir_logic_xor: __ xorl (reg, val); break; 2570 default: ShouldNotReachHere(); 2571 } 2572 } else if (right->is_stack()) { 2573 // added support for stack operands 2574 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); 2575 switch (code) { 2576 case lir_logic_and: __ andl (reg, raddr); break; 2577 case lir_logic_or: __ orl (reg, raddr); break; 2578 case lir_logic_xor: __ xorl (reg, raddr); break; 2579 default: ShouldNotReachHere(); 2580 } 2581 } else { 2582 Register rright = right->as_register(); 2583 switch (code) { 2584 case lir_logic_and: __ andptr (reg, rright); break; 2585 case lir_logic_or : __ orptr (reg, rright); break; 2586 case lir_logic_xor: __ xorptr (reg, rright); break; 2587 default: ShouldNotReachHere(); 2588 } 2589 } 2590 move_regs(reg, dst->as_register()); 2591 } else { 2592 Register l_lo = left->as_register_lo(); 2593 Register l_hi = left->as_register_hi(); 2594 if (right->is_constant()) { 2595 #ifdef _LP64 2596 __ mov64(rscratch1, right->as_constant_ptr()->as_jlong()); 2597 switch (code) { 2598 case lir_logic_and: 2599 __ andq(l_lo, rscratch1); 2600 break; 2601 case lir_logic_or: 2602 __ orq(l_lo, rscratch1); 2603 break; 2604 case lir_logic_xor: 2605 __ xorq(l_lo, rscratch1); 2606 break; 2607 default: ShouldNotReachHere(); 2608 } 2609 #else 2610 int r_lo = right->as_constant_ptr()->as_jint_lo(); 2611 int r_hi = right->as_constant_ptr()->as_jint_hi(); 2612 switch (code) { 2613 case lir_logic_and: 2614 __ andl(l_lo, r_lo); 2615 __ andl(l_hi, r_hi); 2616 break; 2617 case lir_logic_or: 2618 __ orl(l_lo, r_lo); 2619 __ orl(l_hi, r_hi); 2620 break; 2621 case lir_logic_xor: 2622 __ xorl(l_lo, r_lo); 2623 __ xorl(l_hi, r_hi); 2624 break; 2625 default: ShouldNotReachHere(); 2626 } 2627 #endif // _LP64 2628 } else { 2629 #ifdef _LP64 2630 Register r_lo; 2631 if (right->type() == T_OBJECT || right->type() == T_ARRAY) { 2632 r_lo = right->as_register(); 2633 } else { 2634 r_lo = right->as_register_lo(); 2635 } 2636 #else 2637 Register r_lo = right->as_register_lo(); 2638 Register r_hi = right->as_register_hi(); 2639 assert(l_lo != r_hi, "overwriting registers"); 2640 #endif 2641 switch (code) { 2642 case lir_logic_and: 2643 __ andptr(l_lo, r_lo); 2644 NOT_LP64(__ andptr(l_hi, r_hi);) 2645 break; 2646 case lir_logic_or: 2647 __ orptr(l_lo, r_lo); 2648 NOT_LP64(__ orptr(l_hi, r_hi);) 2649 break; 2650 case lir_logic_xor: 2651 __ xorptr(l_lo, r_lo); 2652 NOT_LP64(__ xorptr(l_hi, r_hi);) 2653 break; 2654 default: ShouldNotReachHere(); 2655 } 2656 } 2657 2658 Register dst_lo = dst->as_register_lo(); 2659 Register dst_hi = dst->as_register_hi(); 2660 2661 #ifdef _LP64 2662 move_regs(l_lo, dst_lo); 2663 #else 2664 if (dst_lo == l_hi) { 2665 assert(dst_hi != l_lo, "overwriting registers"); 2666 move_regs(l_hi, dst_hi); 2667 move_regs(l_lo, dst_lo); 2668 } else { 2669 assert(dst_lo != l_hi, "overwriting registers"); 2670 move_regs(l_lo, dst_lo); 2671 move_regs(l_hi, dst_hi); 2672 } 2673 #endif // _LP64 2674 } 2675 } 2676 2677 2678 // we assume that rax, and rdx can be overwritten 2679 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { 2680 2681 assert(left->is_single_cpu(), "left must be register"); 2682 assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant"); 2683 assert(result->is_single_cpu(), "result must be register"); 2684 2685 // assert(left->destroys_register(), "check"); 2686 // assert(right->destroys_register(), "check"); 2687 2688 Register lreg = left->as_register(); 2689 Register dreg = result->as_register(); 2690 2691 if (right->is_constant()) { 2692 int divisor = right->as_constant_ptr()->as_jint(); 2693 assert(divisor > 0 && is_power_of_2(divisor), "must be"); 2694 if (code == lir_idiv) { 2695 assert(lreg == rax, "must be rax,"); 2696 assert(temp->as_register() == rdx, "tmp register must be rdx"); 2697 __ cdql(); // sign extend into rdx:rax 2698 if (divisor == 2) { 2699 __ subl(lreg, rdx); 2700 } else { 2701 __ andl(rdx, divisor - 1); 2702 __ addl(lreg, rdx); 2703 } 2704 __ sarl(lreg, log2_intptr(divisor)); 2705 move_regs(lreg, dreg); 2706 } else if (code == lir_irem) { 2707 Label done; 2708 __ mov(dreg, lreg); 2709 __ andl(dreg, 0x80000000 | (divisor - 1)); 2710 __ jcc(Assembler::positive, done); 2711 __ decrement(dreg); 2712 __ orl(dreg, ~(divisor - 1)); 2713 __ increment(dreg); 2714 __ bind(done); 2715 } else { 2716 ShouldNotReachHere(); 2717 } 2718 } else { 2719 Register rreg = right->as_register(); 2720 assert(lreg == rax, "left register must be rax,"); 2721 assert(rreg != rdx, "right register must not be rdx"); 2722 assert(temp->as_register() == rdx, "tmp register must be rdx"); 2723 2724 move_regs(lreg, rax); 2725 2726 int idivl_offset = __ corrected_idivl(rreg); 2727 add_debug_info_for_div0(idivl_offset, info); 2728 if (code == lir_irem) { 2729 move_regs(rdx, dreg); // result is in rdx 2730 } else { 2731 move_regs(rax, dreg); 2732 } 2733 } 2734 } 2735 2736 2737 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) { 2738 if (opr1->is_single_cpu()) { 2739 Register reg1 = opr1->as_register(); 2740 if (opr2->is_single_cpu()) { 2741 // cpu register - cpu register 2742 if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) { 2743 __ cmpptr(reg1, opr2->as_register()); 2744 } else { 2745 assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?"); 2746 __ cmpl(reg1, opr2->as_register()); 2747 } 2748 } else if (opr2->is_stack()) { 2749 // cpu register - stack 2750 if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) { 2751 __ cmpptr(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 2752 } else { 2753 __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 2754 } 2755 } else if (opr2->is_constant()) { 2756 // cpu register - constant 2757 LIR_Const* c = opr2->as_constant_ptr(); 2758 if (c->type() == T_INT) { 2759 __ cmpl(reg1, c->as_jint()); 2760 } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) { 2761 // In 64bit oops are single register 2762 jobject o = c->as_jobject(); 2763 if (o == NULL) { 2764 __ cmpptr(reg1, (int32_t)NULL_WORD); 2765 } else { 2766 #ifdef _LP64 2767 __ movoop(rscratch1, o); 2768 __ cmpptr(reg1, rscratch1); 2769 #else 2770 __ cmpoop(reg1, c->as_jobject()); 2771 #endif // _LP64 2772 } 2773 } else { 2774 fatal(err_msg("unexpected type: %s", basictype_to_str(c->type()))); 2775 } 2776 // cpu register - address 2777 } else if (opr2->is_address()) { 2778 if (op->info() != NULL) { 2779 add_debug_info_for_null_check_here(op->info()); 2780 } 2781 __ cmpl(reg1, as_Address(opr2->as_address_ptr())); 2782 } else { 2783 ShouldNotReachHere(); 2784 } 2785 2786 } else if(opr1->is_double_cpu()) { 2787 Register xlo = opr1->as_register_lo(); 2788 Register xhi = opr1->as_register_hi(); 2789 if (opr2->is_double_cpu()) { 2790 #ifdef _LP64 2791 __ cmpptr(xlo, opr2->as_register_lo()); 2792 #else 2793 // cpu register - cpu register 2794 Register ylo = opr2->as_register_lo(); 2795 Register yhi = opr2->as_register_hi(); 2796 __ subl(xlo, ylo); 2797 __ sbbl(xhi, yhi); 2798 if (condition == lir_cond_equal || condition == lir_cond_notEqual) { 2799 __ orl(xhi, xlo); 2800 } 2801 #endif // _LP64 2802 } else if (opr2->is_constant()) { 2803 // cpu register - constant 0 2804 assert(opr2->as_jlong() == (jlong)0, "only handles zero"); 2805 #ifdef _LP64 2806 __ cmpptr(xlo, (int32_t)opr2->as_jlong()); 2807 #else 2808 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles equals case"); 2809 __ orl(xhi, xlo); 2810 #endif // _LP64 2811 } else { 2812 ShouldNotReachHere(); 2813 } 2814 2815 } else if (opr1->is_single_xmm()) { 2816 XMMRegister reg1 = opr1->as_xmm_float_reg(); 2817 if (opr2->is_single_xmm()) { 2818 // xmm register - xmm register 2819 __ ucomiss(reg1, opr2->as_xmm_float_reg()); 2820 } else if (opr2->is_stack()) { 2821 // xmm register - stack 2822 __ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 2823 } else if (opr2->is_constant()) { 2824 // xmm register - constant 2825 __ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat()))); 2826 } else if (opr2->is_address()) { 2827 // xmm register - address 2828 if (op->info() != NULL) { 2829 add_debug_info_for_null_check_here(op->info()); 2830 } 2831 __ ucomiss(reg1, as_Address(opr2->as_address_ptr())); 2832 } else { 2833 ShouldNotReachHere(); 2834 } 2835 2836 } else if (opr1->is_double_xmm()) { 2837 XMMRegister reg1 = opr1->as_xmm_double_reg(); 2838 if (opr2->is_double_xmm()) { 2839 // xmm register - xmm register 2840 __ ucomisd(reg1, opr2->as_xmm_double_reg()); 2841 } else if (opr2->is_stack()) { 2842 // xmm register - stack 2843 __ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix())); 2844 } else if (opr2->is_constant()) { 2845 // xmm register - constant 2846 __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble()))); 2847 } else if (opr2->is_address()) { 2848 // xmm register - address 2849 if (op->info() != NULL) { 2850 add_debug_info_for_null_check_here(op->info()); 2851 } 2852 __ ucomisd(reg1, as_Address(opr2->pointer()->as_address())); 2853 } else { 2854 ShouldNotReachHere(); 2855 } 2856 2857 } else if(opr1->is_single_fpu() || opr1->is_double_fpu()) { 2858 assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)"); 2859 assert(opr2->is_fpu_register(), "both must be registers"); 2860 __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1); 2861 2862 } else if (opr1->is_address() && opr2->is_constant()) { 2863 LIR_Const* c = opr2->as_constant_ptr(); 2864 #ifdef _LP64 2865 if (c->type() == T_OBJECT || c->type() == T_ARRAY) { 2866 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse"); 2867 __ movoop(rscratch1, c->as_jobject()); 2868 } 2869 #endif // LP64 2870 if (op->info() != NULL) { 2871 add_debug_info_for_null_check_here(op->info()); 2872 } 2873 // special case: address - constant 2874 LIR_Address* addr = opr1->as_address_ptr(); 2875 if (c->type() == T_INT) { 2876 __ cmpl(as_Address(addr), c->as_jint()); 2877 } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) { 2878 #ifdef _LP64 2879 // %%% Make this explode if addr isn't reachable until we figure out a 2880 // better strategy by giving noreg as the temp for as_Address 2881 __ cmpptr(rscratch1, as_Address(addr, noreg)); 2882 #else 2883 __ cmpoop(as_Address(addr), c->as_jobject()); 2884 #endif // _LP64 2885 } else { 2886 ShouldNotReachHere(); 2887 } 2888 2889 } else { 2890 ShouldNotReachHere(); 2891 } 2892 } 2893 2894 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) { 2895 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) { 2896 if (left->is_single_xmm()) { 2897 assert(right->is_single_xmm(), "must match"); 2898 __ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i); 2899 } else if (left->is_double_xmm()) { 2900 assert(right->is_double_xmm(), "must match"); 2901 __ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i); 2902 2903 } else { 2904 assert(left->is_single_fpu() || left->is_double_fpu(), "must be"); 2905 assert(right->is_single_fpu() || right->is_double_fpu(), "must match"); 2906 2907 assert(left->fpu() == 0, "left must be on TOS"); 2908 __ fcmp2int(dst->as_register(), code == lir_ucmp_fd2i, right->fpu(), 2909 op->fpu_pop_count() > 0, op->fpu_pop_count() > 1); 2910 } 2911 } else { 2912 assert(code == lir_cmp_l2i, "check"); 2913 #ifdef _LP64 2914 Label done; 2915 Register dest = dst->as_register(); 2916 __ cmpptr(left->as_register_lo(), right->as_register_lo()); 2917 __ movl(dest, -1); 2918 __ jccb(Assembler::less, done); 2919 __ set_byte_if_not_zero(dest); 2920 __ movzbl(dest, dest); 2921 __ bind(done); 2922 #else 2923 __ lcmp2int(left->as_register_hi(), 2924 left->as_register_lo(), 2925 right->as_register_hi(), 2926 right->as_register_lo()); 2927 move_regs(left->as_register_hi(), dst->as_register()); 2928 #endif // _LP64 2929 } 2930 } 2931 2932 2933 void LIR_Assembler::align_call(LIR_Code code) { 2934 if (os::is_MP()) { 2935 // make sure that the displacement word of the call ends up word aligned 2936 int offset = __ offset(); 2937 switch (code) { 2938 case lir_static_call: 2939 case lir_optvirtual_call: 2940 case lir_dynamic_call: 2941 offset += NativeCall::displacement_offset; 2942 break; 2943 case lir_icvirtual_call: 2944 offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size; 2945 break; 2946 case lir_virtual_call: // currently, sparc-specific for niagara 2947 default: ShouldNotReachHere(); 2948 } 2949 while (offset++ % BytesPerWord != 0) { 2950 __ nop(); 2951 } 2952 } 2953 } 2954 2955 2956 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) { 2957 assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0, 2958 "must be aligned"); 2959 __ call(AddressLiteral(op->addr(), rtype)); 2960 add_call_info(code_offset(), op->info()); 2961 } 2962 2963 2964 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) { 2965 RelocationHolder rh = virtual_call_Relocation::spec(pc()); 2966 __ movptr(rax, (intptr_t)Universe::non_oop_word()); 2967 assert(!os::is_MP() || 2968 (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0, 2969 "must be aligned"); 2970 2971 if (op->info()->is_profiled_call()) { 2972 // The static call stub is not used for standard ic calls (a 2973 // transition stub is allocated instead for calls to the 2974 // interpreter). We emit the static call stub for profiled call 2975 // sites anyway because the runtime locates the profile call stub 2976 // by first looking up the static call stub and then walking over 2977 // it to the profile call stub. 2978 emit_static_call_stub(); 2979 // Emit the profile call stub right behind the static call stub 2980 emit_profile_call_stub(op->info()->method(), op->info()->stack()->bci(), SharedRuntime::get_resolve_profile_call_stub()); 2981 } 2982 2983 __ call(AddressLiteral(op->addr(), rh)); 2984 add_call_info(code_offset(), op->info()); 2985 } 2986 2987 2988 /* Currently, vtable-dispatch is only enabled for sparc platforms */ 2989 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) { 2990 ShouldNotReachHere(); 2991 } 2992 2993 2994 void LIR_Assembler::emit_static_call_stub() { 2995 address call_pc = __ pc(); 2996 address stub = __ start_a_stub(call_stub_size); 2997 if (stub == NULL) { 2998 bailout("static call stub overflow"); 2999 return; 3000 } 3001 3002 int start = __ offset(); 3003 if (os::is_MP()) { 3004 // make sure that the displacement word of the call ends up word aligned 3005 int offset = __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset; 3006 while (offset++ % BytesPerWord != 0) { 3007 __ nop(); 3008 } 3009 } 3010 __ relocate(static_stub_Relocation::spec(call_pc)); 3011 __ mov_metadata(rbx, (Metadata*)NULL); 3012 // must be set to -1 at code generation time 3013 assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP"); 3014 // On 64bit this will die since it will take a movq & jmp, must be only a jmp 3015 __ jump(RuntimeAddress(__ pc())); 3016 3017 assert(__ offset() - start <= call_stub_size, "stub too big"); 3018 __ end_a_stub(); 3019 } 3020 3021 void LIR_Assembler::emit_profile_call_stub(ciMethod* method, int bci, address dest) { 3022 ciMethodData* md = method->method_data(); 3023 if (md == NULL) { 3024 bailout("out of memory building methodDataOop"); 3025 return; 3026 } 3027 address call_pc = __ pc(); 3028 address stub = __ start_a_stub(profile_call_stub_size); 3029 if (stub == NULL) { 3030 bailout("profile call stub overflow"); 3031 return; 3032 } 3033 3034 int start = __ offset(); 3035 address off_addr = __ pc(); 3036 3037 // The runtime needs the starting address of the profile call stub 3038 // (to make the call site jump to the stub) and the location of the 3039 // first jump in the stub (to make it branch to the callee). The 3040 // starting address is found by first looking up the static call 3041 // stub and then finding the profile call stub right behind 3042 // it. Finding the jump is tricky because the code emitted before it 3043 // depends on runtime conditions. Here, we first emit an integer (0) 3044 // that we change to contain the offset of the jump within the stub 3045 // when the jump is emitted and the offset is known. Locating the 3046 // jump can then be done from the runtime by reading this offset and 3047 // adding it to the address of the start of the stub. 3048 __ emit_int32(0); 3049 3050 ciProfileData* data = md->bci_to_data(bci); 3051 assert(data->is_CounterData(), "need CounterData for calls"); 3052 3053 Register tmp = NOT_LP64(rdi) LP64_ONLY(r12); 3054 3055 __ mov_metadata(tmp, md->constant_encoding()); 3056 Address counter_addr(tmp, md->byte_offset_of_slot(data, CounterData::count_offset())); 3057 __ addl(counter_addr, DataLayout::counter_increment); 3058 __ cmpl(counter_addr, C1ProfileCompileThreshold); 3059 Label L; 3060 __ jcc(Assembler::greater, L); 3061 3062 *(jint*)off_addr = __ offset() - start; 3063 __ jump(RuntimeAddress(__ pc())); 3064 3065 __ bind(L); 3066 __ jump(RuntimeAddress(dest)); 3067 3068 assert(__ offset() - start <= profile_call_stub_size, "stub too big"); 3069 __ end_a_stub(); 3070 } 3071 3072 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { 3073 assert(exceptionOop->as_register() == rax, "must match"); 3074 assert(exceptionPC->as_register() == rdx, "must match"); 3075 3076 // exception object is not added to oop map by LinearScan 3077 // (LinearScan assumes that no oops are in fixed registers) 3078 info->add_register_oop(exceptionOop); 3079 Runtime1::StubID unwind_id; 3080 3081 // get current pc information 3082 // pc is only needed if the method has an exception handler, the unwind code does not need it. 3083 int pc_for_athrow_offset = __ offset(); 3084 InternalAddress pc_for_athrow(__ pc()); 3085 __ lea(exceptionPC->as_register(), pc_for_athrow); 3086 add_call_info(pc_for_athrow_offset, info); // for exception handler 3087 3088 __ verify_not_null_oop(rax); 3089 // search an exception handler (rax: exception oop, rdx: throwing pc) 3090 if (compilation()->has_fpu_code()) { 3091 unwind_id = Runtime1::handle_exception_id; 3092 } else { 3093 unwind_id = Runtime1::handle_exception_nofpu_id; 3094 } 3095 __ call(RuntimeAddress(Runtime1::entry_for(unwind_id))); 3096 3097 // enough room for two byte trap 3098 __ nop(); 3099 } 3100 3101 3102 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { 3103 assert(exceptionOop->as_register() == rax, "must match"); 3104 3105 __ jmp(_unwind_handler_entry); 3106 } 3107 3108 3109 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { 3110 3111 // optimized version for linear scan: 3112 // * count must be already in ECX (guaranteed by LinearScan) 3113 // * left and dest must be equal 3114 // * tmp must be unused 3115 assert(count->as_register() == SHIFT_count, "count must be in ECX"); 3116 assert(left == dest, "left and dest must be equal"); 3117 assert(tmp->is_illegal(), "wasting a register if tmp is allocated"); 3118 3119 if (left->is_single_cpu()) { 3120 Register value = left->as_register(); 3121 assert(value != SHIFT_count, "left cannot be ECX"); 3122 3123 switch (code) { 3124 case lir_shl: __ shll(value); break; 3125 case lir_shr: __ sarl(value); break; 3126 case lir_ushr: __ shrl(value); break; 3127 default: ShouldNotReachHere(); 3128 } 3129 } else if (left->is_double_cpu()) { 3130 Register lo = left->as_register_lo(); 3131 Register hi = left->as_register_hi(); 3132 assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX"); 3133 #ifdef _LP64 3134 switch (code) { 3135 case lir_shl: __ shlptr(lo); break; 3136 case lir_shr: __ sarptr(lo); break; 3137 case lir_ushr: __ shrptr(lo); break; 3138 default: ShouldNotReachHere(); 3139 } 3140 #else 3141 3142 switch (code) { 3143 case lir_shl: __ lshl(hi, lo); break; 3144 case lir_shr: __ lshr(hi, lo, true); break; 3145 case lir_ushr: __ lshr(hi, lo, false); break; 3146 default: ShouldNotReachHere(); 3147 } 3148 #endif // LP64 3149 } else { 3150 ShouldNotReachHere(); 3151 } 3152 } 3153 3154 3155 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { 3156 if (dest->is_single_cpu()) { 3157 // first move left into dest so that left is not destroyed by the shift 3158 Register value = dest->as_register(); 3159 count = count & 0x1F; // Java spec 3160 3161 move_regs(left->as_register(), value); 3162 switch (code) { 3163 case lir_shl: __ shll(value, count); break; 3164 case lir_shr: __ sarl(value, count); break; 3165 case lir_ushr: __ shrl(value, count); break; 3166 default: ShouldNotReachHere(); 3167 } 3168 } else if (dest->is_double_cpu()) { 3169 #ifndef _LP64 3170 Unimplemented(); 3171 #else 3172 // first move left into dest so that left is not destroyed by the shift 3173 Register value = dest->as_register_lo(); 3174 count = count & 0x1F; // Java spec 3175 3176 move_regs(left->as_register_lo(), value); 3177 switch (code) { 3178 case lir_shl: __ shlptr(value, count); break; 3179 case lir_shr: __ sarptr(value, count); break; 3180 case lir_ushr: __ shrptr(value, count); break; 3181 default: ShouldNotReachHere(); 3182 } 3183 #endif // _LP64 3184 } else { 3185 ShouldNotReachHere(); 3186 } 3187 } 3188 3189 3190 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) { 3191 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 3192 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 3193 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 3194 __ movptr (Address(rsp, offset_from_rsp_in_bytes), r); 3195 } 3196 3197 3198 void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) { 3199 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 3200 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 3201 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 3202 __ movptr (Address(rsp, offset_from_rsp_in_bytes), c); 3203 } 3204 3205 3206 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) { 3207 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 3208 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 3209 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 3210 __ movoop (Address(rsp, offset_from_rsp_in_bytes), o); 3211 } 3212 3213 3214 // This code replaces a call to arraycopy; no exception may 3215 // be thrown in this code, they must be thrown in the System.arraycopy 3216 // activation frame; we could save some checks if this would not be the case 3217 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { 3218 ciArrayKlass* default_type = op->expected_type(); 3219 Register src = op->src()->as_register(); 3220 Register dst = op->dst()->as_register(); 3221 Register src_pos = op->src_pos()->as_register(); 3222 Register dst_pos = op->dst_pos()->as_register(); 3223 Register length = op->length()->as_register(); 3224 Register tmp = op->tmp()->as_register(); 3225 3226 CodeStub* stub = op->stub(); 3227 int flags = op->flags(); 3228 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL; 3229 if (basic_type == T_ARRAY) basic_type = T_OBJECT; 3230 3231 // if we don't know anything, just go through the generic arraycopy 3232 if (default_type == NULL) { 3233 Label done; 3234 // save outgoing arguments on stack in case call to System.arraycopy is needed 3235 // HACK ALERT. This code used to push the parameters in a hardwired fashion 3236 // for interpreter calling conventions. Now we have to do it in new style conventions. 3237 // For the moment until C1 gets the new register allocator I just force all the 3238 // args to the right place (except the register args) and then on the back side 3239 // reload the register args properly if we go slow path. Yuck 3240 3241 // These are proper for the calling convention 3242 store_parameter(length, 2); 3243 store_parameter(dst_pos, 1); 3244 store_parameter(dst, 0); 3245 3246 // these are just temporary placements until we need to reload 3247 store_parameter(src_pos, 3); 3248 store_parameter(src, 4); 3249 NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");) 3250 3251 address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy); 3252 3253 address copyfunc_addr = StubRoutines::generic_arraycopy(); 3254 3255 // pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint 3256 #ifdef _LP64 3257 // The arguments are in java calling convention so we can trivially shift them to C 3258 // convention 3259 assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4); 3260 __ mov(c_rarg0, j_rarg0); 3261 assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4); 3262 __ mov(c_rarg1, j_rarg1); 3263 assert_different_registers(c_rarg2, j_rarg3, j_rarg4); 3264 __ mov(c_rarg2, j_rarg2); 3265 assert_different_registers(c_rarg3, j_rarg4); 3266 __ mov(c_rarg3, j_rarg3); 3267 #ifdef _WIN64 3268 // Allocate abi space for args but be sure to keep stack aligned 3269 __ subptr(rsp, 6*wordSize); 3270 store_parameter(j_rarg4, 4); 3271 if (copyfunc_addr == NULL) { // Use C version if stub was not generated 3272 __ call(RuntimeAddress(C_entry)); 3273 } else { 3274 #ifndef PRODUCT 3275 if (PrintC1Statistics) { 3276 __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt)); 3277 } 3278 #endif 3279 __ call(RuntimeAddress(copyfunc_addr)); 3280 } 3281 __ addptr(rsp, 6*wordSize); 3282 #else 3283 __ mov(c_rarg4, j_rarg4); 3284 if (copyfunc_addr == NULL) { // Use C version if stub was not generated 3285 __ call(RuntimeAddress(C_entry)); 3286 } else { 3287 #ifndef PRODUCT 3288 if (PrintC1Statistics) { 3289 __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt)); 3290 } 3291 #endif 3292 __ call(RuntimeAddress(copyfunc_addr)); 3293 } 3294 #endif // _WIN64 3295 #else 3296 __ push(length); 3297 __ push(dst_pos); 3298 __ push(dst); 3299 __ push(src_pos); 3300 __ push(src); 3301 3302 if (copyfunc_addr == NULL) { // Use C version if stub was not generated 3303 __ call_VM_leaf(C_entry, 5); // removes pushed parameter from the stack 3304 } else { 3305 #ifndef PRODUCT 3306 if (PrintC1Statistics) { 3307 __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt)); 3308 } 3309 #endif 3310 __ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameter from the stack 3311 } 3312 3313 #endif // _LP64 3314 3315 __ cmpl(rax, 0); 3316 __ jcc(Assembler::equal, *stub->continuation()); 3317 3318 if (copyfunc_addr != NULL) { 3319 __ mov(tmp, rax); 3320 __ xorl(tmp, -1); 3321 } 3322 3323 // Reload values from the stack so they are where the stub 3324 // expects them. 3325 __ movptr (dst, Address(rsp, 0*BytesPerWord)); 3326 __ movptr (dst_pos, Address(rsp, 1*BytesPerWord)); 3327 __ movptr (length, Address(rsp, 2*BytesPerWord)); 3328 __ movptr (src_pos, Address(rsp, 3*BytesPerWord)); 3329 __ movptr (src, Address(rsp, 4*BytesPerWord)); 3330 3331 if (copyfunc_addr != NULL) { 3332 __ subl(length, tmp); 3333 __ addl(src_pos, tmp); 3334 __ addl(dst_pos, tmp); 3335 } 3336 __ jmp(*stub->entry()); 3337 3338 __ bind(*stub->continuation()); 3339 return; 3340 } 3341 3342 assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point"); 3343 3344 int elem_size = type2aelembytes(basic_type); 3345 int shift_amount; 3346 Address::ScaleFactor scale; 3347 3348 switch (elem_size) { 3349 case 1 : 3350 shift_amount = 0; 3351 scale = Address::times_1; 3352 break; 3353 case 2 : 3354 shift_amount = 1; 3355 scale = Address::times_2; 3356 break; 3357 case 4 : 3358 shift_amount = 2; 3359 scale = Address::times_4; 3360 break; 3361 case 8 : 3362 shift_amount = 3; 3363 scale = Address::times_8; 3364 break; 3365 default: 3366 ShouldNotReachHere(); 3367 } 3368 3369 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes()); 3370 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes()); 3371 Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes()); 3372 Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes()); 3373 3374 // length and pos's are all sign extended at this point on 64bit 3375 3376 // test for NULL 3377 if (flags & LIR_OpArrayCopy::src_null_check) { 3378 __ testptr(src, src); 3379 __ jcc(Assembler::zero, *stub->entry()); 3380 } 3381 if (flags & LIR_OpArrayCopy::dst_null_check) { 3382 __ testptr(dst, dst); 3383 __ jcc(Assembler::zero, *stub->entry()); 3384 } 3385 3386 // check if negative 3387 if (flags & LIR_OpArrayCopy::src_pos_positive_check) { 3388 __ testl(src_pos, src_pos); 3389 __ jcc(Assembler::less, *stub->entry()); 3390 } 3391 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) { 3392 __ testl(dst_pos, dst_pos); 3393 __ jcc(Assembler::less, *stub->entry()); 3394 } 3395 3396 if (flags & LIR_OpArrayCopy::src_range_check) { 3397 __ lea(tmp, Address(src_pos, length, Address::times_1, 0)); 3398 __ cmpl(tmp, src_length_addr); 3399 __ jcc(Assembler::above, *stub->entry()); 3400 } 3401 if (flags & LIR_OpArrayCopy::dst_range_check) { 3402 __ lea(tmp, Address(dst_pos, length, Address::times_1, 0)); 3403 __ cmpl(tmp, dst_length_addr); 3404 __ jcc(Assembler::above, *stub->entry()); 3405 } 3406 3407 if (flags & LIR_OpArrayCopy::length_positive_check) { 3408 __ testl(length, length); 3409 __ jcc(Assembler::less, *stub->entry()); 3410 __ jcc(Assembler::zero, *stub->continuation()); 3411 } 3412 3413 #ifdef _LP64 3414 __ movl2ptr(src_pos, src_pos); //higher 32bits must be null 3415 __ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null 3416 #endif 3417 3418 if (flags & LIR_OpArrayCopy::type_check) { 3419 // We don't know the array types are compatible 3420 if (basic_type != T_OBJECT) { 3421 // Simple test for basic type arrays 3422 if (UseCompressedClassPointers) { 3423 __ movl(tmp, src_klass_addr); 3424 __ cmpl(tmp, dst_klass_addr); 3425 } else { 3426 __ movptr(tmp, src_klass_addr); 3427 __ cmpptr(tmp, dst_klass_addr); 3428 } 3429 __ jcc(Assembler::notEqual, *stub->entry()); 3430 } else { 3431 // For object arrays, if src is a sub class of dst then we can 3432 // safely do the copy. 3433 Label cont, slow; 3434 3435 __ push(src); 3436 __ push(dst); 3437 3438 __ load_klass(src, src); 3439 __ load_klass(dst, dst); 3440 3441 __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL); 3442 3443 __ push(src); 3444 __ push(dst); 3445 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); 3446 __ pop(dst); 3447 __ pop(src); 3448 3449 __ cmpl(src, 0); 3450 __ jcc(Assembler::notEqual, cont); 3451 3452 __ bind(slow); 3453 __ pop(dst); 3454 __ pop(src); 3455 3456 address copyfunc_addr = StubRoutines::checkcast_arraycopy(); 3457 if (copyfunc_addr != NULL) { // use stub if available 3458 // src is not a sub class of dst so we have to do a 3459 // per-element check. 3460 3461 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray; 3462 if ((flags & mask) != mask) { 3463 // Check that at least both of them object arrays. 3464 assert(flags & mask, "one of the two should be known to be an object array"); 3465 3466 if (!(flags & LIR_OpArrayCopy::src_objarray)) { 3467 __ load_klass(tmp, src); 3468 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) { 3469 __ load_klass(tmp, dst); 3470 } 3471 int lh_offset = in_bytes(Klass::layout_helper_offset()); 3472 Address klass_lh_addr(tmp, lh_offset); 3473 jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 3474 __ cmpl(klass_lh_addr, objArray_lh); 3475 __ jcc(Assembler::notEqual, *stub->entry()); 3476 } 3477 3478 // Spill because stubs can use any register they like and it's 3479 // easier to restore just those that we care about. 3480 store_parameter(dst, 0); 3481 store_parameter(dst_pos, 1); 3482 store_parameter(length, 2); 3483 store_parameter(src_pos, 3); 3484 store_parameter(src, 4); 3485 3486 #ifndef _LP64 3487 __ movptr(tmp, dst_klass_addr); 3488 __ movptr(tmp, Address(tmp, ObjArrayKlass::element_klass_offset())); 3489 __ push(tmp); 3490 __ movl(tmp, Address(tmp, Klass::super_check_offset_offset())); 3491 __ push(tmp); 3492 __ push(length); 3493 __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3494 __ push(tmp); 3495 __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3496 __ push(tmp); 3497 3498 __ call_VM_leaf(copyfunc_addr, 5); 3499 #else 3500 __ movl2ptr(length, length); //higher 32bits must be null 3501 3502 __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3503 assert_different_registers(c_rarg0, dst, dst_pos, length); 3504 __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3505 assert_different_registers(c_rarg1, dst, length); 3506 3507 __ mov(c_rarg2, length); 3508 assert_different_registers(c_rarg2, dst); 3509 3510 #ifdef _WIN64 3511 // Allocate abi space for args but be sure to keep stack aligned 3512 __ subptr(rsp, 6*wordSize); 3513 __ load_klass(c_rarg3, dst); 3514 __ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset())); 3515 store_parameter(c_rarg3, 4); 3516 __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset())); 3517 __ call(RuntimeAddress(copyfunc_addr)); 3518 __ addptr(rsp, 6*wordSize); 3519 #else 3520 __ load_klass(c_rarg4, dst); 3521 __ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset())); 3522 __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset())); 3523 __ call(RuntimeAddress(copyfunc_addr)); 3524 #endif 3525 3526 #endif 3527 3528 #ifndef PRODUCT 3529 if (PrintC1Statistics) { 3530 Label failed; 3531 __ testl(rax, rax); 3532 __ jcc(Assembler::notZero, failed); 3533 __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt)); 3534 __ bind(failed); 3535 } 3536 #endif 3537 3538 __ testl(rax, rax); 3539 __ jcc(Assembler::zero, *stub->continuation()); 3540 3541 #ifndef PRODUCT 3542 if (PrintC1Statistics) { 3543 __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt)); 3544 } 3545 #endif 3546 3547 __ mov(tmp, rax); 3548 3549 __ xorl(tmp, -1); 3550 3551 // Restore previously spilled arguments 3552 __ movptr (dst, Address(rsp, 0*BytesPerWord)); 3553 __ movptr (dst_pos, Address(rsp, 1*BytesPerWord)); 3554 __ movptr (length, Address(rsp, 2*BytesPerWord)); 3555 __ movptr (src_pos, Address(rsp, 3*BytesPerWord)); 3556 __ movptr (src, Address(rsp, 4*BytesPerWord)); 3557 3558 3559 __ subl(length, tmp); 3560 __ addl(src_pos, tmp); 3561 __ addl(dst_pos, tmp); 3562 } 3563 3564 __ jmp(*stub->entry()); 3565 3566 __ bind(cont); 3567 __ pop(dst); 3568 __ pop(src); 3569 } 3570 } 3571 3572 #ifdef ASSERT 3573 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) { 3574 // Sanity check the known type with the incoming class. For the 3575 // primitive case the types must match exactly with src.klass and 3576 // dst.klass each exactly matching the default type. For the 3577 // object array case, if no type check is needed then either the 3578 // dst type is exactly the expected type and the src type is a 3579 // subtype which we can't check or src is the same array as dst 3580 // but not necessarily exactly of type default_type. 3581 Label known_ok, halt; 3582 __ mov_metadata(tmp, default_type->constant_encoding()); 3583 #ifdef _LP64 3584 if (UseCompressedClassPointers) { 3585 __ encode_klass_not_null(tmp); 3586 } 3587 #endif 3588 3589 if (basic_type != T_OBJECT) { 3590 3591 if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr); 3592 else __ cmpptr(tmp, dst_klass_addr); 3593 __ jcc(Assembler::notEqual, halt); 3594 if (UseCompressedClassPointers) __ cmpl(tmp, src_klass_addr); 3595 else __ cmpptr(tmp, src_klass_addr); 3596 __ jcc(Assembler::equal, known_ok); 3597 } else { 3598 if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr); 3599 else __ cmpptr(tmp, dst_klass_addr); 3600 __ jcc(Assembler::equal, known_ok); 3601 __ cmpptr(src, dst); 3602 __ jcc(Assembler::equal, known_ok); 3603 } 3604 __ bind(halt); 3605 __ stop("incorrect type information in arraycopy"); 3606 __ bind(known_ok); 3607 } 3608 #endif 3609 3610 #ifndef PRODUCT 3611 if (PrintC1Statistics) { 3612 __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type))); 3613 } 3614 #endif 3615 3616 #ifdef _LP64 3617 assert_different_registers(c_rarg0, dst, dst_pos, length); 3618 __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3619 assert_different_registers(c_rarg1, length); 3620 __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3621 __ mov(c_rarg2, length); 3622 3623 #else 3624 __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3625 store_parameter(tmp, 0); 3626 __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3627 store_parameter(tmp, 1); 3628 store_parameter(length, 2); 3629 #endif // _LP64 3630 3631 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0; 3632 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0; 3633 const char *name; 3634 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false); 3635 __ call_VM_leaf(entry, 0); 3636 3637 __ bind(*stub->continuation()); 3638 } 3639 3640 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { 3641 assert(op->crc()->is_single_cpu(), "crc must be register"); 3642 assert(op->val()->is_single_cpu(), "byte value must be register"); 3643 assert(op->result_opr()->is_single_cpu(), "result must be register"); 3644 Register crc = op->crc()->as_register(); 3645 Register val = op->val()->as_register(); 3646 Register res = op->result_opr()->as_register(); 3647 3648 assert_different_registers(val, crc, res); 3649 3650 __ lea(res, ExternalAddress(StubRoutines::crc_table_addr())); 3651 __ notl(crc); // ~crc 3652 __ update_byte_crc32(crc, val, res); 3653 __ notl(crc); // ~crc 3654 __ mov(res, crc); 3655 } 3656 3657 void LIR_Assembler::emit_lock(LIR_OpLock* op) { 3658 Register obj = op->obj_opr()->as_register(); // may not be an oop 3659 Register hdr = op->hdr_opr()->as_register(); 3660 Register lock = op->lock_opr()->as_register(); 3661 if (!UseFastLocking) { 3662 __ jmp(*op->stub()->entry()); 3663 } else if (op->code() == lir_lock) { 3664 Register scratch = noreg; 3665 if (UseBiasedLocking) { 3666 scratch = op->scratch_opr()->as_register(); 3667 } 3668 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 3669 // add debug info for NullPointerException only if one is possible 3670 int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry()); 3671 if (op->info() != NULL) { 3672 add_debug_info_for_null_check(null_check_offset, op->info()); 3673 } 3674 // done 3675 } else if (op->code() == lir_unlock) { 3676 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 3677 __ unlock_object(hdr, obj, lock, *op->stub()->entry()); 3678 } else { 3679 Unimplemented(); 3680 } 3681 __ bind(*op->stub()->continuation()); 3682 } 3683 3684 3685 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { 3686 ciMethod* method = op->profiled_method(); 3687 int bci = op->profiled_bci(); 3688 ciMethod* callee = op->profiled_callee(); 3689 3690 // Update counter for all call types 3691 ciMethodData* md = method->method_data_or_null(); 3692 assert(md != NULL, "Sanity"); 3693 ciProfileData* data = md->bci_to_data(bci); 3694 assert(data->is_CounterData(), "need CounterData for calls"); 3695 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 3696 Register mdo = op->mdo()->as_register(); 3697 __ mov_metadata(mdo, md->constant_encoding()); 3698 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 3699 Bytecodes::Code bc = method->java_code_at_bci(bci); 3700 const bool callee_is_static = callee->is_loaded() && callee->is_static(); 3701 // Perform additional virtual call profiling for invokevirtual and 3702 // invokeinterface bytecodes 3703 if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) && 3704 !callee_is_static && // required for optimized MH invokes 3705 C1ProfileVirtualCalls) { 3706 assert(op->recv()->is_single_cpu(), "recv must be allocated"); 3707 Register recv = op->recv()->as_register(); 3708 assert_different_registers(mdo, recv); 3709 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); 3710 ciKlass* known_klass = op->known_holder(); 3711 if (C1OptimizeVirtualCallProfiling && known_klass != NULL) { 3712 // We know the type that will be seen at this call site; we can 3713 // statically update the MethodData* rather than needing to do 3714 // dynamic tests on the receiver type 3715 3716 // NOTE: we should probably put a lock around this search to 3717 // avoid collisions by concurrent compilations 3718 ciVirtualCallData* vc_data = (ciVirtualCallData*) data; 3719 uint i; 3720 for (i = 0; i < VirtualCallData::row_limit(); i++) { 3721 ciKlass* receiver = vc_data->receiver(i); 3722 if (known_klass->equals(receiver)) { 3723 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 3724 __ addptr(data_addr, DataLayout::counter_increment); 3725 return; 3726 } 3727 } 3728 3729 // Receiver type not found in profile data; select an empty slot 3730 3731 // Note that this is less efficient than it should be because it 3732 // always does a write to the receiver part of the 3733 // VirtualCallData rather than just the first time 3734 for (i = 0; i < VirtualCallData::row_limit(); i++) { 3735 ciKlass* receiver = vc_data->receiver(i); 3736 if (receiver == NULL) { 3737 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))); 3738 __ mov_metadata(recv_addr, known_klass->constant_encoding()); 3739 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 3740 __ addptr(data_addr, DataLayout::counter_increment); 3741 return; 3742 } 3743 } 3744 } else { 3745 __ load_klass(recv, recv); 3746 Label update_done; 3747 type_profile_helper(mdo, md, data, recv, &update_done); 3748 // Receiver did not match any saved receiver and there is no empty row for it. 3749 // Increment total counter to indicate polymorphic case. 3750 __ addptr(counter_addr, DataLayout::counter_increment); 3751 3752 __ bind(update_done); 3753 } 3754 } else { 3755 // Static call 3756 __ addptr(counter_addr, DataLayout::counter_increment); 3757 } 3758 } 3759 3760 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { 3761 Register obj = op->obj()->as_register(); 3762 Register tmp = op->tmp()->as_pointer_register(); 3763 Address mdo_addr = as_Address(op->mdp()->as_address_ptr()); 3764 ciKlass* exact_klass = op->exact_klass(); 3765 intptr_t current_klass = op->current_klass(); 3766 bool not_null = op->not_null(); 3767 bool no_conflict = op->no_conflict(); 3768 3769 Label update, next, none; 3770 3771 bool do_null = !not_null; 3772 bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass; 3773 bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set; 3774 3775 assert(do_null || do_update, "why are we here?"); 3776 assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?"); 3777 3778 __ verify_oop(obj); 3779 3780 if (tmp != obj) { 3781 __ mov(tmp, obj); 3782 } 3783 if (do_null) { 3784 __ testptr(tmp, tmp); 3785 __ jccb(Assembler::notZero, update); 3786 if (!TypeEntries::was_null_seen(current_klass)) { 3787 __ orptr(mdo_addr, TypeEntries::null_seen); 3788 } 3789 if (do_update) { 3790 #ifndef ASSERT 3791 __ jmpb(next); 3792 } 3793 #else 3794 __ jmp(next); 3795 } 3796 } else { 3797 __ testptr(tmp, tmp); 3798 __ jccb(Assembler::notZero, update); 3799 __ stop("unexpect null obj"); 3800 #endif 3801 } 3802 3803 __ bind(update); 3804 3805 if (do_update) { 3806 #ifdef ASSERT 3807 if (exact_klass != NULL) { 3808 Label ok; 3809 __ load_klass(tmp, tmp); 3810 __ push(tmp); 3811 __ mov_metadata(tmp, exact_klass->constant_encoding()); 3812 __ cmpptr(tmp, Address(rsp, 0)); 3813 __ jccb(Assembler::equal, ok); 3814 __ stop("exact klass and actual klass differ"); 3815 __ bind(ok); 3816 __ pop(tmp); 3817 } 3818 #endif 3819 if (!no_conflict) { 3820 if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) { 3821 if (exact_klass != NULL) { 3822 __ mov_metadata(tmp, exact_klass->constant_encoding()); 3823 } else { 3824 __ load_klass(tmp, tmp); 3825 } 3826 3827 __ xorptr(tmp, mdo_addr); 3828 __ testptr(tmp, TypeEntries::type_klass_mask); 3829 // klass seen before, nothing to do. The unknown bit may have been 3830 // set already but no need to check. 3831 __ jccb(Assembler::zero, next); 3832 3833 __ testptr(tmp, TypeEntries::type_unknown); 3834 __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore. 3835 3836 if (TypeEntries::is_type_none(current_klass)) { 3837 __ cmpptr(mdo_addr, 0); 3838 __ jccb(Assembler::equal, none); 3839 __ cmpptr(mdo_addr, TypeEntries::null_seen); 3840 __ jccb(Assembler::equal, none); 3841 // There is a chance that the checks above (re-reading profiling 3842 // data from memory) fail if another thread has just set the 3843 // profiling to this obj's klass 3844 __ xorptr(tmp, mdo_addr); 3845 __ testptr(tmp, TypeEntries::type_klass_mask); 3846 __ jccb(Assembler::zero, next); 3847 } 3848 } else { 3849 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && 3850 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only"); 3851 3852 __ movptr(tmp, mdo_addr); 3853 __ testptr(tmp, TypeEntries::type_unknown); 3854 __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore. 3855 } 3856 3857 // different than before. Cannot keep accurate profile. 3858 __ orptr(mdo_addr, TypeEntries::type_unknown); 3859 3860 if (TypeEntries::is_type_none(current_klass)) { 3861 __ jmpb(next); 3862 3863 __ bind(none); 3864 // first time here. Set profile type. 3865 __ movptr(mdo_addr, tmp); 3866 } 3867 } else { 3868 // There's a single possible klass at this profile point 3869 assert(exact_klass != NULL, "should be"); 3870 if (TypeEntries::is_type_none(current_klass)) { 3871 __ mov_metadata(tmp, exact_klass->constant_encoding()); 3872 __ xorptr(tmp, mdo_addr); 3873 __ testptr(tmp, TypeEntries::type_klass_mask); 3874 #ifdef ASSERT 3875 __ jcc(Assembler::zero, next); 3876 3877 { 3878 Label ok; 3879 __ push(tmp); 3880 __ cmpptr(mdo_addr, 0); 3881 __ jcc(Assembler::equal, ok); 3882 __ cmpptr(mdo_addr, TypeEntries::null_seen); 3883 __ jcc(Assembler::equal, ok); 3884 // may have been set by another thread 3885 __ mov_metadata(tmp, exact_klass->constant_encoding()); 3886 __ xorptr(tmp, mdo_addr); 3887 __ testptr(tmp, TypeEntries::type_mask); 3888 __ jcc(Assembler::zero, ok); 3889 3890 __ stop("unexpected profiling mismatch"); 3891 __ bind(ok); 3892 __ pop(tmp); 3893 } 3894 #else 3895 __ jccb(Assembler::zero, next); 3896 #endif 3897 // first time here. Set profile type. 3898 __ movptr(mdo_addr, tmp); 3899 } else { 3900 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && 3901 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent"); 3902 3903 __ movptr(tmp, mdo_addr); 3904 __ testptr(tmp, TypeEntries::type_unknown); 3905 __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore. 3906 3907 __ orptr(mdo_addr, TypeEntries::type_unknown); 3908 } 3909 } 3910 3911 __ bind(next); 3912 } 3913 } 3914 3915 void LIR_Assembler::emit_delay(LIR_OpDelay*) { 3916 Unimplemented(); 3917 } 3918 3919 3920 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) { 3921 __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no)); 3922 } 3923 3924 3925 void LIR_Assembler::align_backward_branch_target() { 3926 __ align(BytesPerWord); 3927 } 3928 3929 3930 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) { 3931 if (left->is_single_cpu()) { 3932 __ negl(left->as_register()); 3933 move_regs(left->as_register(), dest->as_register()); 3934 3935 } else if (left->is_double_cpu()) { 3936 Register lo = left->as_register_lo(); 3937 #ifdef _LP64 3938 Register dst = dest->as_register_lo(); 3939 __ movptr(dst, lo); 3940 __ negptr(dst); 3941 #else 3942 Register hi = left->as_register_hi(); 3943 __ lneg(hi, lo); 3944 if (dest->as_register_lo() == hi) { 3945 assert(dest->as_register_hi() != lo, "destroying register"); 3946 move_regs(hi, dest->as_register_hi()); 3947 move_regs(lo, dest->as_register_lo()); 3948 } else { 3949 move_regs(lo, dest->as_register_lo()); 3950 move_regs(hi, dest->as_register_hi()); 3951 } 3952 #endif // _LP64 3953 3954 } else if (dest->is_single_xmm()) { 3955 if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) { 3956 __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg()); 3957 } 3958 __ xorps(dest->as_xmm_float_reg(), 3959 ExternalAddress((address)float_signflip_pool)); 3960 3961 } else if (dest->is_double_xmm()) { 3962 if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) { 3963 __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg()); 3964 } 3965 __ xorpd(dest->as_xmm_double_reg(), 3966 ExternalAddress((address)double_signflip_pool)); 3967 3968 } else if (left->is_single_fpu() || left->is_double_fpu()) { 3969 assert(left->fpu() == 0, "arg must be on TOS"); 3970 assert(dest->fpu() == 0, "dest must be TOS"); 3971 __ fchs(); 3972 3973 } else { 3974 ShouldNotReachHere(); 3975 } 3976 } 3977 3978 3979 void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) { 3980 assert(addr->is_address() && dest->is_register(), "check"); 3981 Register reg; 3982 reg = dest->as_pointer_register(); 3983 __ lea(reg, as_Address(addr->as_address_ptr())); 3984 } 3985 3986 3987 3988 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { 3989 assert(!tmp->is_valid(), "don't need temporary"); 3990 __ call(RuntimeAddress(dest)); 3991 if (info != NULL) { 3992 add_call_info_here(info); 3993 } 3994 } 3995 3996 3997 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { 3998 assert(type == T_LONG, "only for volatile long fields"); 3999 4000 if (info != NULL) { 4001 add_debug_info_for_null_check_here(info); 4002 } 4003 4004 if (src->is_double_xmm()) { 4005 if (dest->is_double_cpu()) { 4006 #ifdef _LP64 4007 __ movdq(dest->as_register_lo(), src->as_xmm_double_reg()); 4008 #else 4009 __ movdl(dest->as_register_lo(), src->as_xmm_double_reg()); 4010 __ psrlq(src->as_xmm_double_reg(), 32); 4011 __ movdl(dest->as_register_hi(), src->as_xmm_double_reg()); 4012 #endif // _LP64 4013 } else if (dest->is_double_stack()) { 4014 __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg()); 4015 } else if (dest->is_address()) { 4016 __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg()); 4017 } else { 4018 ShouldNotReachHere(); 4019 } 4020 4021 } else if (dest->is_double_xmm()) { 4022 if (src->is_double_stack()) { 4023 __ movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix())); 4024 } else if (src->is_address()) { 4025 __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr())); 4026 } else { 4027 ShouldNotReachHere(); 4028 } 4029 4030 } else if (src->is_double_fpu()) { 4031 assert(src->fpu_regnrLo() == 0, "must be TOS"); 4032 if (dest->is_double_stack()) { 4033 __ fistp_d(frame_map()->address_for_slot(dest->double_stack_ix())); 4034 } else if (dest->is_address()) { 4035 __ fistp_d(as_Address(dest->as_address_ptr())); 4036 } else { 4037 ShouldNotReachHere(); 4038 } 4039 4040 } else if (dest->is_double_fpu()) { 4041 assert(dest->fpu_regnrLo() == 0, "must be TOS"); 4042 if (src->is_double_stack()) { 4043 __ fild_d(frame_map()->address_for_slot(src->double_stack_ix())); 4044 } else if (src->is_address()) { 4045 __ fild_d(as_Address(src->as_address_ptr())); 4046 } else { 4047 ShouldNotReachHere(); 4048 } 4049 } else { 4050 ShouldNotReachHere(); 4051 } 4052 } 4053 4054 #ifdef ASSERT 4055 // emit run-time assertion 4056 void LIR_Assembler::emit_assert(LIR_OpAssert* op) { 4057 assert(op->code() == lir_assert, "must be"); 4058 4059 if (op->in_opr1()->is_valid()) { 4060 assert(op->in_opr2()->is_valid(), "both operands must be valid"); 4061 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op); 4062 } else { 4063 assert(op->in_opr2()->is_illegal(), "both operands must be illegal"); 4064 assert(op->condition() == lir_cond_always, "no other conditions allowed"); 4065 } 4066 4067 Label ok; 4068 if (op->condition() != lir_cond_always) { 4069 Assembler::Condition acond = Assembler::zero; 4070 switch (op->condition()) { 4071 case lir_cond_equal: acond = Assembler::equal; break; 4072 case lir_cond_notEqual: acond = Assembler::notEqual; break; 4073 case lir_cond_less: acond = Assembler::less; break; 4074 case lir_cond_lessEqual: acond = Assembler::lessEqual; break; 4075 case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break; 4076 case lir_cond_greater: acond = Assembler::greater; break; 4077 case lir_cond_belowEqual: acond = Assembler::belowEqual; break; 4078 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; break; 4079 default: ShouldNotReachHere(); 4080 } 4081 __ jcc(acond, ok); 4082 } 4083 if (op->halt()) { 4084 const char* str = __ code_string(op->msg()); 4085 __ stop(str); 4086 } else { 4087 breakpoint(); 4088 } 4089 __ bind(ok); 4090 } 4091 #endif 4092 4093 void LIR_Assembler::membar() { 4094 // QQQ sparc TSO uses this, 4095 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad)); 4096 } 4097 4098 void LIR_Assembler::membar_acquire() { 4099 // No x86 machines currently require load fences 4100 // __ load_fence(); 4101 } 4102 4103 void LIR_Assembler::membar_release() { 4104 // No x86 machines currently require store fences 4105 // __ store_fence(); 4106 } 4107 4108 void LIR_Assembler::membar_loadload() { 4109 // no-op 4110 //__ membar(Assembler::Membar_mask_bits(Assembler::loadload)); 4111 } 4112 4113 void LIR_Assembler::membar_storestore() { 4114 // no-op 4115 //__ membar(Assembler::Membar_mask_bits(Assembler::storestore)); 4116 } 4117 4118 void LIR_Assembler::membar_loadstore() { 4119 // no-op 4120 //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore)); 4121 } 4122 4123 void LIR_Assembler::membar_storeload() { 4124 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); 4125 } 4126 4127 void LIR_Assembler::get_thread(LIR_Opr result_reg) { 4128 assert(result_reg->is_register(), "check"); 4129 #ifdef _LP64 4130 // __ get_thread(result_reg->as_register_lo()); 4131 __ mov(result_reg->as_register(), r15_thread); 4132 #else 4133 __ get_thread(result_reg->as_register()); 4134 #endif // _LP64 4135 } 4136 4137 4138 void LIR_Assembler::peephole(LIR_List*) { 4139 // do nothing for now 4140 } 4141 4142 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) { 4143 assert(data == dest, "xchg/xadd uses only 2 operands"); 4144 4145 if (data->type() == T_INT) { 4146 if (code == lir_xadd) { 4147 if (os::is_MP()) { 4148 __ lock(); 4149 } 4150 __ xaddl(as_Address(src->as_address_ptr()), data->as_register()); 4151 } else { 4152 __ xchgl(data->as_register(), as_Address(src->as_address_ptr())); 4153 } 4154 } else if (data->is_oop()) { 4155 assert (code == lir_xchg, "xadd for oops"); 4156 Register obj = data->as_register(); 4157 #ifdef _LP64 4158 if (UseCompressedOops) { 4159 __ encode_heap_oop(obj); 4160 __ xchgl(obj, as_Address(src->as_address_ptr())); 4161 __ decode_heap_oop(obj); 4162 } else { 4163 __ xchgptr(obj, as_Address(src->as_address_ptr())); 4164 } 4165 #else 4166 __ xchgl(obj, as_Address(src->as_address_ptr())); 4167 #endif 4168 } else if (data->type() == T_LONG) { 4169 #ifdef _LP64 4170 assert(data->as_register_lo() == data->as_register_hi(), "should be a single register"); 4171 if (code == lir_xadd) { 4172 if (os::is_MP()) { 4173 __ lock(); 4174 } 4175 __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo()); 4176 } else { 4177 __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr())); 4178 } 4179 #else 4180 ShouldNotReachHere(); 4181 #endif 4182 } else { 4183 ShouldNotReachHere(); 4184 } 4185 } 4186 #undef __