1 /* 2 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2014, Red Hat Inc. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "code/compiledIC.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"


#ifndef PRODUCT
#define COMMENT(x)   do { __ block_comment(x); } while (0)
#else
#define COMMENT(x)
#endif

NEEDS_CLEANUP // remove these definitions?
const Register IC_Klass    = rscratch2;   // where the IC klass is cached
const Register SYNC_header = r0;          // synchronization header
const Register SHIFT_count = r0;          // where count for shift operations must be

#define __ _masm->


// Make sure neither tmp1 nor tmp2 aliases 'preserve': whichever temp
// equals 'preserve' is redirected to 'extra' instead.
static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp2 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2);
}



// Three-temp variant of the helper above; at most one temp can equal
// 'preserve', and that one is redirected to 'extra'.
static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2,
                                       Register &tmp3) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp2 = extra;
  } else if (tmp3 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp3 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2, tmp3);
}


bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}

// The OSR buffer pointer arrives in the same register as the receiver.
LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}

//--------------fpu register translations-----------------------


// Emit 'f' into the constant section and return its address.  On
// constant-section overflow the compilation is bailed out and a dummy
// (but in-range) address is returned so emission can continue.
address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}


// Same as float_constant(), for a double constant.
address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

// Same as float_constant(), for a 64-bit integer constant.
address LIR_Assembler::int_constant(jlong n) {
  address const_addr = __ long_constant(n);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

// x87-style FPU stack management does not exist on AArch64.
void LIR_Assembler::set_24bit_FPU() { Unimplemented(); }

void LIR_Assembler::reset_FPU() { Unimplemented(); }

void LIR_Assembler::fpop() { Unimplemented(); }

void LIR_Assembler::fxch(int i) { Unimplemented(); }

void LIR_Assembler::fld(int i) { Unimplemented(); }

void LIR_Assembler::ffree(int i) { Unimplemented(); }

void LIR_Assembler::breakpoint() { Unimplemented(); }

void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }

void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }

bool LIR_Assembler::is_literal_address(LIR_Address* addr) { Unimplemented(); return false; }
//-------------------------------------------

// Pick the GP register of an operand: the low register for a
// double-word operand, the plain register otherwise.
static Register as_reg(LIR_Opr op) {
  return op->is_double_cpu() ? op->as_register_lo() : op->as_register();
}

// Widen an int or long constant operand to jlong.
static jlong as_long(LIR_Opr data) {
  jlong result;
  switch (data->type()) {
  case T_INT:
    result = (data->as_jint());
    break;
  case T_LONG:
    result = (data->as_jlong());
    break;
  default:
    ShouldNotReachHere();
    result = 0;  // unreachable
  }
  return result;
}

// Translate a LIR_Address into an assembler Address.  A register index
// is used directly (sign-extended for T_INT indexes); an immediate
// displacement that does not fit the addressing mode is materialized
// into 'tmp' first.
Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  Register base = addr->base()->as_pointer_register();
  LIR_Opr opr = addr->index();
  if (opr->is_cpu_register()) {
    Register index;
    if (opr->is_single_cpu())
      index = opr->as_register();
    else
      index = opr->as_register_lo();
    assert(addr->disp() == 0, "must be");
    switch(opr->type()) {
      case T_INT:
        return Address(base, index, Address::sxtw(addr->scale()));
      case T_LONG:
        return Address(base, index, Address::lsl(addr->scale()));
      default:
        ShouldNotReachHere();
      }
  } else  {
    intptr_t addr_offset = intptr_t(addr->disp());
    if (Address::offset_ok_for_immed(addr_offset, addr->scale()))
      return Address(base, addr_offset, Address::lsl(addr->scale()));
    else {
      __ mov(tmp, addr_offset);
      return Address(base, tmp, Address::lsl(addr->scale()));
    }
  }
  return Address();
}

Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  ShouldNotReachHere();
  return Address();
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}

Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr, rscratch1);  // Ouch
  // FIXME: This needs to be much more clever.  See x86.
}


void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  // r2: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame so in the osr buffer
  // so first slot in the local array is the last local from the interpreter
  // and last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
  // in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  //   r2: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ldr(rscratch1, Address(OSR_buf, slot_offset + 1*BytesPerWord));
        __ cbnz(rscratch1, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif
      // Copy the lock word and the object word from the OSR buffer into
      // this activation's monitor slots.
      __ ldr(r19, Address(OSR_buf, slot_offset + 0));
      __ str(r19, frame_map()->address_for_monitor_lock(i));
      __ ldr(r19, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ str(r19, frame_map()->address_for_monitor_object(i));
    }
  }
}


// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
  Register receiver = FrameMap::receiver_opr->as_register();
  Register ic_klass = IC_Klass;
  int start_offset = __ offset();
  __ inline_cache_check(receiver, ic_klass);

  // if icache check fails, then jump to runtime routine
  // Note: RECEIVER must still contain the receiver!
  Label dont;
  __ br(Assembler::EQ, dont);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  // We align the verified entry point unless the method body
  // (including its inline cache check) will fit in a single 64-byte
  // icache line.
  if (! method()->is_accessor() || __ offset() - start_offset > 4 * 4) {
    // force alignment after the cache check.
    __ align(CodeEntryAlignment);
  }

  __ bind(dont);
  return start_offset;
}


// Load the jobject 'o' into 'reg'; NULL is loaded as zero.
void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ mov(reg, zr);
  } else {
    __ movoop(reg, o, /*immediate*/true);
  }
}

// Emit a call to the appropriate patching stub for 'info' instead of
// the patched access itself; the runtime will deoptimize.
void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;

  switch (patching_id(info)) {
  case PatchingStub::access_field_id:
    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
    reloc_type = relocInfo::section_word_type;
    break;
  case PatchingStub::load_klass_id:
    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
    reloc_type = relocInfo::metadata_type;
    break;
  case PatchingStub::load_mirror_id:
    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  case PatchingStub::load_appendix_id:
    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  default: ShouldNotReachHere();
  }
  // NOTE(review): reloc_type is computed but not used in this function
  // (klass2reg_with_patching has the same shape) — confirm intentional.

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  deoptimize_trap(info);
}


// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!

  // The frame_map records size in slots (32bit word)

  // subtract two words to account for return address and link
  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}


int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in r0, and r3
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(r0);

  // search an exception handler (r0: exception oop, r3: throwing pc)
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
408 int LIR_Assembler::emit_unwind_handler() { 409 #ifndef PRODUCT 410 if (CommentedAssembly) { 411 _masm->block_comment("Unwind handler"); 412 } 413 #endif 414 415 int offset = code_offset(); 416 417 // Fetch the exception from TLS and clear out exception related thread state 418 __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset())); 419 __ str(zr, Address(rthread, JavaThread::exception_oop_offset())); 420 __ str(zr, Address(rthread, JavaThread::exception_pc_offset())); 421 422 __ bind(_unwind_handler_entry); 423 __ verify_not_null_oop(r0); 424 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { 425 __ mov(r19, r0); // Preserve the exception 426 } 427 428 // Preform needed unlocking 429 MonitorExitStub* stub = NULL; 430 if (method()->is_synchronized()) { 431 monitor_address(0, FrameMap::r0_opr); 432 stub = new MonitorExitStub(FrameMap::r0_opr, true, 0); 433 __ unlock_object(r5, r4, r0, *stub->entry()); 434 __ bind(*stub->continuation()); 435 } 436 437 if (compilation()->env()->dtrace_method_probes()) { 438 __ call_Unimplemented(); 439 #if 0 440 __ movptr(Address(rsp, 0), rax); 441 __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding()); 442 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit))); 443 #endif 444 } 445 446 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { 447 __ mov(r0, r19); // Restore the exception 448 } 449 450 // remove the activation and dispatch to the unwind handler 451 __ block_comment("remove_frame and dispatch to the unwind handler"); 452 __ remove_frame(initial_frame_size_in_bytes()); 453 __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id))); 454 455 // Emit the slow path assembly 456 if (stub != NULL) { 457 stub->emit_code(this); 458 } 459 460 return offset; 461 } 462 463 464 int LIR_Assembler::emit_deopt_handler() { 465 // if the last instruction is a call (typically to do a throw which 
466 // is coming at the end after block reordering) the return address 467 // must still point into the code area in order to avoid assertion 468 // failures when searching for the corresponding bci => add a nop 469 // (was bug 5/14/1999 - gri) 470 __ nop(); 471 472 // generate code for exception handler 473 address handler_base = __ start_a_stub(deopt_handler_size()); 474 if (handler_base == NULL) { 475 // not enough space left for the handler 476 bailout("deopt handler overflow"); 477 return -1; 478 } 479 480 int offset = code_offset(); 481 482 __ adr(lr, pc()); 483 __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack())); 484 guarantee(code_offset() - offset <= deopt_handler_size(), "overflow"); 485 __ end_a_stub(); 486 487 return offset; 488 } 489 490 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) { 491 _masm->code_section()->relocate(adr, relocInfo::poll_type); 492 int pc_offset = code_offset(); 493 flush_debug_info(pc_offset); 494 info->record_debug_info(compilation()->debug_info_recorder(), pc_offset); 495 if (info->exception_handlers() != NULL) { 496 compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers()); 497 } 498 } 499 500 void LIR_Assembler::return_op(LIR_Opr result) { 501 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,"); 502 503 // Pop the stack before the safepoint code 504 __ remove_frame(initial_frame_size_in_bytes()); 505 506 if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) { 507 __ reserved_stack_check(); 508 } 509 510 address polling_page(os::get_polling_page()); 511 __ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type); 512 __ ret(lr); 513 } 514 515 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) { 516 address polling_page(os::get_polling_page()); 517 guarantee(info != NULL, "Shouldn't be NULL"); 518 assert(os::is_poll_address(polling_page), 
"should be"); 519 __ get_polling_page(rscratch1, polling_page, relocInfo::poll_type); 520 add_debug_info_for_branch(info); // This isn't just debug info: 521 // it's the oop map 522 __ read_polling_page(rscratch1, relocInfo::poll_type); 523 return __ offset(); 524 } 525 526 527 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) { 528 if (from_reg == r31_sp) 529 from_reg = sp; 530 if (to_reg == r31_sp) 531 to_reg = sp; 532 __ mov(to_reg, from_reg); 533 } 534 535 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); } 536 537 538 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 539 assert(src->is_constant(), "should not call otherwise"); 540 assert(dest->is_register(), "should not call otherwise"); 541 LIR_Const* c = src->as_constant_ptr(); 542 543 switch (c->type()) { 544 case T_INT: { 545 assert(patch_code == lir_patch_none, "no patching handled here"); 546 __ movw(dest->as_register(), c->as_jint()); 547 break; 548 } 549 550 case T_ADDRESS: { 551 assert(patch_code == lir_patch_none, "no patching handled here"); 552 __ mov(dest->as_register(), c->as_jint()); 553 break; 554 } 555 556 case T_LONG: { 557 assert(patch_code == lir_patch_none, "no patching handled here"); 558 __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong()); 559 break; 560 } 561 562 case T_OBJECT: { 563 if (patch_code == lir_patch_none) { 564 jobject2reg(c->as_jobject(), dest->as_register()); 565 } else { 566 jobject2reg_with_patching(dest->as_register(), info); 567 } 568 break; 569 } 570 571 case T_METADATA: { 572 if (patch_code != lir_patch_none) { 573 klass2reg_with_patching(dest->as_register(), info); 574 } else { 575 __ mov_metadata(dest->as_register(), c->as_metadata()); 576 } 577 break; 578 } 579 580 case T_FLOAT: { 581 if (__ operand_valid_for_float_immediate(c->as_jfloat())) { 582 __ fmovs(dest->as_float_reg(), (c->as_jfloat())); 583 } else { 584 __ adr(rscratch1, 
InternalAddress(float_constant(c->as_jfloat()))); 585 __ ldrs(dest->as_float_reg(), Address(rscratch1)); 586 } 587 break; 588 } 589 590 case T_DOUBLE: { 591 if (__ operand_valid_for_float_immediate(c->as_jdouble())) { 592 __ fmovd(dest->as_double_reg(), (c->as_jdouble())); 593 } else { 594 __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble()))); 595 __ ldrd(dest->as_double_reg(), Address(rscratch1)); 596 } 597 break; 598 } 599 600 default: 601 ShouldNotReachHere(); 602 } 603 } 604 605 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) { 606 LIR_Const* c = src->as_constant_ptr(); 607 switch (c->type()) { 608 case T_OBJECT: 609 { 610 if (! c->as_jobject()) 611 __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix())); 612 else { 613 const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL); 614 reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false); 615 } 616 } 617 break; 618 case T_ADDRESS: 619 { 620 const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL); 621 reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false); 622 } 623 case T_INT: 624 case T_FLOAT: 625 { 626 Register reg = zr; 627 if (c->as_jint_bits() == 0) 628 __ strw(zr, frame_map()->address_for_slot(dest->single_stack_ix())); 629 else { 630 __ movw(rscratch1, c->as_jint_bits()); 631 __ strw(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix())); 632 } 633 } 634 break; 635 case T_LONG: 636 case T_DOUBLE: 637 { 638 Register reg = zr; 639 if (c->as_jlong_bits() == 0) 640 __ str(zr, frame_map()->address_for_slot(dest->double_stack_ix(), 641 lo_word_offset_in_bytes)); 642 else { 643 __ mov(rscratch1, (intptr_t)c->as_jlong_bits()); 644 __ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(), 645 lo_word_offset_in_bytes)); 646 } 647 } 648 break; 649 default: 650 ShouldNotReachHere(); 651 } 652 } 653 654 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) { 655 assert(src->is_constant(), 
"should not call otherwise"); 656 LIR_Const* c = src->as_constant_ptr(); 657 LIR_Address* to_addr = dest->as_address_ptr(); 658 659 void (Assembler::* insn)(Register Rt, const Address &adr); 660 661 switch (type) { 662 case T_ADDRESS: 663 assert(c->as_jint() == 0, "should be"); 664 insn = &Assembler::str; 665 break; 666 case T_LONG: 667 assert(c->as_jlong() == 0, "should be"); 668 insn = &Assembler::str; 669 break; 670 case T_INT: 671 assert(c->as_jint() == 0, "should be"); 672 insn = &Assembler::strw; 673 break; 674 case T_OBJECT: 675 case T_ARRAY: 676 assert(c->as_jobject() == 0, "should be"); 677 if (UseCompressedOops && !wide) { 678 insn = &Assembler::strw; 679 } else { 680 insn = &Assembler::str; 681 } 682 break; 683 case T_CHAR: 684 case T_SHORT: 685 assert(c->as_jint() == 0, "should be"); 686 insn = &Assembler::strh; 687 break; 688 case T_BOOLEAN: 689 case T_BYTE: 690 assert(c->as_jint() == 0, "should be"); 691 insn = &Assembler::strb; 692 break; 693 default: 694 ShouldNotReachHere(); 695 insn = &Assembler::str; // unreachable 696 } 697 698 if (info) add_debug_info_for_null_check_here(info); 699 (_masm->*insn)(zr, as_Address(to_addr, rscratch1)); 700 } 701 702 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) { 703 assert(src->is_register(), "should not call otherwise"); 704 assert(dest->is_register(), "should not call otherwise"); 705 706 // move between cpu-registers 707 if (dest->is_single_cpu()) { 708 if (src->type() == T_LONG) { 709 // Can do LONG -> OBJECT 710 move_regs(src->as_register_lo(), dest->as_register()); 711 return; 712 } 713 assert(src->is_single_cpu(), "must match"); 714 if (src->type() == T_OBJECT) { 715 __ verify_oop(src->as_register()); 716 } 717 move_regs(src->as_register(), dest->as_register()); 718 719 } else if (dest->is_double_cpu()) { 720 if (src->type() == T_OBJECT || src->type() == T_ARRAY) { 721 // Surprising to me but we can see move of a long to t_object 722 __ verify_oop(src->as_register()); 723 
move_regs(src->as_register(), dest->as_register_lo()); 724 return; 725 } 726 assert(src->is_double_cpu(), "must match"); 727 Register f_lo = src->as_register_lo(); 728 Register f_hi = src->as_register_hi(); 729 Register t_lo = dest->as_register_lo(); 730 Register t_hi = dest->as_register_hi(); 731 assert(f_hi == f_lo, "must be same"); 732 assert(t_hi == t_lo, "must be same"); 733 move_regs(f_lo, t_lo); 734 735 } else if (dest->is_single_fpu()) { 736 __ fmovs(dest->as_float_reg(), src->as_float_reg()); 737 738 } else if (dest->is_double_fpu()) { 739 __ fmovd(dest->as_double_reg(), src->as_double_reg()); 740 741 } else { 742 ShouldNotReachHere(); 743 } 744 } 745 746 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) { 747 if (src->is_single_cpu()) { 748 if (type == T_ARRAY || type == T_OBJECT) { 749 __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix())); 750 __ verify_oop(src->as_register()); 751 } else if (type == T_METADATA || type == T_DOUBLE) { 752 __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix())); 753 } else { 754 __ strw(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix())); 755 } 756 757 } else if (src->is_double_cpu()) { 758 Address dest_addr_LO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes); 759 __ str(src->as_register_lo(), dest_addr_LO); 760 761 } else if (src->is_single_fpu()) { 762 Address dest_addr = frame_map()->address_for_slot(dest->single_stack_ix()); 763 __ strs(src->as_float_reg(), dest_addr); 764 765 } else if (src->is_double_fpu()) { 766 Address dest_addr = frame_map()->address_for_slot(dest->double_stack_ix()); 767 __ strd(src->as_double_reg(), dest_addr); 768 769 } else { 770 ShouldNotReachHere(); 771 } 772 773 } 774 775 776 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* 
unaligned */) { 777 LIR_Address* to_addr = dest->as_address_ptr(); 778 PatchingStub* patch = NULL; 779 Register compressed_src = rscratch1; 780 781 if (patch_code != lir_patch_none) { 782 deoptimize_trap(info); 783 return; 784 } 785 786 if (type == T_ARRAY || type == T_OBJECT) { 787 __ verify_oop(src->as_register()); 788 789 if (UseCompressedOops && !wide) { 790 __ encode_heap_oop(compressed_src, src->as_register()); 791 } else { 792 compressed_src = src->as_register(); 793 } 794 } 795 796 int null_check_here = code_offset(); 797 switch (type) { 798 case T_FLOAT: { 799 __ strs(src->as_float_reg(), as_Address(to_addr)); 800 break; 801 } 802 803 case T_DOUBLE: { 804 __ strd(src->as_double_reg(), as_Address(to_addr)); 805 break; 806 } 807 808 case T_ARRAY: // fall through 809 case T_OBJECT: // fall through 810 if (UseCompressedOops && !wide) { 811 __ strw(compressed_src, as_Address(to_addr, rscratch2)); 812 } else { 813 __ str(compressed_src, as_Address(to_addr)); 814 } 815 break; 816 case T_METADATA: 817 // We get here to store a method pointer to the stack to pass to 818 // a dtrace runtime call. This can't work on 64 bit with 819 // compressed klass ptrs: T_METADATA can be a compressed klass 820 // ptr or a 64 bit method pointer. 
821 ShouldNotReachHere(); 822 __ str(src->as_register(), as_Address(to_addr)); 823 break; 824 case T_ADDRESS: 825 __ str(src->as_register(), as_Address(to_addr)); 826 break; 827 case T_INT: 828 __ strw(src->as_register(), as_Address(to_addr)); 829 break; 830 831 case T_LONG: { 832 __ str(src->as_register_lo(), as_Address_lo(to_addr)); 833 break; 834 } 835 836 case T_BYTE: // fall through 837 case T_BOOLEAN: { 838 __ strb(src->as_register(), as_Address(to_addr)); 839 break; 840 } 841 842 case T_CHAR: // fall through 843 case T_SHORT: 844 __ strh(src->as_register(), as_Address(to_addr)); 845 break; 846 847 default: 848 ShouldNotReachHere(); 849 } 850 if (info != NULL) { 851 add_debug_info_for_null_check(null_check_here, info); 852 } 853 } 854 855 856 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) { 857 assert(src->is_stack(), "should not call otherwise"); 858 assert(dest->is_register(), "should not call otherwise"); 859 860 if (dest->is_single_cpu()) { 861 if (type == T_ARRAY || type == T_OBJECT) { 862 __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); 863 __ verify_oop(dest->as_register()); 864 } else if (type == T_METADATA) { 865 __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); 866 } else { 867 __ ldrw(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); 868 } 869 870 } else if (dest->is_double_cpu()) { 871 Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes); 872 __ ldr(dest->as_register_lo(), src_addr_LO); 873 874 } else if (dest->is_single_fpu()) { 875 Address src_addr = frame_map()->address_for_slot(src->single_stack_ix()); 876 __ ldrs(dest->as_float_reg(), src_addr); 877 878 } else if (dest->is_double_fpu()) { 879 Address src_addr = frame_map()->address_for_slot(src->double_stack_ix()); 880 __ ldrd(dest->as_double_reg(), src_addr); 881 882 } else { 883 ShouldNotReachHere(); 884 } 885 } 886 887 888 
// Materialize a klass constant that is not yet resolved: call into the
// appropriate Runtime1 patching stub, which resolves the constant and
// back-patches this call site on first execution.
// NOTE(review): reloc_type is computed per patching id but not used below.
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;

  switch (patching_id(info)) {
  case PatchingStub::access_field_id:
    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
    reloc_type = relocInfo::section_word_type;
    break;
  case PatchingStub::load_klass_id:
    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
    reloc_type = relocInfo::metadata_type;
    break;
  case PatchingStub::load_mirror_id:
    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  case PatchingStub::load_appendix_id:
    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  default: ShouldNotReachHere();
  }

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}

// Copy a stack slot to another stack slot by bouncing through rscratch1.
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {

  LIR_Opr temp;
  if (type == T_LONG || type == T_DOUBLE)
    temp = FrameMap::rscratch1_long_opr;
  else
    temp = FrameMap::rscratch1_opr;

  stack2reg(src, temp, src->type());
  reg2stack(temp, dest, dest->type(), false);
}


// Load a value from memory into a register, with an optional implicit
// null check (via 'info').  Patching loads are turned into a
// deoptimization instead of being emitted here.
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
  LIR_Address* addr = src->as_address_ptr();
  LIR_Address* from_addr = src->as_address_ptr();

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(addr->base()->as_pointer_register());
  }

  if (patch_code != lir_patch_none) {
    // Patched loads deoptimize; the interpreter will redo the resolution.
    deoptimize_trap(info);
    return;
  }

  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }
  int null_check_here = code_offset();
  switch (type) {
  case T_FLOAT: {
    __ ldrs(dest->as_float_reg(), as_Address(from_addr));
    break;
  }

  case T_DOUBLE: {
    __ ldrd(dest->as_double_reg(), as_Address(from_addr));
    break;
  }

  case T_ARRAY:  // fall through
  case T_OBJECT: // fall through
    if (UseCompressedOops && !wide) {
      __ ldrw(dest->as_register(), as_Address(from_addr));
    } else {
      __ ldr(dest->as_register(), as_Address(from_addr));
    }
    break;
  case T_METADATA:
    // We get here to store a method pointer to the stack to pass to
    // a dtrace runtime call. This can't work on 64 bit with
    // compressed klass ptrs: T_METADATA can be a compressed klass
    // ptr or a 64 bit method pointer.
    ShouldNotReachHere();
    __ ldr(dest->as_register(), as_Address(from_addr));
    break;
  case T_ADDRESS:
    // FIXME: OMG this is a horrible kludge.  Any offset from an
    // address that matches klass_offset_in_bytes() will be loaded
    // as a word, not a long.
    if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
      __ ldrw(dest->as_register(), as_Address(from_addr));
    } else {
      __ ldr(dest->as_register(), as_Address(from_addr));
    }
    break;
  case T_INT:
    __ ldrw(dest->as_register(), as_Address(from_addr));
    break;

  case T_LONG: {
    __ ldr(dest->as_register_lo(), as_Address_lo(from_addr));
    break;
  }

  case T_BYTE:
    __ ldrsb(dest->as_register(), as_Address(from_addr));
    break;
  case T_BOOLEAN: {
    __ ldrb(dest->as_register(), as_Address(from_addr));
    break;
  }

  case T_CHAR:
    __ ldrh(dest->as_register(), as_Address(from_addr));
    break;
  case T_SHORT:
    __ ldrsh(dest->as_register(), as_Address(from_addr));
    break;

  default:
    ShouldNotReachHere();
  }

  // Post-process compressed oops / compressed klass pointers.
  if (type == T_ARRAY || type == T_OBJECT) {
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }
    __ verify_oop(dest->as_register());
  } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
    if (UseCompressedClassPointers) {
      __ decode_klass_not_null(dest->as_register());
    }
  }
}


// log2 of the element size for 'type'; used as a shift amount when
// computing array element addresses.
int LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  return exact_log2(elem_size);
}

// Emit a 32-bit integer divide or remainder (lir_idiv / lir_irem).
void LIR_Assembler::arithmetic_idiv(LIR_Op3* op, bool is_irem) {
  Register Rdividend = op->in_opr1()->as_register();
  Register Rdivisor  = op->in_opr2()->as_register();
  Register Rscratch  = op->in_opr3()->as_register();
  Register Rresult   = op->result_opr()->as_register();
  int divisor = -1;

  /*
  TODO: For some reason, using the Rscratch that gets passed in is
  not possible because the register allocator does not see the tmp reg
  as used, and assignes it the same register as Rdividend. We use rscratch1
   instead.

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor  != Rscratch, "");
  */

  if (Rdivisor == noreg && is_power_of_2(divisor)) {
    // convert division by a power of two into some shifts and logical operations
    // NOTE(review): divisor is always -1 here, so this strength-reduction
    // branch is currently dead.
  }

  __ corrected_idivl(Rresult, Rdividend, Rdivisor, is_irem, rscratch1);
}

// Dispatch three-operand LIR ops: idiv/irem and fused multiply-add.
void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
  case lir_idiv:
    arithmetic_idiv(op, false);
    break;
  case lir_irem:
    arithmetic_idiv(op, true);
    break;
  case lir_fmad:
    __ fmaddd(op->result_opr()->as_double_reg(),
              op->in_opr1()->as_double_reg(),
              op->in_opr2()->as_double_reg(),
              op->in_opr3()->as_double_reg());
    break;
  case lir_fmaf:
    __ fmadds(op->result_opr()->as_float_reg(),
              op->in_opr1()->as_float_reg(),
              op->in_opr2()->as_float_reg(),
              op->in_opr3()->as_float_reg());
    break;
  default:      ShouldNotReachHere(); break;
  }
}

// Emit a conditional or unconditional branch.  Float branches need an
// extra VS (unordered) branch for EQ/NE, and use unsigned condition
// codes when the unordered target coincides with the taken target.
void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != NULL) add_debug_info_for_branch(op->info());
    __ b(*(op->label()));
  } else {
    Assembler::Condition acond;
    if (op->code() == lir_cond_float_branch) {
      bool is_unordered = (op->ublock() == op->block());
      // Assembler::EQ does not permit unordered branches, so we add
      // another branch here.  Likewise, Assembler::NE does not permit
      // ordered branches.
      if (is_unordered && op->cond() == lir_cond_equal
          || !is_unordered && op->cond() == lir_cond_notEqual)
        __ br(Assembler::VS, *(op->ublock()->label()));
      switch(op->cond()) {
      case lir_cond_equal:        acond = Assembler::EQ; break;
      case lir_cond_notEqual:     acond = Assembler::NE; break;
      case lir_cond_less:         acond = (is_unordered ? Assembler::LT : Assembler::LO); break;
      case lir_cond_lessEqual:    acond = (is_unordered ? Assembler::LE : Assembler::LS); break;
      case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::HS : Assembler::GE); break;
      case lir_cond_greater:      acond = (is_unordered ? Assembler::HI : Assembler::GT); break;
      default:                    ShouldNotReachHere();
        acond = Assembler::EQ;  // unreachable
      }
    } else {
      switch (op->cond()) {
        case lir_cond_equal:        acond = Assembler::EQ; break;
        case lir_cond_notEqual:     acond = Assembler::NE; break;
        case lir_cond_less:         acond = Assembler::LT; break;
        case lir_cond_lessEqual:    acond = Assembler::LE; break;
        case lir_cond_greaterEqual: acond = Assembler::GE; break;
        case lir_cond_greater:      acond = Assembler::GT; break;
        case lir_cond_belowEqual:   acond = Assembler::LS; break;
        case lir_cond_aboveEqual:   acond = Assembler::HS; break;
        default:                    ShouldNotReachHere();
          acond = Assembler::EQ;  // unreachable
      }
    }
    __ br(acond,*(op->label()));
  }
}

// Emit a primitive conversion (i2f, l2d, f2i, i2b, ...) using the
// matching aarch64 convert/extend instruction.
void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2f:
      {
        __ scvtfws(dest->as_float_reg(), src->as_register());
        break;
      }
    case Bytecodes::_i2d:
      {
        __ scvtfwd(dest->as_double_reg(), src->as_register());
        break;
      }
    case Bytecodes::_l2d:
      {
        __ scvtfd(dest->as_double_reg(), src->as_register_lo());
        break;
      }
    case Bytecodes::_l2f:
      {
        __ scvtfs(dest->as_float_reg(), src->as_register_lo());
        break;
      }
    case Bytecodes::_f2d:
      {
        __ fcvts(dest->as_double_reg(), src->as_float_reg());
        break;
      }
    case Bytecodes::_d2f:
      {
        __ fcvtd(dest->as_float_reg(), src->as_double_reg());
        break;
      }
    case Bytecodes::_i2c:
      {
        // char is unsigned 16-bit: zero-extend.
        __ ubfx(dest->as_register(), src->as_register(), 0, 16);
        break;
      }
    case Bytecodes::_i2l:
      {
        __ sxtw(dest->as_register_lo(), src->as_register());
        break;
      }
    case Bytecodes::_i2s:
      {
        // short is signed 16-bit: sign-extend.
        __ sxth(dest->as_register(), src->as_register());
        break;
      }
    case Bytecodes::_i2b:
      {
        __ sxtb(dest->as_register(), src->as_register());
        break;
      }
    case Bytecodes::_l2i:
      {
        _masm->block_comment("FIXME: This could be a no-op");
        __ uxtw(dest->as_register(), src->as_register_lo());
        break;
      }
    case Bytecodes::_d2l:
      {
        __ fcvtzd(dest->as_register_lo(), src->as_double_reg());
        break;
      }
    case Bytecodes::_f2i:
      {
        __ fcvtzsw(dest->as_register(), src->as_float_reg());
        break;
      }
    case Bytecodes::_f2l:
      {
        __ fcvtzs(dest->as_register_lo(), src->as_float_reg());
        break;
      }
    case Bytecodes::_d2i:
      {
        __ fcvtzdw(dest->as_register(), src->as_double_reg());
        break;
      }
    default: ShouldNotReachHere();
  }
}

// Allocate an instance.  If the klass may not be initialized yet, test
// its init state first and take the slow-path stub when not
// fully_initialized.
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    __ ldrb(rscratch1, Address(op->klass()->as_register(),
                               InstanceKlass::init_state_offset()));
    __ cmpw(rscratch1, InstanceKlass::fully_initialized);
    add_debug_info_for_null_check_here(op->stub()->info());
    __ br(Assembler::NE, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

// Allocate an array.  Falls back to the slow-path stub when fast
// allocation is disabled; otherwise shuffles temps so that 'len' does
// not alias a temp register, then emits the inline allocation.
void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len =  op->len()->as_register();
  __ uxtw(len, len);

  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ b(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::header_size(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}

// Update the ReceiverTypeData rows in 'mdo' for receiver klass 'recv':
// bump the count of a matching row, or claim the first empty row.
// Branches to 'update_done' when the profile was updated.
void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ ldr(rscratch1, Address(rscratch2));
    __ cmp(recv, rscratch1);
    __ br(Assembler::NE, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ b(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    __ lea(rscratch2,
           Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    Address recv_addr(rscratch2);
    __ ldr(rscratch1, recv_addr);
    __ cbnz(rscratch1, next_test);
    __ str(recv, recv_addr);
    __ mov(rscratch1, DataLayout::counter_increment);
    __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
    __ str(rscratch1, Address(rscratch2));
    __ b(*update_done);
    __ bind(next_test);
  }
}

// Shared type-check emitter for checkcast/instanceof: branches to
// 'success', 'failure', or 'obj_is_null', updating the method data
// (null_seen flag, receiver rows, failure counter) when profiling.
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;

  // check if it needs to be profiled
  ciMethodData* md;
  ciProfileData* data;

  const bool should_profile = op->should_profile();

  if (should_profile) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != NULL,                "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  Label profile_cast_success, profile_cast_failure;
  Label *success_target = should_profile ? &profile_cast_success : success;
  Label *failure_target = should_profile ? &profile_cast_failure : failure;

  // Avoid aliasing between obj and the temps that must survive it.
  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded() && !UseCompressedClassPointers) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  if (should_profile) {
    Label not_null;
    __ cbnz(obj, not_null);
    // Object is null; update MDO and exit
    Register mdo  = klass_RInfo;
    __ mov_metadata(mdo, md->constant_encoding());
    Address data_addr
      = __ form_address(rscratch2, mdo,
                        md->byte_offset_of_slot(data, DataLayout::flags_offset()),
                        0);
    __ ldrb(rscratch1, data_addr);
    __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
    __ strb(rscratch1, data_addr);
    __ b(*obj_is_null);
    __ bind(not_null);
  } else {
    __ cbz(obj, *obj_is_null);
  }

  if (!k->is_loaded()) {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
    __ mov_metadata(k_RInfo, k->constant_encoding());
  }
  __ verify_oop(obj);

  if (op->fast_check()) {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(rscratch1, obj);
    __ cmp( rscratch1, k_RInfo);

    __ br(Assembler::NE, *failure_target);
    // successful cast, fall through to profile or jump
  } else {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(klass_RInfo, obj);
    if (k->is_loaded()) {
      // See if we get an immediate positive hit
      __ ldr(rscratch1, Address(klass_RInfo, long(k->super_check_offset())));
      __ cmp(k_RInfo, rscratch1);
      if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
        __ br(Assembler::NE, *failure_target);
        // successful cast, fall through to profile or jump
      } else {
        // See if we get an immediate positive hit
        __ br(Assembler::EQ, *success_target);
        // check for self
        __ cmp(klass_RInfo, k_RInfo);
        __ br(Assembler::EQ, *success_target);

        __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
        __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
        __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
        // result is a boolean
        __ cbzw(klass_RInfo, *failure_target);
        // successful cast, fall through to profile or jump
      }
    } else {
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
      __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
      // result is a boolean
      __ cbz(k_RInfo, *failure_target);
      // successful cast, fall through to profile or jump
    }
  }
  if (should_profile) {
    Register mdo  = klass_RInfo, recv = k_RInfo;
    __ bind(profile_cast_success);
    __ mov_metadata(mdo, md->constant_encoding());
    __ load_klass(recv, obj);
    Label update_done;  // NOTE(review): declared but unused; profile rows branch to 'success'
    type_profile_helper(mdo, md, data, recv, success);
    __ b(*success);

    __ bind(profile_cast_failure);
    __ mov_metadata(mdo, md->constant_encoding());
    Address counter_addr
      = __ form_address(rscratch2, mdo,
                        md->byte_offset_of_slot(data, CounterData::count_offset()),
                        0);
    __ ldr(rscratch1, counter_addr);
    __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
    __ str(rscratch1, counter_addr);
    __ b(*failure);
  }
  __ b(*success);
}

// Dispatch type-check LIR ops: array store check (inline subtype test
// with optional profiling), checkcast, and instanceof (the latter two
// delegate to emit_typecheck_helper).
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  const bool should_profile = op->should_profile();

  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();

    CodeStub* stub = op->stub();

    // check if it needs to be profiled
    ciMethodData* md;
    ciProfileData* data;

    if (should_profile) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      int bci = op->profiled_bci();
      md = method->method_data_or_null();
      assert(md != NULL, "Sanity");
      data = md->bci_to_data(bci);
      assert(data != NULL,                "need data for type check");
      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
    }
    Label profile_cast_success, profile_cast_failure, done;
    Label *success_target = should_profile ? &profile_cast_success : &done;
    Label *failure_target = should_profile ? &profile_cast_failure : stub->entry();

    if (should_profile) {
      Label not_null;
      __ cbnz(value, not_null);
      // Object is null; update MDO and exit
      Register mdo  = klass_RInfo;
      __ mov_metadata(mdo, md->constant_encoding());
      Address data_addr
        = __ form_address(rscratch2, mdo,
                          md->byte_offset_of_slot(data, DataLayout::flags_offset()),
                          0);
      __ ldrb(rscratch1, data_addr);
      __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
      __ strb(rscratch1, data_addr);
      __ b(done);
      __ bind(not_null);
    } else {
      // Storing null always succeeds.
      __ cbz(value, done);
    }

    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(k_RInfo, array);
    __ load_klass(klass_RInfo, value);

    // get instance klass (it's already uncompressed)
    __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
    __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
    __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
    __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
    // result is a boolean
    __ cbzw(k_RInfo, *failure_target);
    // fall through to the success case

    if (should_profile) {
      Register mdo  = klass_RInfo, recv = k_RInfo;
      __ bind(profile_cast_success);
      __ mov_metadata(mdo, md->constant_encoding());
      __ load_klass(recv, value);
      Label update_done;  // NOTE(review): declared but unused; profile rows branch to 'done'
      type_profile_helper(mdo, md, data, recv, &done);
      __ b(done);

      __ bind(profile_cast_failure);
      __ mov_metadata(mdo, md->constant_encoding());
      Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
      __ lea(rscratch2, counter_addr);
      __ ldr(rscratch1, Address(rscratch2));
      __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
      __ str(rscratch1, Address(rscratch2));
      __ b(*stub->entry());
    }

    __ bind(done);
  } else if (code == lir_checkcast) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success;
    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
    __ bind(success);
    if (dst != obj) {
      __ mov(dst, obj);
    }
  } else if (code == lir_instanceof) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    emit_typecheck_helper(op, &success, &failure, &failure);
    __ bind(failure);
    __ mov(dst, zr);
    __ b(done);
    __ bind(success);
    __ mov(dst, 1);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}

// 32-bit compare-and-swap; leaves NE result in rscratch1 and emits a
// full barrier afterwards.
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}

// 64-bit compare-and-swap; same contract as casw.
void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}


// Emit a CAS on an int, long, or object field.  Object CAS with
// compressed oops narrows both values first and uses the 32-bit form.
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  assert(VM_Version::supports_cx8(), "wrong machine");
  Register addr;
  if (op->addr()->is_register()) {
    addr = as_reg(op->addr());
  } else {
    assert(op->addr()->is_address(), "what else?");
    LIR_Address* addr_ptr = op->addr()->as_address_ptr();
    assert(addr_ptr->disp() == 0, "need 0 disp");
    assert(addr_ptr->index() == LIR_OprDesc::illegalOpr(), "need 0 index");
    addr = as_reg(addr_ptr->base());
  }
  Register newval = as_reg(op->new_value());
  Register cmpval = as_reg(op->cmp_value());
  Label succeed, fail, around;  // NOTE(review): unused; apparent leftovers

  if (op->code() == lir_cas_obj) {
    if (UseCompressedOops) {
      Register t1 = op->tmp1()->as_register();
      assert(op->tmp1()->is_valid(), "must be");
      __ encode_heap_oop(t1, cmpval);
      cmpval = t1;
      __ encode_heap_oop(rscratch2, newval);
      newval = rscratch2;
      casw(addr, newval, cmpval);
    } else {
      casl(addr, newval, cmpval);
    }
  } else if (op->code() == lir_cas_int) {
    casw(addr, newval, cmpval);
  } else {
    casl(addr, newval, cmpval);
  }
}


// Conditional move: select opr1/opr2 into result based on 'condition'
// (flags are assumed already set).  Constant 0/1 pairs collapse to a
// single cset; other operands are first materialized into scratch regs.
// NOTE(review): lir_cond_belowEqual/aboveEqual fall into
// ShouldNotReachHere() here, unlike in emit_opBranch.
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {

  Assembler::Condition acond, ncond;
  switch (condition) {
  case lir_cond_equal:        acond = Assembler::EQ; ncond = Assembler::NE; break;
  case lir_cond_notEqual:     acond = Assembler::NE; ncond = Assembler::EQ; break;
  case lir_cond_less:         acond = Assembler::LT; ncond = Assembler::GE; break;
  case lir_cond_lessEqual:    acond = Assembler::LE; ncond = Assembler::GT; break;
  case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break;
  case lir_cond_greater:      acond = Assembler::GT; ncond = Assembler::LE; break;
  case lir_cond_belowEqual:
  case lir_cond_aboveEqual:
  default:                    ShouldNotReachHere();
    acond = Assembler::EQ; ncond = Assembler::NE;  // unreachable
  }

  assert(result->is_single_cpu() || result->is_double_cpu(),
         "expect single register for result");
  if (opr1->is_constant() && opr2->is_constant()
      && opr1->type() == T_INT && opr2->type() == T_INT) {
    jint val1 = opr1->as_jint();
    jint val2 = opr2->as_jint();
    if (val1 == 0 && val2 == 1) {
      __ cset(result->as_register(), ncond);
      return;
    } else if (val1 == 1 && val2 == 0) {
      __ cset(result->as_register(), acond);
      return;
    }
  }

  if (opr1->is_constant() && opr2->is_constant()
      && opr1->type() == T_LONG && opr2->type() == T_LONG) {
    jlong val1 = opr1->as_jlong();
    jlong val2 = opr2->as_jlong();
    if (val1 == 0 && val2 == 1) {
      __ cset(result->as_register_lo(), ncond);
      return;
    } else if (val1 == 1 && val2 == 0) {
      __ cset(result->as_register_lo(), acond);
      return;
    }
  }

  if (opr1->is_stack()) {
    stack2reg(opr1, FrameMap::rscratch1_opr, result->type());
    opr1 = FrameMap::rscratch1_opr;
  } else if (opr1->is_constant()) {
    LIR_Opr tmp
      = opr1->type() == T_LONG ? FrameMap::rscratch1_long_opr : FrameMap::rscratch1_opr;
    const2reg(opr1, tmp, lir_patch_none, NULL);
    opr1 = tmp;
  }

  if (opr2->is_stack()) {
    stack2reg(opr2, FrameMap::rscratch2_opr, result->type());
    opr2 = FrameMap::rscratch2_opr;
  } else if (opr2->is_constant()) {
    LIR_Opr tmp
      = opr2->type() == T_LONG ? FrameMap::rscratch2_long_opr : FrameMap::rscratch2_opr;
    const2reg(opr2, tmp, lir_patch_none, NULL);
    opr2 = tmp;
  }

  if (result->type() == T_LONG)
    __ csel(result->as_register_lo(), opr1->as_register_lo(), opr2->as_register_lo(), acond);
  else
    __ csel(result->as_register(), opr1->as_register(), opr2->as_register(), acond);
}

// Emit integer/long/float/double add, sub, mul, div, rem for register
// and constant operands.  idiv/irem with a CodeEmitInfo are handled
// elsewhere (see the assert).
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");

  if (left->is_single_cpu()) {
    Register lreg = left->as_register();
    Register dreg = as_reg(dest);

    if (right->is_single_cpu()) {
      // cpu register - cpu register

      assert(left->type() == T_INT && right->type() == T_INT && dest->type() == T_INT,
             "should be");
      Register rreg = right->as_register();
      switch (code) {
      case lir_add: __ addw (dest->as_register(), lreg, rreg); break;
      case lir_sub: __ subw (dest->as_register(), lreg, rreg); break;
      case lir_mul: __ mulw (dest->as_register(), lreg, rreg); break;
      default:      ShouldNotReachHere();
      }

    } else if (right->is_double_cpu()) {
      Register rreg = right->as_register_lo();
      // single_cpu + double_cpu: can happen with obj+long
      assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
      switch (code) {
      case lir_add: __ add(dreg, lreg, rreg); break;
      case lir_sub: __ sub(dreg, lreg, rreg); break;
      default:      ShouldNotReachHere();
      }
    } else if (right->is_constant()) {
      // cpu register - constant
      jlong c;

      // FIXME.  This is fugly: we really need to factor all this logic.
      switch(right->type()) {
      case T_LONG:
        c = right->as_constant_ptr()->as_jlong();
        break;
      case T_INT:
      case T_ADDRESS:
        c = right->as_constant_ptr()->as_jint();
        break;
      default:
        ShouldNotReachHere();
        c = 0;  // unreachable
        break;
      }

      assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
      if (c == 0 && dreg == lreg) {
        COMMENT("effective nop elided");
        return;
      }
      switch(left->type()) {
      case T_INT:
        switch (code) {
        case lir_add: __ addw(dreg, lreg, c); break;
        case lir_sub: __ subw(dreg, lreg, c); break;
        default:      ShouldNotReachHere();
        }
        break;
      case T_OBJECT:
      case T_ADDRESS:
        switch (code) {
        case lir_add: __ add(dreg, lreg, c); break;
        case lir_sub: __ sub(dreg, lreg, c); break;
        default:      ShouldNotReachHere();
        }
        break;
        // NOTE(review): no 'default:' label precedes the next statement,
        // so it is unreachable dead code and other types fall out of the
        // switch silently.
        ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_double_cpu()) {
    Register lreg_lo = left->as_register_lo();

    if (right->is_double_cpu()) {
      // cpu register - cpu register
      Register rreg_lo = right->as_register_lo();
      switch (code) {
      case lir_add: __ add (dest->as_register_lo(), lreg_lo, rreg_lo); break;
      case lir_sub: __ sub (dest->as_register_lo(), lreg_lo, rreg_lo); break;
      case lir_mul: __ mul (dest->as_register_lo(), lreg_lo, rreg_lo); break;
      case lir_div: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, false, rscratch1); break;
      case lir_rem: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, true, rscratch1); break;
      default:
        ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      jlong c = right->as_constant_ptr()->as_jlong_bits();
      Register dreg = as_reg(dest);
      assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
      if (c == 0 && dreg == lreg_lo) {
        COMMENT("effective nop elided");
        return;
      }
      switch (code) {
      case lir_add: __ add(dreg, lreg_lo, c); break;
      case lir_sub: __ sub(dreg, lreg_lo, c); break;
      default:
        ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (left->is_single_fpu()) {
    assert(right->is_single_fpu(), "right hand side of float arithmetics needs to be float register");
    switch (code) {
    case lir_add: __ fadds (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    case lir_sub: __ fsubs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    case lir_mul: __ fmuls (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    case lir_div: __ fdivs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    default:
      ShouldNotReachHere();
    }
  } else if (left->is_double_fpu()) {
    if (right->is_double_fpu()) {
      // cpu register - cpu register
      switch (code) {
      case lir_add: __ faddd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      case lir_sub: __ fsubd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      case lir_mul: __ fmuld (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      case lir_div: __ fdivd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      default:
        ShouldNotReachHere();
      }
    } else {
      if (right->is_constant()) {
        ShouldNotReachHere();
      }
      ShouldNotReachHere();
    }
  } else if (left->is_single_stack() || left->is_address()) {
    assert(left == dest, "left and dest must be equal");
    ShouldNotReachHere();
  } else {
    ShouldNotReachHere();
  }
}

// Not used on aarch64 (no x87-style FPU stack).
void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) { Unimplemented(); }


// Double-precision abs/sqrt intrinsics.
void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
  switch(code) {
  case lir_abs : __ fabsd(dest->as_double_reg(), value->as_double_reg()); break;
  case lir_sqrt: __ fsqrtd(dest->as_double_reg(), value->as_double_reg()); break;
  default      : ShouldNotReachHere();
  }
}

// Bitwise and/or/xor on 32- or 64-bit registers, with an immediate or
// register right-hand side.
void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {

  assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register");
  Register Rleft = left->is_single_cpu() ? left->as_register() :
                                           left->as_register_lo();
   if (dst->is_single_cpu()) {
     Register Rdst = dst->as_register();
     if (right->is_constant()) {
       switch (code) {
         case lir_logic_and: __ andw (Rdst, Rleft, right->as_jint()); break;
         case lir_logic_or:  __ orrw (Rdst, Rleft, right->as_jint()); break;
         case lir_logic_xor: __ eorw (Rdst, Rleft, right->as_jint()); break;
         default: ShouldNotReachHere(); break;
       }
     } else {
       Register Rright = right->is_single_cpu() ? right->as_register() :
                                                  right->as_register_lo();
       switch (code) {
         case lir_logic_and: __ andw (Rdst, Rleft, Rright); break;
         case lir_logic_or:  __ orrw (Rdst, Rleft, Rright); break;
         case lir_logic_xor: __ eorw (Rdst, Rleft, Rright); break;
         default: ShouldNotReachHere(); break;
       }
     }
   } else {
     Register Rdst = dst->as_register_lo();
     if (right->is_constant()) {
       switch (code) {
         case lir_logic_and: __ andr (Rdst, Rleft, right->as_jlong()); break;
         case lir_logic_or:  __ orr (Rdst, Rleft, right->as_jlong()); break;
         case lir_logic_xor: __ eor (Rdst, Rleft, right->as_jlong()); break;
         default: ShouldNotReachHere(); break;
       }
     } else {
       Register Rright = right->is_single_cpu() ? right->as_register() :
                                                  right->as_register_lo();
       switch (code) {
         case lir_logic_and: __ andr (Rdst, Rleft, Rright); break;
         case lir_logic_or:  __ orr (Rdst, Rleft, Rright); break;
         case lir_logic_xor: __ eor (Rdst, Rleft, Rright); break;
         default: ShouldNotReachHere(); break;
       }
     }
   }
}



// Handled via LIR_Op3 (see arithmetic_idiv(LIR_Op3*, bool) above).
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { Unimplemented(); }


// Emit a compare between two operands, setting the flags for a later
// conditional branch or cmove.  A constant opr1 encodes a tableswitch.
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_constant() && opr2->is_single_cpu()) {
    // tableswitch
    Register reg = as_reg(opr2);
    struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
    __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
  } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
    Register reg1 = as_reg(opr1);
    if (opr2->is_single_cpu()) {
      // cpu register - cpu register
      Register reg2 = opr2->as_register();
      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
        __ cmpoop(reg1, reg2);
      } else {
        assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
        __ cmpw(reg1, reg2);
      }
      return;
    }
    if (opr2->is_double_cpu()) {
      // cpu register - cpu register
      Register reg2 = opr2->as_register_lo();
      __ cmp(reg1, reg2);
      return;
    }

    if (opr2->is_constant()) {
      bool is_32bit = false; // width of register operand
      jlong imm;

      switch(opr2->type()) {
      case T_INT:
        imm = opr2->as_constant_ptr()->as_jint();
        is_32bit = true;
        break;
      case T_LONG:
        imm = opr2->as_constant_ptr()->as_jlong();
        break;
      case T_ADDRESS:
        imm = opr2->as_constant_ptr()->as_jint();
        break;
      case T_OBJECT:
      case T_ARRAY:
jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1); 1914 __ cmpoop(reg1, rscratch1); 1915 return; 1916 default: 1917 ShouldNotReachHere(); 1918 imm = 0; // unreachable 1919 break; 1920 } 1921 1922 if (Assembler::operand_valid_for_add_sub_immediate(imm)) { 1923 if (is_32bit) 1924 __ cmpw(reg1, imm); 1925 else 1926 __ cmp(reg1, imm); 1927 return; 1928 } else { 1929 __ mov(rscratch1, imm); 1930 if (is_32bit) 1931 __ cmpw(reg1, rscratch1); 1932 else 1933 __ cmp(reg1, rscratch1); 1934 return; 1935 } 1936 } else 1937 ShouldNotReachHere(); 1938 } else if (opr1->is_single_fpu()) { 1939 FloatRegister reg1 = opr1->as_float_reg(); 1940 assert(opr2->is_single_fpu(), "expect single float register"); 1941 FloatRegister reg2 = opr2->as_float_reg(); 1942 __ fcmps(reg1, reg2); 1943 } else if (opr1->is_double_fpu()) { 1944 FloatRegister reg1 = opr1->as_double_reg(); 1945 assert(opr2->is_double_fpu(), "expect double float register"); 1946 FloatRegister reg2 = opr2->as_double_reg(); 1947 __ fcmpd(reg1, reg2); 1948 } else { 1949 ShouldNotReachHere(); 1950 } 1951 } 1952 1953 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){ 1954 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) { 1955 bool is_unordered_less = (code == lir_ucmp_fd2i); 1956 if (left->is_single_fpu()) { 1957 __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register()); 1958 } else if (left->is_double_fpu()) { 1959 __ float_cmp(false, is_unordered_less ? 
-1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register()); 1960 } else { 1961 ShouldNotReachHere(); 1962 } 1963 } else if (code == lir_cmp_l2i) { 1964 Label done; 1965 __ cmp(left->as_register_lo(), right->as_register_lo()); 1966 __ mov(dst->as_register(), (u_int64_t)-1L); 1967 __ br(Assembler::LT, done); 1968 __ csinc(dst->as_register(), zr, zr, Assembler::EQ); 1969 __ bind(done); 1970 } else { 1971 ShouldNotReachHere(); 1972 } 1973 } 1974 1975 1976 void LIR_Assembler::align_call(LIR_Code code) { } 1977 1978 1979 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) { 1980 address call = __ trampoline_call(Address(op->addr(), rtype)); 1981 if (call == NULL) { 1982 bailout("trampoline stub overflow"); 1983 return; 1984 } 1985 add_call_info(code_offset(), op->info()); 1986 } 1987 1988 1989 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) { 1990 address call = __ ic_call(op->addr()); 1991 if (call == NULL) { 1992 bailout("trampoline stub overflow"); 1993 return; 1994 } 1995 add_call_info(code_offset(), op->info()); 1996 } 1997 1998 1999 /* Currently, vtable-dispatch is only enabled for sparc platforms */ 2000 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) { 2001 ShouldNotReachHere(); 2002 } 2003 2004 2005 void LIR_Assembler::emit_static_call_stub() { 2006 address call_pc = __ pc(); 2007 address stub = __ start_a_stub(call_stub_size()); 2008 if (stub == NULL) { 2009 bailout("static call stub overflow"); 2010 return; 2011 } 2012 2013 int start = __ offset(); 2014 2015 __ relocate(static_stub_Relocation::spec(call_pc)); 2016 __ emit_static_call_stub(); 2017 2018 assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size() 2019 <= call_stub_size(), "stub too big"); 2020 __ end_a_stub(); 2021 } 2022 2023 2024 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { 2025 assert(exceptionOop->as_register() == r0, "must match"); 2026 assert(exceptionPC->as_register() == r3, "must 
match"); 2027 2028 // exception object is not added to oop map by LinearScan 2029 // (LinearScan assumes that no oops are in fixed registers) 2030 info->add_register_oop(exceptionOop); 2031 Runtime1::StubID unwind_id; 2032 2033 // get current pc information 2034 // pc is only needed if the method has an exception handler, the unwind code does not need it. 2035 int pc_for_athrow_offset = __ offset(); 2036 InternalAddress pc_for_athrow(__ pc()); 2037 __ adr(exceptionPC->as_register(), pc_for_athrow); 2038 add_call_info(pc_for_athrow_offset, info); // for exception handler 2039 2040 __ verify_not_null_oop(r0); 2041 // search an exception handler (r0: exception oop, r3: throwing pc) 2042 if (compilation()->has_fpu_code()) { 2043 unwind_id = Runtime1::handle_exception_id; 2044 } else { 2045 unwind_id = Runtime1::handle_exception_nofpu_id; 2046 } 2047 __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id))); 2048 2049 // FIXME: enough room for two byte trap ???? 2050 __ nop(); 2051 } 2052 2053 2054 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { 2055 assert(exceptionOop->as_register() == r0, "must match"); 2056 2057 __ b(_unwind_handler_entry); 2058 } 2059 2060 2061 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { 2062 Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo(); 2063 Register dreg = dest->is_single_cpu() ? 
dest->as_register() : dest->as_register_lo(); 2064 2065 switch (left->type()) { 2066 case T_INT: { 2067 switch (code) { 2068 case lir_shl: __ lslvw (dreg, lreg, count->as_register()); break; 2069 case lir_shr: __ asrvw (dreg, lreg, count->as_register()); break; 2070 case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break; 2071 default: 2072 ShouldNotReachHere(); 2073 break; 2074 } 2075 break; 2076 case T_LONG: 2077 case T_ADDRESS: 2078 case T_OBJECT: 2079 switch (code) { 2080 case lir_shl: __ lslv (dreg, lreg, count->as_register()); break; 2081 case lir_shr: __ asrv (dreg, lreg, count->as_register()); break; 2082 case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break; 2083 default: 2084 ShouldNotReachHere(); 2085 break; 2086 } 2087 break; 2088 default: 2089 ShouldNotReachHere(); 2090 break; 2091 } 2092 } 2093 } 2094 2095 2096 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { 2097 Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo(); 2098 Register lreg = left->is_single_cpu() ? 
left->as_register() : left->as_register_lo(); 2099 2100 switch (left->type()) { 2101 case T_INT: { 2102 switch (code) { 2103 case lir_shl: __ lslw (dreg, lreg, count); break; 2104 case lir_shr: __ asrw (dreg, lreg, count); break; 2105 case lir_ushr: __ lsrw (dreg, lreg, count); break; 2106 default: 2107 ShouldNotReachHere(); 2108 break; 2109 } 2110 break; 2111 case T_LONG: 2112 case T_ADDRESS: 2113 case T_OBJECT: 2114 switch (code) { 2115 case lir_shl: __ lsl (dreg, lreg, count); break; 2116 case lir_shr: __ asr (dreg, lreg, count); break; 2117 case lir_ushr: __ lsr (dreg, lreg, count); break; 2118 default: 2119 ShouldNotReachHere(); 2120 break; 2121 } 2122 break; 2123 default: 2124 ShouldNotReachHere(); 2125 break; 2126 } 2127 } 2128 } 2129 2130 2131 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) { 2132 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 2133 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 2134 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 2135 __ str (r, Address(sp, offset_from_rsp_in_bytes)); 2136 } 2137 2138 2139 void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) { 2140 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 2141 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 2142 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 2143 __ mov (rscratch1, c); 2144 __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes)); 2145 } 2146 2147 2148 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) { 2149 ShouldNotReachHere(); 2150 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 2151 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 2152 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 2153 __ lea(rscratch1, __ 
constant_oop_address(o)); 2154 __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes)); 2155 } 2156 2157 2158 // This code replaces a call to arraycopy; no exception may 2159 // be thrown in this code, they must be thrown in the System.arraycopy 2160 // activation frame; we could save some checks if this would not be the case 2161 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { 2162 ciArrayKlass* default_type = op->expected_type(); 2163 Register src = op->src()->as_register(); 2164 Register dst = op->dst()->as_register(); 2165 Register src_pos = op->src_pos()->as_register(); 2166 Register dst_pos = op->dst_pos()->as_register(); 2167 Register length = op->length()->as_register(); 2168 Register tmp = op->tmp()->as_register(); 2169 2170 CodeStub* stub = op->stub(); 2171 int flags = op->flags(); 2172 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL; 2173 if (basic_type == T_ARRAY) basic_type = T_OBJECT; 2174 2175 // if we don't know anything, just go through the generic arraycopy 2176 if (default_type == NULL // || basic_type == T_OBJECT 2177 ) { 2178 Label done; 2179 assert(src == r1 && src_pos == r2, "mismatch in calling convention"); 2180 2181 // Save the arguments in case the generic arraycopy fails and we 2182 // have to fall back to the JNI stub 2183 __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord)); 2184 __ stp(length, src_pos, Address(sp, 2*BytesPerWord)); 2185 __ str(src, Address(sp, 4*BytesPerWord)); 2186 2187 address copyfunc_addr = StubRoutines::generic_arraycopy(); 2188 assert(copyfunc_addr != NULL, "generic arraycopy stub required"); 2189 2190 // The arguments are in java calling convention so we shift them 2191 // to C convention 2192 assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4); 2193 __ mov(c_rarg0, j_rarg0); 2194 assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4); 2195 __ mov(c_rarg1, j_rarg1); 2196 assert_different_registers(c_rarg2, j_rarg3, 
j_rarg4); 2197 __ mov(c_rarg2, j_rarg2); 2198 assert_different_registers(c_rarg3, j_rarg4); 2199 __ mov(c_rarg3, j_rarg3); 2200 __ mov(c_rarg4, j_rarg4); 2201 #ifndef PRODUCT 2202 if (PrintC1Statistics) { 2203 __ incrementw(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt)); 2204 } 2205 #endif 2206 __ far_call(RuntimeAddress(copyfunc_addr)); 2207 2208 __ cbz(r0, *stub->continuation()); 2209 2210 // Reload values from the stack so they are where the stub 2211 // expects them. 2212 __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord)); 2213 __ ldp(length, src_pos, Address(sp, 2*BytesPerWord)); 2214 __ ldr(src, Address(sp, 4*BytesPerWord)); 2215 2216 // r0 is -1^K where K == partial copied count 2217 __ eonw(rscratch1, r0, zr); 2218 // adjust length down and src/end pos up by partial copied count 2219 __ subw(length, length, rscratch1); 2220 __ addw(src_pos, src_pos, rscratch1); 2221 __ addw(dst_pos, dst_pos, rscratch1); 2222 __ b(*stub->entry()); 2223 2224 __ bind(*stub->continuation()); 2225 return; 2226 } 2227 2228 assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point"); 2229 2230 int elem_size = type2aelembytes(basic_type); 2231 int shift_amount; 2232 int scale = exact_log2(elem_size); 2233 2234 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes()); 2235 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes()); 2236 Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes()); 2237 Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes()); 2238 2239 // test for NULL 2240 if (flags & LIR_OpArrayCopy::src_null_check) { 2241 __ cbz(src, *stub->entry()); 2242 } 2243 if (flags & LIR_OpArrayCopy::dst_null_check) { 2244 __ cbz(dst, *stub->entry()); 2245 } 2246 2247 // If the compiler was not able to prove that exact type of the source or the destination 2248 // of the arraycopy is an array type, check at runtime if the source 
or the destination is 2249 // an instance type. 2250 if (flags & LIR_OpArrayCopy::type_check) { 2251 if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::dst_objarray)) { 2252 __ load_klass(tmp, dst); 2253 __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset()))); 2254 __ cmpw(rscratch1, Klass::_lh_neutral_value); 2255 __ br(Assembler::GE, *stub->entry()); 2256 } 2257 2258 if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::src_objarray)) { 2259 __ load_klass(tmp, src); 2260 __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset()))); 2261 __ cmpw(rscratch1, Klass::_lh_neutral_value); 2262 __ br(Assembler::GE, *stub->entry()); 2263 } 2264 } 2265 2266 // check if negative 2267 if (flags & LIR_OpArrayCopy::src_pos_positive_check) { 2268 __ cmpw(src_pos, 0); 2269 __ br(Assembler::LT, *stub->entry()); 2270 } 2271 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) { 2272 __ cmpw(dst_pos, 0); 2273 __ br(Assembler::LT, *stub->entry()); 2274 } 2275 2276 if (flags & LIR_OpArrayCopy::length_positive_check) { 2277 __ cmpw(length, 0); 2278 __ br(Assembler::LT, *stub->entry()); 2279 } 2280 2281 if (flags & LIR_OpArrayCopy::src_range_check) { 2282 __ addw(tmp, src_pos, length); 2283 __ ldrw(rscratch1, src_length_addr); 2284 __ cmpw(tmp, rscratch1); 2285 __ br(Assembler::HI, *stub->entry()); 2286 } 2287 if (flags & LIR_OpArrayCopy::dst_range_check) { 2288 __ addw(tmp, dst_pos, length); 2289 __ ldrw(rscratch1, dst_length_addr); 2290 __ cmpw(tmp, rscratch1); 2291 __ br(Assembler::HI, *stub->entry()); 2292 } 2293 2294 if (flags & LIR_OpArrayCopy::type_check) { 2295 // We don't know the array types are compatible 2296 if (basic_type != T_OBJECT) { 2297 // Simple test for basic type arrays 2298 if (UseCompressedClassPointers) { 2299 __ ldrw(tmp, src_klass_addr); 2300 __ ldrw(rscratch1, dst_klass_addr); 2301 __ cmpw(tmp, rscratch1); 2302 } else { 2303 __ ldr(tmp, src_klass_addr); 2304 __ ldr(rscratch1, dst_klass_addr); 2305 __ cmp(tmp, rscratch1); 2306 } 2307 __ 
br(Assembler::NE, *stub->entry()); 2308 } else { 2309 // For object arrays, if src is a sub class of dst then we can 2310 // safely do the copy. 2311 Label cont, slow; 2312 2313 #define PUSH(r1, r2) \ 2314 stp(r1, r2, __ pre(sp, -2 * wordSize)); 2315 2316 #define POP(r1, r2) \ 2317 ldp(r1, r2, __ post(sp, 2 * wordSize)); 2318 2319 __ PUSH(src, dst); 2320 2321 __ load_klass(src, src); 2322 __ load_klass(dst, dst); 2323 2324 __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL); 2325 2326 __ PUSH(src, dst); 2327 __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); 2328 __ POP(src, dst); 2329 2330 __ cbnz(src, cont); 2331 2332 __ bind(slow); 2333 __ POP(src, dst); 2334 2335 address copyfunc_addr = StubRoutines::checkcast_arraycopy(); 2336 if (copyfunc_addr != NULL) { // use stub if available 2337 // src is not a sub class of dst so we have to do a 2338 // per-element check. 2339 2340 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray; 2341 if ((flags & mask) != mask) { 2342 // Check that at least both of them object arrays. 2343 assert(flags & mask, "one of the two should be known to be an object array"); 2344 2345 if (!(flags & LIR_OpArrayCopy::src_objarray)) { 2346 __ load_klass(tmp, src); 2347 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) { 2348 __ load_klass(tmp, dst); 2349 } 2350 int lh_offset = in_bytes(Klass::layout_helper_offset()); 2351 Address klass_lh_addr(tmp, lh_offset); 2352 jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2353 __ ldrw(rscratch1, klass_lh_addr); 2354 __ mov(rscratch2, objArray_lh); 2355 __ eorw(rscratch1, rscratch1, rscratch2); 2356 __ cbnzw(rscratch1, *stub->entry()); 2357 } 2358 2359 // Spill because stubs can use any register they like and it's 2360 // easier to restore just those that we care about. 
2361 __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord)); 2362 __ stp(length, src_pos, Address(sp, 2*BytesPerWord)); 2363 __ str(src, Address(sp, 4*BytesPerWord)); 2364 2365 __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale))); 2366 __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type)); 2367 assert_different_registers(c_rarg0, dst, dst_pos, length); 2368 __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale))); 2369 __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type)); 2370 assert_different_registers(c_rarg1, dst, length); 2371 __ uxtw(c_rarg2, length); 2372 assert_different_registers(c_rarg2, dst); 2373 2374 __ load_klass(c_rarg4, dst); 2375 __ ldr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset())); 2376 __ ldrw(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset())); 2377 __ far_call(RuntimeAddress(copyfunc_addr)); 2378 2379 #ifndef PRODUCT 2380 if (PrintC1Statistics) { 2381 Label failed; 2382 __ cbnz(r0, failed); 2383 __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt)); 2384 __ bind(failed); 2385 } 2386 #endif 2387 2388 __ cbz(r0, *stub->continuation()); 2389 2390 #ifndef PRODUCT 2391 if (PrintC1Statistics) { 2392 __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt)); 2393 } 2394 #endif 2395 assert_different_registers(dst, dst_pos, length, src_pos, src, r0, rscratch1); 2396 2397 // Restore previously spilled arguments 2398 __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord)); 2399 __ ldp(length, src_pos, Address(sp, 2*BytesPerWord)); 2400 __ ldr(src, Address(sp, 4*BytesPerWord)); 2401 2402 // return value is -1^K where K is partial copied count 2403 __ eonw(rscratch1, r0, zr); 2404 // adjust length down and src/end pos up by partial copied count 2405 __ subw(length, length, rscratch1); 2406 __ addw(src_pos, src_pos, rscratch1); 2407 __ addw(dst_pos, dst_pos, rscratch1); 2408 } 2409 2410 __ b(*stub->entry()); 2411 2412 __ bind(cont); 2413 __ 
POP(src, dst); 2414 } 2415 } 2416 2417 #ifdef ASSERT 2418 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) { 2419 // Sanity check the known type with the incoming class. For the 2420 // primitive case the types must match exactly with src.klass and 2421 // dst.klass each exactly matching the default type. For the 2422 // object array case, if no type check is needed then either the 2423 // dst type is exactly the expected type and the src type is a 2424 // subtype which we can't check or src is the same array as dst 2425 // but not necessarily exactly of type default_type. 2426 Label known_ok, halt; 2427 __ mov_metadata(tmp, default_type->constant_encoding()); 2428 if (UseCompressedClassPointers) { 2429 __ encode_klass_not_null(tmp); 2430 } 2431 2432 if (basic_type != T_OBJECT) { 2433 2434 if (UseCompressedClassPointers) { 2435 __ ldrw(rscratch1, dst_klass_addr); 2436 __ cmpw(tmp, rscratch1); 2437 } else { 2438 __ ldr(rscratch1, dst_klass_addr); 2439 __ cmp(tmp, rscratch1); 2440 } 2441 __ br(Assembler::NE, halt); 2442 if (UseCompressedClassPointers) { 2443 __ ldrw(rscratch1, src_klass_addr); 2444 __ cmpw(tmp, rscratch1); 2445 } else { 2446 __ ldr(rscratch1, src_klass_addr); 2447 __ cmp(tmp, rscratch1); 2448 } 2449 __ br(Assembler::EQ, known_ok); 2450 } else { 2451 if (UseCompressedClassPointers) { 2452 __ ldrw(rscratch1, dst_klass_addr); 2453 __ cmpw(tmp, rscratch1); 2454 } else { 2455 __ ldr(rscratch1, dst_klass_addr); 2456 __ cmp(tmp, rscratch1); 2457 } 2458 __ br(Assembler::EQ, known_ok); 2459 __ cmp(src, dst); 2460 __ br(Assembler::EQ, known_ok); 2461 } 2462 __ bind(halt); 2463 __ stop("incorrect type information in arraycopy"); 2464 __ bind(known_ok); 2465 } 2466 #endif 2467 2468 #ifndef PRODUCT 2469 if (PrintC1Statistics) { 2470 __ incrementw(ExternalAddress(Runtime1::arraycopy_count_address(basic_type))); 2471 } 2472 #endif 2473 2474 __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale))); 2475 __ add(c_rarg0, c_rarg0, 
arrayOopDesc::base_offset_in_bytes(basic_type)); 2476 assert_different_registers(c_rarg0, dst, dst_pos, length); 2477 __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale))); 2478 __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type)); 2479 assert_different_registers(c_rarg1, dst, length); 2480 __ uxtw(c_rarg2, length); 2481 assert_different_registers(c_rarg2, dst); 2482 2483 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0; 2484 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0; 2485 const char *name; 2486 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false); 2487 2488 CodeBlob *cb = CodeCache::find_blob(entry); 2489 if (cb) { 2490 __ far_call(RuntimeAddress(entry)); 2491 } else { 2492 __ call_VM_leaf(entry, 3); 2493 } 2494 2495 __ bind(*stub->continuation()); 2496 } 2497 2498 2499 2500 2501 void LIR_Assembler::emit_lock(LIR_OpLock* op) { 2502 Register obj = op->obj_opr()->as_register(); // may not be an oop 2503 Register hdr = op->hdr_opr()->as_register(); 2504 Register lock = op->lock_opr()->as_register(); 2505 if (!UseFastLocking) { 2506 __ b(*op->stub()->entry()); 2507 } else if (op->code() == lir_lock) { 2508 Register scratch = noreg; 2509 if (UseBiasedLocking) { 2510 scratch = op->scratch_opr()->as_register(); 2511 } 2512 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2513 // add debug info for NullPointerException only if one is possible 2514 int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry()); 2515 if (op->info() != NULL) { 2516 add_debug_info_for_null_check(null_check_offset, op->info()); 2517 } 2518 // done 2519 } else if (op->code() == lir_unlock) { 2520 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2521 __ unlock_object(hdr, obj, lock, *op->stub()->entry()); 2522 } else { 2523 Unimplemented(); 2524 } 2525 __ 
bind(*op->stub()->continuation()); 2526 } 2527 2528 2529 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { 2530 ciMethod* method = op->profiled_method(); 2531 int bci = op->profiled_bci(); 2532 ciMethod* callee = op->profiled_callee(); 2533 2534 // Update counter for all call types 2535 ciMethodData* md = method->method_data_or_null(); 2536 assert(md != NULL, "Sanity"); 2537 ciProfileData* data = md->bci_to_data(bci); 2538 assert(data != NULL && data->is_CounterData(), "need CounterData for calls"); 2539 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 2540 Register mdo = op->mdo()->as_register(); 2541 __ mov_metadata(mdo, md->constant_encoding()); 2542 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 2543 // Perform additional virtual call profiling for invokevirtual and 2544 // invokeinterface bytecodes 2545 if (op->should_profile_receiver_type()) { 2546 assert(op->recv()->is_single_cpu(), "recv must be allocated"); 2547 Register recv = op->recv()->as_register(); 2548 assert_different_registers(mdo, recv); 2549 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); 2550 ciKlass* known_klass = op->known_holder(); 2551 if (C1OptimizeVirtualCallProfiling && known_klass != NULL) { 2552 // We know the type that will be seen at this call site; we can 2553 // statically update the MethodData* rather than needing to do 2554 // dynamic tests on the receiver type 2555 2556 // NOTE: we should probably put a lock around this search to 2557 // avoid collisions by concurrent compilations 2558 ciVirtualCallData* vc_data = (ciVirtualCallData*) data; 2559 uint i; 2560 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2561 ciKlass* receiver = vc_data->receiver(i); 2562 if (known_klass->equals(receiver)) { 2563 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 2564 __ addptr(data_addr, DataLayout::counter_increment); 2565 return; 2566 } 2567 } 
2568 2569 // Receiver type not found in profile data; select an empty slot 2570 2571 // Note that this is less efficient than it should be because it 2572 // always does a write to the receiver part of the 2573 // VirtualCallData rather than just the first time 2574 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2575 ciKlass* receiver = vc_data->receiver(i); 2576 if (receiver == NULL) { 2577 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))); 2578 __ mov_metadata(rscratch1, known_klass->constant_encoding()); 2579 __ lea(rscratch2, recv_addr); 2580 __ str(rscratch1, Address(rscratch2)); 2581 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 2582 __ addptr(data_addr, DataLayout::counter_increment); 2583 return; 2584 } 2585 } 2586 } else { 2587 __ load_klass(recv, recv); 2588 Label update_done; 2589 type_profile_helper(mdo, md, data, recv, &update_done); 2590 // Receiver did not match any saved receiver and there is no empty row for it. 2591 // Increment total counter to indicate polymorphic case. 
2592 __ addptr(counter_addr, DataLayout::counter_increment); 2593 2594 __ bind(update_done); 2595 } 2596 } else { 2597 // Static call 2598 __ addptr(counter_addr, DataLayout::counter_increment); 2599 } 2600 } 2601 2602 2603 void LIR_Assembler::emit_delay(LIR_OpDelay*) { 2604 Unimplemented(); 2605 } 2606 2607 2608 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) { 2609 __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no)); 2610 } 2611 2612 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { 2613 assert(op->crc()->is_single_cpu(), "crc must be register"); 2614 assert(op->val()->is_single_cpu(), "byte value must be register"); 2615 assert(op->result_opr()->is_single_cpu(), "result must be register"); 2616 Register crc = op->crc()->as_register(); 2617 Register val = op->val()->as_register(); 2618 Register res = op->result_opr()->as_register(); 2619 2620 assert_different_registers(val, crc, res); 2621 unsigned long offset; 2622 __ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset); 2623 if (offset) __ add(res, res, offset); 2624 2625 __ mvnw(crc, crc); // ~crc 2626 __ update_byte_crc32(crc, val, res); 2627 __ mvnw(res, crc); // ~crc 2628 } 2629 2630 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { 2631 COMMENT("emit_profile_type {"); 2632 Register obj = op->obj()->as_register(); 2633 Register tmp = op->tmp()->as_pointer_register(); 2634 Address mdo_addr = as_Address(op->mdp()->as_address_ptr()); 2635 ciKlass* exact_klass = op->exact_klass(); 2636 intptr_t current_klass = op->current_klass(); 2637 bool not_null = op->not_null(); 2638 bool no_conflict = op->no_conflict(); 2639 2640 Label update, next, none; 2641 2642 bool do_null = !not_null; 2643 bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass; 2644 bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set; 2645 2646 assert(do_null || do_update, "why are we 
here?"); 2647 assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?"); 2648 assert(mdo_addr.base() != rscratch1, "wrong register"); 2649 2650 __ verify_oop(obj); 2651 2652 if (tmp != obj) { 2653 __ mov(tmp, obj); 2654 } 2655 if (do_null) { 2656 __ cbnz(tmp, update); 2657 if (!TypeEntries::was_null_seen(current_klass)) { 2658 __ ldr(rscratch2, mdo_addr); 2659 __ orr(rscratch2, rscratch2, TypeEntries::null_seen); 2660 __ str(rscratch2, mdo_addr); 2661 } 2662 if (do_update) { 2663 #ifndef ASSERT 2664 __ b(next); 2665 } 2666 #else 2667 __ b(next); 2668 } 2669 } else { 2670 __ cbnz(tmp, update); 2671 __ stop("unexpected null obj"); 2672 #endif 2673 } 2674 2675 __ bind(update); 2676 2677 if (do_update) { 2678 #ifdef ASSERT 2679 if (exact_klass != NULL) { 2680 Label ok; 2681 __ load_klass(tmp, tmp); 2682 __ mov_metadata(rscratch1, exact_klass->constant_encoding()); 2683 __ eor(rscratch1, tmp, rscratch1); 2684 __ cbz(rscratch1, ok); 2685 __ stop("exact klass and actual klass differ"); 2686 __ bind(ok); 2687 } 2688 #endif 2689 if (!no_conflict) { 2690 if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) { 2691 if (exact_klass != NULL) { 2692 __ mov_metadata(tmp, exact_klass->constant_encoding()); 2693 } else { 2694 __ load_klass(tmp, tmp); 2695 } 2696 2697 __ ldr(rscratch2, mdo_addr); 2698 __ eor(tmp, tmp, rscratch2); 2699 __ andr(rscratch1, tmp, TypeEntries::type_klass_mask); 2700 // klass seen before, nothing to do. The unknown bit may have been 2701 // set already but no need to check. 2702 __ cbz(rscratch1, next); 2703 2704 __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore. 
2705 2706 if (TypeEntries::is_type_none(current_klass)) { 2707 __ cbz(rscratch2, none); 2708 __ cmp(rscratch2, TypeEntries::null_seen); 2709 __ br(Assembler::EQ, none); 2710 // There is a chance that the checks above (re-reading profiling 2711 // data from memory) fail if another thread has just set the 2712 // profiling to this obj's klass 2713 __ dmb(Assembler::ISHLD); 2714 __ ldr(rscratch2, mdo_addr); 2715 __ eor(tmp, tmp, rscratch2); 2716 __ andr(rscratch1, tmp, TypeEntries::type_klass_mask); 2717 __ cbz(rscratch1, next); 2718 } 2719 } else { 2720 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && 2721 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only"); 2722 2723 __ ldr(tmp, mdo_addr); 2724 __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore. 2725 } 2726 2727 // different than before. Cannot keep accurate profile. 2728 __ ldr(rscratch2, mdo_addr); 2729 __ orr(rscratch2, rscratch2, TypeEntries::type_unknown); 2730 __ str(rscratch2, mdo_addr); 2731 2732 if (TypeEntries::is_type_none(current_klass)) { 2733 __ b(next); 2734 2735 __ bind(none); 2736 // first time here. Set profile type. 
2737 __ str(tmp, mdo_addr); 2738 } 2739 } else { 2740 // There's a single possible klass at this profile point 2741 assert(exact_klass != NULL, "should be"); 2742 if (TypeEntries::is_type_none(current_klass)) { 2743 __ mov_metadata(tmp, exact_klass->constant_encoding()); 2744 __ ldr(rscratch2, mdo_addr); 2745 __ eor(tmp, tmp, rscratch2); 2746 __ andr(rscratch1, tmp, TypeEntries::type_klass_mask); 2747 __ cbz(rscratch1, next); 2748 #ifdef ASSERT 2749 { 2750 Label ok; 2751 __ ldr(rscratch1, mdo_addr); 2752 __ cbz(rscratch1, ok); 2753 __ cmp(rscratch1, TypeEntries::null_seen); 2754 __ br(Assembler::EQ, ok); 2755 // may have been set by another thread 2756 __ dmb(Assembler::ISHLD); 2757 __ mov_metadata(rscratch1, exact_klass->constant_encoding()); 2758 __ ldr(rscratch2, mdo_addr); 2759 __ eor(rscratch2, rscratch1, rscratch2); 2760 __ andr(rscratch2, rscratch2, TypeEntries::type_mask); 2761 __ cbz(rscratch2, ok); 2762 2763 __ stop("unexpected profiling mismatch"); 2764 __ bind(ok); 2765 } 2766 #endif 2767 // first time here. Set profile type. 2768 __ ldr(tmp, mdo_addr); 2769 } else { 2770 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && 2771 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent"); 2772 2773 __ ldr(tmp, mdo_addr); 2774 __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore. 2775 2776 __ orr(tmp, tmp, TypeEntries::type_unknown); 2777 __ str(tmp, mdo_addr); 2778 // FIXME: Write barrier needed here? 
2779 } 2780 } 2781 2782 __ bind(next); 2783 } 2784 COMMENT("} emit_profile_type"); 2785 } 2786 2787 2788 void LIR_Assembler::align_backward_branch_target() { 2789 } 2790 2791 2792 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) { 2793 // tmp must be unused 2794 assert(tmp->is_illegal(), "wasting a register if tmp is allocated"); 2795 2796 if (left->is_single_cpu()) { 2797 assert(dest->is_single_cpu(), "expect single result reg"); 2798 __ negw(dest->as_register(), left->as_register()); 2799 } else if (left->is_double_cpu()) { 2800 assert(dest->is_double_cpu(), "expect double result reg"); 2801 __ neg(dest->as_register_lo(), left->as_register_lo()); 2802 } else if (left->is_single_fpu()) { 2803 assert(dest->is_single_fpu(), "expect single float result reg"); 2804 __ fnegs(dest->as_float_reg(), left->as_float_reg()); 2805 } else { 2806 assert(left->is_double_fpu(), "expect double float operand reg"); 2807 assert(dest->is_double_fpu(), "expect double float result reg"); 2808 __ fnegd(dest->as_double_reg(), left->as_double_reg()); 2809 } 2810 } 2811 2812 2813 void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 2814 assert(patch_code == lir_patch_none, "Patch code not supported"); 2815 __ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr())); 2816 } 2817 2818 2819 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { 2820 assert(!tmp->is_valid(), "don't need temporary"); 2821 2822 CodeBlob *cb = CodeCache::find_blob(dest); 2823 if (cb) { 2824 __ far_call(RuntimeAddress(dest)); 2825 } else { 2826 __ mov(rscratch1, RuntimeAddress(dest)); 2827 __ blr(rscratch1); 2828 } 2829 2830 if (info != NULL) { 2831 add_call_info_here(info); 2832 } 2833 __ maybe_isb(); 2834 } 2835 2836 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { 2837 if (dest->is_address() || src->is_address()) { 2838 
// (tail of volatile_move_op — header is immediately above this span)
    move_op(src, dest, type, lir_patch_none, info,
            /*pop_fpu_stack*/false, /*unaligned*/false, /*wide*/false);
  } else {
    ShouldNotReachHere();
  }
}

#ifdef ASSERT
// emit run-time assertion: evaluate the LIR condition and either stop the
// VM with op->msg() (halt) or hit a breakpoint when the assertion fails.
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    // Map the LIR condition to the AArch64 condition code and branch
    // around the failure path when the assertion holds.
    Assembler::Condition acond = Assembler::AL;
    switch (op->condition()) {
      case lir_cond_equal:        acond = Assembler::EQ;  break;
      case lir_cond_notEqual:     acond = Assembler::NE;  break;
      case lir_cond_less:         acond = Assembler::LT;  break;
      case lir_cond_lessEqual:    acond = Assembler::LE;  break;
      case lir_cond_greaterEqual: acond = Assembler::GE;  break;
      case lir_cond_greater:      acond = Assembler::GT;  break;
      case lir_cond_belowEqual:   acond = Assembler::LS;  break;
      case lir_cond_aboveEqual:   acond = Assembler::HS;  break;
      default:                    ShouldNotReachHere();
    }
    __ br(acond, ok);
  }
  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif

// COMMENT is re-defined here (also defined near the top of the file) so it
// is available regardless of where this section is compiled from.
#ifndef PRODUCT
#define COMMENT(x)   do { __ block_comment(x); } while (0)
#else
#define COMMENT(x)
#endif

// Full barrier: orders all memory accesses in both directions.
void LIR_Assembler::membar() {
  COMMENT("membar");
  __ membar(MacroAssembler::AnyAny);
}

// Acquire: no load or store may move above a preceding load.
void LIR_Assembler::membar_acquire() {
  __ membar(Assembler::LoadLoad|Assembler::LoadStore);
}

// Release: no load or store may move below a following store.
void LIR_Assembler::membar_release() {
  __ membar(Assembler::LoadStore|Assembler::StoreStore);
}

void LIR_Assembler::membar_loadload() {
  __ membar(Assembler::LoadLoad);
}

void LIR_Assembler::membar_storestore() {
  __ membar(MacroAssembler::StoreStore);
}

void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }

void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }

// Thread.onSpinWait() intrinsic is not implemented for C1 on this port.
void LIR_Assembler::on_spin_wait() {
  Unimplemented();
}

// Copy the current JavaThread pointer (rthread) into the result register.
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  __ mov(result_reg->as_register(), rthread);
}


// Peephole pass over the LIR list. The tableswitch recognizer below is
// disabled (#if 0), so this is currently a no-op. (Body continues past
// this chunk.)
void LIR_Assembler::peephole(LIR_List *lir) {
#if 0
  if (tableswitch_count >= max_tableswitches)
    return;

  /*
    This finite-state automaton recognizes sequences of compare-and-
    branch instructions. We will turn them into a tableswitch. You
    could argue that C1 really shouldn't be doing this sort of
    optimization, but without it the code is really horrible.
  */
  // NOTE: everything from here to the #endif is inside the #if 0 above —
  // this tableswitch recognizer is compiled out.

  enum { start_s, cmp1_s, beq_s, cmp_s } state;
  int first_key, last_key = -2147483648;
  int next_key = 0;
  int start_insn = -1;
  int last_insn = -1;
  Register reg = noreg;
  LIR_Opr reg_opr;
  state = start_s;

  LIR_OpList* inst = lir->instructions_list();
  for (int i = 0; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);
    switch (state) {
    case start_s:
      // Looking for "cmp reg, #int-constant" to start a run.
      first_key = -1;
      start_insn = i;
      switch (op->code()) {
      case lir_cmp:
        LIR_Opr opr1 = op->as_Op2()->in_opr1();
        LIR_Opr opr2 = op->as_Op2()->in_opr2();
        if (opr1->is_cpu_register() && opr1->is_single_cpu()
            && opr2->is_constant()
            && opr2->type() == T_INT) {
          reg_opr = opr1;
          reg = opr1->as_register();
          first_key = opr2->as_constant_ptr()->as_jint();
          next_key = first_key + 1;
          state = cmp_s;
          goto next_state;
        }
        break;
      }
      break;
    case cmp_s:
      // A cmp must be followed by a branch-if-equal to continue the run.
      switch (op->code()) {
      case lir_branch:
        if (op->as_OpBranch()->cond() == lir_cond_equal) {
          state = beq_s;
          last_insn = i;
          goto next_state;
        }
      }
      state = start_s;
      break;
    case beq_s:
      // After beq, the run continues only with "cmp same-reg, #next_key".
      switch (op->code()) {
      case lir_cmp: {
        LIR_Opr opr1 = op->as_Op2()->in_opr1();
        LIR_Opr opr2 = op->as_Op2()->in_opr2();
        if (opr1->is_cpu_register() && opr1->is_single_cpu()
            && opr1->as_register() == reg
            && opr2->is_constant()
            && opr2->type() == T_INT
            && opr2->as_constant_ptr()->as_jint() == next_key) {
          last_key = next_key;
          next_key++;
          state = cmp_s;
          goto next_state;
        }
      }
      }
      last_key = next_key;
      state = start_s;
      break;
    default:
      assert(false, "impossible state");
    }
    if (state == start_s) {
      // Run ended: only rewrite if it covered more than 5 consecutive keys.
      if (first_key < last_key - 5L && reg != noreg) {
        {
          // printf("found run register %d starting at insn %d low value %d high value %d\n",
          //        reg->encoding(),
          //        start_insn, first_key, last_key);
          // for (int i = 0; i < inst->length(); i++) {
          //   inst->at(i)->print();
          //   tty->print("\n");
          // }
          // tty->print("\n");
        }

        struct tableswitch *sw = &switches[tableswitch_count];
        sw->_insn_index = start_insn, sw->_first_key = first_key,
          sw->_last_key = last_key, sw->_reg = reg;
        inst->insert_before(last_insn + 1, new LIR_OpLabel(&sw->_after));
        {
          // Insert the new table of branches
          int offset = last_insn;
          for (int n = first_key; n < last_key; n++) {
            inst->insert_before
              (last_insn + 1,
               new LIR_OpBranch(lir_cond_always, T_ILLEGAL,
                                inst->at(offset)->as_OpBranch()->label()));
            offset -= 2, i++;
          }
        }
        // Delete all the old compare-and-branch instructions
        for (int n = first_key; n < last_key; n++) {
          inst->remove_at(start_insn);
          inst->remove_at(start_insn);
        }
        // Insert the tableswitch instruction
        inst->insert_before(start_insn,
                            new LIR_Op2(lir_cmp, lir_cond_always,
                                        LIR_OprFact::intConst(tableswitch_count),
                                        reg_opr));
        inst->insert_before(start_insn + 1, new LIR_OpLabel(&sw->_branches));
        tableswitch_count++;
      }
      reg = noreg;
      last_key = -2147483648;
    }
  next_state:
    ;
  }
#endif
}

// Atomic read-modify-write (lir_xadd / lir_xchg) on the memory operand
// `src`. Selects word vs doubleword member-function variants based on the
// operand type (oops use the word forms when compressed oops are enabled).
// (Body continues past this chunk.)
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
  Address addr = as_Address(src->as_address_ptr());
  BasicType type = src->type();
  bool is_oop = type == T_OBJECT || type == T_ARRAY;

  // Member-function pointers for the add/exchange primitives to use.
  void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
  void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);

  switch(type) {
  case T_INT:
    xchg = &MacroAssembler::atomic_xchgalw;
    add = &MacroAssembler::atomic_addalw;
    break;
  case T_LONG:
    xchg = &MacroAssembler::atomic_xchgal;
    add = &MacroAssembler::atomic_addal;
    break;
case T_OBJECT: 3071 case T_ARRAY: 3072 if (UseCompressedOops) { 3073 xchg = &MacroAssembler::atomic_xchgalw; 3074 add = &MacroAssembler::atomic_addalw; 3075 } else { 3076 xchg = &MacroAssembler::atomic_xchgal; 3077 add = &MacroAssembler::atomic_addal; 3078 } 3079 break; 3080 default: 3081 ShouldNotReachHere(); 3082 xchg = &MacroAssembler::atomic_xchgal; 3083 add = &MacroAssembler::atomic_addal; // unreachable 3084 } 3085 3086 switch (code) { 3087 case lir_xadd: 3088 { 3089 RegisterOrConstant inc; 3090 Register tmp = as_reg(tmp_op); 3091 Register dst = as_reg(dest); 3092 if (data->is_constant()) { 3093 inc = RegisterOrConstant(as_long(data)); 3094 assert_different_registers(dst, addr.base(), tmp, 3095 rscratch1, rscratch2); 3096 } else { 3097 inc = RegisterOrConstant(as_reg(data)); 3098 assert_different_registers(inc.as_register(), dst, addr.base(), tmp, 3099 rscratch1, rscratch2); 3100 } 3101 __ lea(tmp, addr); 3102 (_masm->*add)(dst, inc, tmp); 3103 break; 3104 } 3105 case lir_xchg: 3106 { 3107 Register tmp = tmp_op->as_register(); 3108 Register obj = as_reg(data); 3109 Register dst = as_reg(dest); 3110 if (is_oop && UseCompressedOops) { 3111 __ encode_heap_oop(rscratch2, obj); 3112 obj = rscratch2; 3113 } 3114 assert_different_registers(obj, addr.base(), tmp, rscratch1, dst); 3115 __ lea(tmp, addr); 3116 (_masm->*xchg)(dst, obj, tmp); 3117 if (is_oop && UseCompressedOops) { 3118 __ decode_heap_oop(dst); 3119 } 3120 } 3121 break; 3122 default: 3123 ShouldNotReachHere(); 3124 } 3125 __ membar(__ AnyAny); 3126 } 3127 3128 #undef __