1 /* 2 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2014, Red Hat Inc. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"



#ifndef PRODUCT
#define COMMENT(x)   do { __ block_comment(x); } while (0)
#else
#define COMMENT(x)
#endif

NEEDS_CLEANUP // remove this definitions ?
const Register IC_Klass    = rscratch2;   // where the IC klass is cached
const Register SYNC_header = r0;          // synchronization header
const Register SHIFT_count = r0;          // where count for shift operations must be

#define __ _masm->


// Make sure neither temp register aliases 'preserve'.  At most one of
// tmp1/tmp2 may collide with it; the colliding one is redirected to
// 'extra'.  The asserts verify 'extra' is not already one of the temps.
static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp2 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2);
}



// Three-temp variant of the above: whichever of tmp1..tmp3 equals
// 'preserve' (at most one may) is replaced by 'extra'.
static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2,
                                       Register &tmp3) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp2 = extra;
  } else if (tmp3 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp3 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2, tmp3);
}


// Not used on AArch64.
bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}

// The OSR buffer pointer arrives in the receiver's register (see osr_entry).
LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}

//--------------fpu register translations-----------------------


// Emit 'f' into the constant section and return its address.  If the
// constant section is full, the compilation bails out; a valid (dummy)
// address is still returned so emission can continue until the bailout
// is observed by the caller.
address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}


// Same as float_constant, for a double value.
address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

// Same as float_constant, for a 64-bit integer value.
address LIR_Assembler::int_constant(jlong n) {
  address const_addr = __ long_constant(n);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

// x87-style FPU management: not applicable on AArch64.
void LIR_Assembler::set_24bit_FPU() { Unimplemented(); }

void LIR_Assembler::reset_FPU() { Unimplemented(); }

void LIR_Assembler::fpop() { Unimplemented(); }

void LIR_Assembler::fxch(int i) { Unimplemented(); }

void LIR_Assembler::fld(int i) { Unimplemented(); }

void LIR_Assembler::ffree(int i) { Unimplemented(); }

void LIR_Assembler::breakpoint() { Unimplemented(); }

void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }

void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }

bool LIR_Assembler::is_literal_address(LIR_Address* addr) { Unimplemented(); return false; }
//-------------------------------------------

// Return the GP register backing 'op', using the low half for a
// double-word (long/pointer) operand.
static Register as_reg(LIR_Opr op) {
  return op->is_double_cpu() ? op->as_register_lo() : op->as_register();
}

// Widen an integral constant operand (T_INT or T_LONG) to jlong.
static jlong as_long(LIR_Opr data) {
  jlong result;
  switch (data->type()) {
  case T_INT:
    result = (data->as_jint());
    break;
  case T_LONG:
    result = (data->as_jlong());
    break;
  default:
    ShouldNotReachHere();
    result = 0;  // unreachable
  }
  return result;
}

// Translate a LIR_Address into an assembler Address.  A register index
// uses a base+index form (sign-extending a 32-bit index); otherwise the
// displacement is used directly when it fits the scaled-immediate
// encoding, or is materialized into 'tmp' when it does not.
Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  Register base = addr->base()->as_pointer_register();
  LIR_Opr opr = addr->index();
  if (opr->is_cpu_register()) {
    Register index;
    if (opr->is_single_cpu())
      index = opr->as_register();
    else
      index = opr->as_register_lo();
    assert(addr->disp() == 0, "must be");
    switch(opr->type()) {
      case T_INT:
        return Address(base, index, Address::sxtw(addr->scale()));
      case T_LONG:
        return Address(base, index, Address::lsl(addr->scale()));
      default:
        ShouldNotReachHere();
      }
  } else  {
    intptr_t addr_offset = intptr_t(addr->disp());
    if (Address::offset_ok_for_immed(addr_offset, addr->scale()))
      return Address(base, addr_offset, Address::lsl(addr->scale()));
    else {
      __ mov(tmp, addr_offset);
      return Address(base, tmp, Address::lsl(addr->scale()));
    }
  }
  return Address();  // unreachable
}

// High-word addressing is never needed on AArch64.
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  ShouldNotReachHere();
  return Address();
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}

Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr, rscratch1);  // Ouch
  // FIXME: This needs to be much more clever.  See x86.
}


// Emit the OSR (on-stack replacement) entry point: build the compiled
// frame and copy the monitors from the OSR buffer into it.
void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  // r2: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame so in the osr buffer
  // so first slot in the local array is the last local from the interpreter
  // and last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
  // in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  // r2: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ldr(rscratch1, Address(OSR_buf, slot_offset + 1*BytesPerWord));
        __ cbnz(rscratch1, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif
      // Copy the lock word and then the object word for monitor i into
      // this frame's monitor area.
      __ ldr(r19, Address(OSR_buf, slot_offset + 0));
      __ str(r19, frame_map()->address_for_monitor_lock(i));
      __ ldr(r19, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ str(r19, frame_map()->address_for_monitor_object(i));
    }
  }
}


// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
  Register receiver = FrameMap::receiver_opr->as_register();
  Register ic_klass = IC_Klass;
  int start_offset = __ offset();
  __ inline_cache_check(receiver, ic_klass);

  // if icache check fails, then jump to runtime routine
  // Note: RECEIVER must still contain the receiver!
  Label dont;
  __ br(Assembler::EQ, dont);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  // We align the verified entry point unless the method body
  // (including its inline cache check) will fit in a single 64-byte
  // icache line.
  if (! method()->is_accessor() || __ offset() - start_offset > 4 * 4) {
    // force alignment after the cache check.
    __ align(CodeEntryAlignment);
  }

  __ bind(dont);
  return start_offset;
}


// Load the jobject 'o' into 'reg'; NULL becomes the zero register value.
void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ mov(reg, zr);
  } else {
    __ movoop(reg, o, /*immediate*/true);
  }
}

// Patching is not supported inline on AArch64; instead, call the
// appropriate Runtime1 patching stub, which will deoptimize.
void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;

  switch (patching_id(info)) {
  case PatchingStub::access_field_id:
    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
    reloc_type = relocInfo::section_word_type;
    break;
  case PatchingStub::load_klass_id:
    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
    reloc_type = relocInfo::metadata_type;
    break;
  case PatchingStub::load_mirror_id:
    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  case PatchingStub::load_appendix_id:
    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  default: ShouldNotReachHere();
  }

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  deoptimize_trap(info);
}


// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!

  // The frame_map records size in slots (32bit word)

  // subtract two words to account for return address and link
  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}


// Emit the out-of-line exception handler stub; returns its code offset,
// or -1 (after bailout) if the stub section is full.
int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in r0, and r3
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(r0);

  // search an exception handler (r0: exception oop, r3: throwing pc)
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
407 int LIR_Assembler::emit_unwind_handler() { 408 #ifndef PRODUCT 409 if (CommentedAssembly) { 410 _masm->block_comment("Unwind handler"); 411 } 412 #endif 413 414 int offset = code_offset(); 415 416 // Fetch the exception from TLS and clear out exception related thread state 417 __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset())); 418 __ str(zr, Address(rthread, JavaThread::exception_oop_offset())); 419 __ str(zr, Address(rthread, JavaThread::exception_pc_offset())); 420 421 __ bind(_unwind_handler_entry); 422 __ verify_not_null_oop(r0); 423 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { 424 __ mov(r19, r0); // Preserve the exception 425 } 426 427 // Preform needed unlocking 428 MonitorExitStub* stub = NULL; 429 if (method()->is_synchronized()) { 430 monitor_address(0, FrameMap::r0_opr); 431 stub = new MonitorExitStub(FrameMap::r0_opr, true, 0); 432 __ unlock_object(r5, r4, r0, *stub->entry()); 433 __ bind(*stub->continuation()); 434 } 435 436 if (compilation()->env()->dtrace_method_probes()) { 437 __ call_Unimplemented(); 438 #if 0 439 __ movptr(Address(rsp, 0), rax); 440 __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding()); 441 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit))); 442 #endif 443 } 444 445 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { 446 __ mov(r0, r19); // Restore the exception 447 } 448 449 // remove the activation and dispatch to the unwind handler 450 __ block_comment("remove_frame and dispatch to the unwind handler"); 451 __ remove_frame(initial_frame_size_in_bytes()); 452 __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id))); 453 454 // Emit the slow path assembly 455 if (stub != NULL) { 456 stub->emit_code(this); 457 } 458 459 return offset; 460 } 461 462 463 int LIR_Assembler::emit_deopt_handler() { 464 // if the last instruction is a call (typically to do a throw which 
465 // is coming at the end after block reordering) the return address 466 // must still point into the code area in order to avoid assertion 467 // failures when searching for the corresponding bci => add a nop 468 // (was bug 5/14/1999 - gri) 469 __ nop(); 470 471 // generate code for exception handler 472 address handler_base = __ start_a_stub(deopt_handler_size()); 473 if (handler_base == NULL) { 474 // not enough space left for the handler 475 bailout("deopt handler overflow"); 476 return -1; 477 } 478 479 int offset = code_offset(); 480 481 __ adr(lr, pc()); 482 __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack())); 483 guarantee(code_offset() - offset <= deopt_handler_size(), "overflow"); 484 __ end_a_stub(); 485 486 return offset; 487 } 488 489 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) { 490 _masm->code_section()->relocate(adr, relocInfo::poll_type); 491 int pc_offset = code_offset(); 492 flush_debug_info(pc_offset); 493 info->record_debug_info(compilation()->debug_info_recorder(), pc_offset); 494 if (info->exception_handlers() != NULL) { 495 compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers()); 496 } 497 } 498 499 void LIR_Assembler::return_op(LIR_Opr result) { 500 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,"); 501 502 // Pop the stack before the safepoint code 503 __ remove_frame(initial_frame_size_in_bytes()); 504 505 if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) { 506 __ reserved_stack_check(); 507 } 508 509 address polling_page(os::get_polling_page()); 510 __ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type); 511 __ ret(lr); 512 } 513 514 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) { 515 address polling_page(os::get_polling_page()); 516 guarantee(info != NULL, "Shouldn't be NULL"); 517 assert(os::is_poll_address(polling_page), 
"should be"); 518 __ get_polling_page(rscratch1, polling_page, relocInfo::poll_type); 519 add_debug_info_for_branch(info); // This isn't just debug info: 520 // it's the oop map 521 __ read_polling_page(rscratch1, relocInfo::poll_type); 522 return __ offset(); 523 } 524 525 526 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) { 527 if (from_reg == r31_sp) 528 from_reg = sp; 529 if (to_reg == r31_sp) 530 to_reg = sp; 531 __ mov(to_reg, from_reg); 532 } 533 534 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); } 535 536 537 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 538 assert(src->is_constant(), "should not call otherwise"); 539 assert(dest->is_register(), "should not call otherwise"); 540 LIR_Const* c = src->as_constant_ptr(); 541 542 switch (c->type()) { 543 case T_INT: { 544 assert(patch_code == lir_patch_none, "no patching handled here"); 545 __ movw(dest->as_register(), c->as_jint()); 546 break; 547 } 548 549 case T_ADDRESS: { 550 assert(patch_code == lir_patch_none, "no patching handled here"); 551 __ mov(dest->as_register(), c->as_jint()); 552 break; 553 } 554 555 case T_LONG: { 556 assert(patch_code == lir_patch_none, "no patching handled here"); 557 __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong()); 558 break; 559 } 560 561 case T_OBJECT: { 562 if (patch_code == lir_patch_none) { 563 jobject2reg(c->as_jobject(), dest->as_register()); 564 } else { 565 jobject2reg_with_patching(dest->as_register(), info); 566 } 567 break; 568 } 569 570 case T_METADATA: { 571 if (patch_code != lir_patch_none) { 572 klass2reg_with_patching(dest->as_register(), info); 573 } else { 574 __ mov_metadata(dest->as_register(), c->as_metadata()); 575 } 576 break; 577 } 578 579 case T_FLOAT: { 580 if (__ operand_valid_for_float_immediate(c->as_jfloat())) { 581 __ fmovs(dest->as_float_reg(), (c->as_jfloat())); 582 } else { 583 __ adr(rscratch1, 
InternalAddress(float_constant(c->as_jfloat()))); 584 __ ldrs(dest->as_float_reg(), Address(rscratch1)); 585 } 586 break; 587 } 588 589 case T_DOUBLE: { 590 if (__ operand_valid_for_float_immediate(c->as_jdouble())) { 591 __ fmovd(dest->as_double_reg(), (c->as_jdouble())); 592 } else { 593 __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble()))); 594 __ ldrd(dest->as_double_reg(), Address(rscratch1)); 595 } 596 break; 597 } 598 599 default: 600 ShouldNotReachHere(); 601 } 602 } 603 604 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) { 605 LIR_Const* c = src->as_constant_ptr(); 606 switch (c->type()) { 607 case T_OBJECT: 608 { 609 if (! c->as_jobject()) 610 __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix())); 611 else { 612 const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL); 613 reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false); 614 } 615 } 616 break; 617 case T_ADDRESS: 618 { 619 const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL); 620 reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false); 621 } 622 case T_INT: 623 case T_FLOAT: 624 { 625 Register reg = zr; 626 if (c->as_jint_bits() == 0) 627 __ strw(zr, frame_map()->address_for_slot(dest->single_stack_ix())); 628 else { 629 __ movw(rscratch1, c->as_jint_bits()); 630 __ strw(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix())); 631 } 632 } 633 break; 634 case T_LONG: 635 case T_DOUBLE: 636 { 637 Register reg = zr; 638 if (c->as_jlong_bits() == 0) 639 __ str(zr, frame_map()->address_for_slot(dest->double_stack_ix(), 640 lo_word_offset_in_bytes)); 641 else { 642 __ mov(rscratch1, (intptr_t)c->as_jlong_bits()); 643 __ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(), 644 lo_word_offset_in_bytes)); 645 } 646 } 647 break; 648 default: 649 ShouldNotReachHere(); 650 } 651 } 652 653 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) { 654 assert(src->is_constant(), 
"should not call otherwise"); 655 LIR_Const* c = src->as_constant_ptr(); 656 LIR_Address* to_addr = dest->as_address_ptr(); 657 658 void (Assembler::* insn)(Register Rt, const Address &adr); 659 660 switch (type) { 661 case T_ADDRESS: 662 assert(c->as_jint() == 0, "should be"); 663 insn = &Assembler::str; 664 break; 665 case T_LONG: 666 assert(c->as_jlong() == 0, "should be"); 667 insn = &Assembler::str; 668 break; 669 case T_INT: 670 assert(c->as_jint() == 0, "should be"); 671 insn = &Assembler::strw; 672 break; 673 case T_OBJECT: 674 case T_ARRAY: 675 assert(c->as_jobject() == 0, "should be"); 676 if (UseCompressedOops && !wide) { 677 insn = &Assembler::strw; 678 } else { 679 insn = &Assembler::str; 680 } 681 break; 682 case T_CHAR: 683 case T_SHORT: 684 assert(c->as_jint() == 0, "should be"); 685 insn = &Assembler::strh; 686 break; 687 case T_BOOLEAN: 688 case T_BYTE: 689 assert(c->as_jint() == 0, "should be"); 690 insn = &Assembler::strb; 691 break; 692 default: 693 ShouldNotReachHere(); 694 insn = &Assembler::str; // unreachable 695 } 696 697 if (info) add_debug_info_for_null_check_here(info); 698 (_masm->*insn)(zr, as_Address(to_addr, rscratch1)); 699 } 700 701 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) { 702 assert(src->is_register(), "should not call otherwise"); 703 assert(dest->is_register(), "should not call otherwise"); 704 705 // move between cpu-registers 706 if (dest->is_single_cpu()) { 707 if (src->type() == T_LONG) { 708 // Can do LONG -> OBJECT 709 move_regs(src->as_register_lo(), dest->as_register()); 710 return; 711 } 712 assert(src->is_single_cpu(), "must match"); 713 if (src->type() == T_OBJECT) { 714 __ verify_oop(src->as_register()); 715 } 716 move_regs(src->as_register(), dest->as_register()); 717 718 } else if (dest->is_double_cpu()) { 719 if (src->type() == T_OBJECT || src->type() == T_ARRAY) { 720 // Surprising to me but we can see move of a long to t_object 721 __ verify_oop(src->as_register()); 722 
move_regs(src->as_register(), dest->as_register_lo()); 723 return; 724 } 725 assert(src->is_double_cpu(), "must match"); 726 Register f_lo = src->as_register_lo(); 727 Register f_hi = src->as_register_hi(); 728 Register t_lo = dest->as_register_lo(); 729 Register t_hi = dest->as_register_hi(); 730 assert(f_hi == f_lo, "must be same"); 731 assert(t_hi == t_lo, "must be same"); 732 move_regs(f_lo, t_lo); 733 734 } else if (dest->is_single_fpu()) { 735 __ fmovs(dest->as_float_reg(), src->as_float_reg()); 736 737 } else if (dest->is_double_fpu()) { 738 __ fmovd(dest->as_double_reg(), src->as_double_reg()); 739 740 } else { 741 ShouldNotReachHere(); 742 } 743 } 744 745 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) { 746 if (src->is_single_cpu()) { 747 if (type == T_ARRAY || type == T_OBJECT) { 748 __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix())); 749 __ verify_oop(src->as_register()); 750 } else if (type == T_METADATA || type == T_DOUBLE) { 751 __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix())); 752 } else { 753 __ strw(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix())); 754 } 755 756 } else if (src->is_double_cpu()) { 757 Address dest_addr_LO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes); 758 __ str(src->as_register_lo(), dest_addr_LO); 759 760 } else if (src->is_single_fpu()) { 761 Address dest_addr = frame_map()->address_for_slot(dest->single_stack_ix()); 762 __ strs(src->as_float_reg(), dest_addr); 763 764 } else if (src->is_double_fpu()) { 765 Address dest_addr = frame_map()->address_for_slot(dest->double_stack_ix()); 766 __ strd(src->as_double_reg(), dest_addr); 767 768 } else { 769 ShouldNotReachHere(); 770 } 771 772 } 773 774 775 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* 
unaligned */) { 776 LIR_Address* to_addr = dest->as_address_ptr(); 777 PatchingStub* patch = NULL; 778 Register compressed_src = rscratch1; 779 780 if (patch_code != lir_patch_none) { 781 deoptimize_trap(info); 782 return; 783 } 784 785 if (type == T_ARRAY || type == T_OBJECT) { 786 __ verify_oop(src->as_register()); 787 788 if (UseCompressedOops && !wide) { 789 __ encode_heap_oop(compressed_src, src->as_register()); 790 } else { 791 compressed_src = src->as_register(); 792 } 793 } 794 795 int null_check_here = code_offset(); 796 switch (type) { 797 case T_FLOAT: { 798 __ strs(src->as_float_reg(), as_Address(to_addr)); 799 break; 800 } 801 802 case T_DOUBLE: { 803 __ strd(src->as_double_reg(), as_Address(to_addr)); 804 break; 805 } 806 807 case T_ARRAY: // fall through 808 case T_OBJECT: // fall through 809 if (UseCompressedOops && !wide) { 810 __ strw(compressed_src, as_Address(to_addr, rscratch2)); 811 } else { 812 __ str(compressed_src, as_Address(to_addr)); 813 } 814 break; 815 case T_METADATA: 816 // We get here to store a method pointer to the stack to pass to 817 // a dtrace runtime call. This can't work on 64 bit with 818 // compressed klass ptrs: T_METADATA can be a compressed klass 819 // ptr or a 64 bit method pointer. 
820 ShouldNotReachHere(); 821 __ str(src->as_register(), as_Address(to_addr)); 822 break; 823 case T_ADDRESS: 824 __ str(src->as_register(), as_Address(to_addr)); 825 break; 826 case T_INT: 827 __ strw(src->as_register(), as_Address(to_addr)); 828 break; 829 830 case T_LONG: { 831 __ str(src->as_register_lo(), as_Address_lo(to_addr)); 832 break; 833 } 834 835 case T_BYTE: // fall through 836 case T_BOOLEAN: { 837 __ strb(src->as_register(), as_Address(to_addr)); 838 break; 839 } 840 841 case T_CHAR: // fall through 842 case T_SHORT: 843 __ strh(src->as_register(), as_Address(to_addr)); 844 break; 845 846 default: 847 ShouldNotReachHere(); 848 } 849 if (info != NULL) { 850 add_debug_info_for_null_check(null_check_here, info); 851 } 852 } 853 854 855 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) { 856 assert(src->is_stack(), "should not call otherwise"); 857 assert(dest->is_register(), "should not call otherwise"); 858 859 if (dest->is_single_cpu()) { 860 if (type == T_ARRAY || type == T_OBJECT) { 861 __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); 862 __ verify_oop(dest->as_register()); 863 } else if (type == T_METADATA) { 864 __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); 865 } else { 866 __ ldrw(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); 867 } 868 869 } else if (dest->is_double_cpu()) { 870 Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes); 871 __ ldr(dest->as_register_lo(), src_addr_LO); 872 873 } else if (dest->is_single_fpu()) { 874 Address src_addr = frame_map()->address_for_slot(src->single_stack_ix()); 875 __ ldrs(dest->as_float_reg(), src_addr); 876 877 } else if (dest->is_double_fpu()) { 878 Address src_addr = frame_map()->address_for_slot(src->double_stack_ix()); 879 __ ldrd(dest->as_double_reg(), src_addr); 880 881 } else { 882 ShouldNotReachHere(); 883 } 884 } 885 886 887 
// Materialize a not-yet-resolved klass/mirror/field constant into `reg` by
// calling the matching Runtime1 patching stub. On return the stub has patched
// the call site, so this emits only the far call plus debug info.
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;

  switch (patching_id(info)) {
  case PatchingStub::access_field_id:
    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
    reloc_type = relocInfo::section_word_type;
    break;
  case PatchingStub::load_klass_id:
    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
    reloc_type = relocInfo::metadata_type;
    break;
  case PatchingStub::load_mirror_id:
    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  case PatchingStub::load_appendix_id:
    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  default: ShouldNotReachHere();
  }
  // NOTE(review): reloc_type is computed but not passed to far_call here —
  // verify against other ports whether that is intentional on aarch64.

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}

// Copy one stack slot to another by bouncing through a scratch register;
// there is no direct memory-to-memory move on aarch64.
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {

  LIR_Opr temp;
  if (type == T_LONG || type == T_DOUBLE)
    temp = FrameMap::rscratch1_long_opr;
  else
    temp = FrameMap::rscratch1_opr;

  stack2reg(src, temp, src->type());
  reg2stack(temp, dest, dest->type(), false);
}

// Load a value from memory into a register.
// patch_code != lir_patch_none is handled by deoptimizing instead of patching.
// `wide` distinguishes full-width oop loads from compressed (narrow) ones.
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
  LIR_Address* addr = src->as_address_ptr();
  LIR_Address* from_addr = src->as_address_ptr();

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(addr->base()->as_pointer_register());
  }

  if (patch_code != lir_patch_none) {
    // On aarch64 unresolved-field loads deoptimize rather than patch in place.
    deoptimize_trap(info);
    return;
  }

  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }
  // NOTE(review): null_check_here is never used below (debug info is already
  // attached above); looks like a leftover mirroring the store path.
  int null_check_here = code_offset();
  switch (type) {
  case T_FLOAT: {
    __ ldrs(dest->as_float_reg(), as_Address(from_addr));
    break;
  }

  case T_DOUBLE: {
    __ ldrd(dest->as_double_reg(), as_Address(from_addr));
    break;
  }

  case T_ARRAY: // fall through
  case T_OBJECT: // fall through
    if (UseCompressedOops && !wide) {
      // narrow oop: 32-bit load, decoded after the switch
      __ ldrw(dest->as_register(), as_Address(from_addr));
    } else {
      __ ldr(dest->as_register(), as_Address(from_addr));
    }
    break;
  case T_METADATA:
    // We get here to store a method pointer to the stack to pass to
    // a dtrace runtime call. This can't work on 64 bit with
    // compressed klass ptrs: T_METADATA can be a compressed klass
    // ptr or a 64 bit method pointer.
    ShouldNotReachHere();
    __ ldr(dest->as_register(), as_Address(from_addr));
    break;
  case T_ADDRESS:
    // FIXME: OMG this is a horrible kludge. Any offset from an
    // address that matches klass_offset_in_bytes() will be loaded
    // as a word, not a long.
    if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
      __ ldrw(dest->as_register(), as_Address(from_addr));
    } else {
      __ ldr(dest->as_register(), as_Address(from_addr));
    }
    break;
  case T_INT:
    __ ldrw(dest->as_register(), as_Address(from_addr));
    break;

  case T_LONG: {
    __ ldr(dest->as_register_lo(), as_Address_lo(from_addr));
    break;
  }

  case T_BYTE:
    // sign-extending byte load
    __ ldrsb(dest->as_register(), as_Address(from_addr));
    break;
  case T_BOOLEAN: {
    // zero-extending byte load
    __ ldrb(dest->as_register(), as_Address(from_addr));
    break;
  }

  case T_CHAR:
    // zero-extending 16-bit load (Java char is unsigned)
    __ ldrh(dest->as_register(), as_Address(from_addr));
    break;
  case T_SHORT:
    // sign-extending 16-bit load
    __ ldrsh(dest->as_register(), as_Address(from_addr));
    break;

  default:
    ShouldNotReachHere();
  }

  if (type == T_ARRAY || type == T_OBJECT) {
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }
    __ verify_oop(dest->as_register());
  } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
    // companion to the T_ADDRESS kludge above: decode a compressed klass ptr
    if (UseCompressedClassPointers) {
      __ decode_klass_not_null(dest->as_register());
    }
  }
}


// log2 of the element size for array type `type`, used as a shift amount
// when computing element addresses.
int LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  return exact_log2(elem_size);
}

// Emit 32-bit integer division or remainder (lir_idiv / lir_irem) using the
// shared corrected_idivl helper (handles min_int / -1 per JVM semantics).
void LIR_Assembler::arithmetic_idiv(LIR_Op3* op, bool is_irem) {
  Register Rdividend = op->in_opr1()->as_register();
  Register Rdivisor  = op->in_opr2()->as_register();
  Register Rscratch  = op->in_opr3()->as_register();
  Register Rresult   = op->result_opr()->as_register();
  int divisor = -1;

  /*
  TODO: For some reason, using the Rscratch that gets passed in is
  not possible because the register allocator does not see the tmp reg
  as used, and assignes it the same register as Rdividend. We use rscratch1
  instead.

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor  != Rscratch, "");
  */

  // NOTE(review): dead code — divisor is fixed at -1 so this branch never
  // fires; looks like a leftover power-of-two strength-reduction hook.
  if (Rdivisor == noreg && is_power_of_2(divisor)) {
    // convert division by a power of two into some shifts and logical operations
  }

  __ corrected_idivl(Rresult, Rdividend, Rdivisor, is_irem, rscratch1);
}

// Dispatch three-operand LIR ops: integer div/rem and fused multiply-add.
void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
  case lir_idiv:
    arithmetic_idiv(op, false);
    break;
  case lir_irem:
    arithmetic_idiv(op, true);
    break;
  case lir_fmad:
    // result = opr1 * opr2 + opr3 (double)
    __ fmaddd(op->result_opr()->as_double_reg(),
              op->in_opr1()->as_double_reg(),
              op->in_opr2()->as_double_reg(),
              op->in_opr3()->as_double_reg());
    break;
  case lir_fmaf:
    // result = opr1 * opr2 + opr3 (float)
    __ fmadds(op->result_opr()->as_float_reg(),
              op->in_opr1()->as_float_reg(),
              op->in_opr2()->as_float_reg(),
              op->in_opr3()->as_float_reg());
    break;
  default:      ShouldNotReachHere(); break;
  }
}

// Emit a conditional or unconditional branch. Float branches need extra care
// because IEEE comparisons can be unordered (NaN): for those, an explicit
// overflow-flag branch to the unordered block is emitted first where needed,
// and the condition code is adjusted to its unsigned variant.
void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != NULL) add_debug_info_for_branch(op->info());
    __ b(*(op->label()));
  } else {
    Assembler::Condition acond;
    if (op->code() == lir_cond_float_branch) {
      bool is_unordered = (op->ublock() == op->block());
      // Assembler::EQ does not permit unordered branches, so we add
      // another branch here. Likewise, Assembler::NE does not permit
      // ordered branches.
      if (is_unordered && op->cond() == lir_cond_equal
          || !is_unordered && op->cond() == lir_cond_notEqual)
        // VS = overflow set = unordered after an fcmp
        __ br(Assembler::VS, *(op->ublock()->label()));
      switch(op->cond()) {
      case lir_cond_equal:        acond = Assembler::EQ; break;
      case lir_cond_notEqual:     acond = Assembler::NE; break;
      // unsigned variants (LO/LS/HS/HI) take the branch on unordered
      case lir_cond_less:         acond = (is_unordered ? Assembler::LT : Assembler::LO); break;
      case lir_cond_lessEqual:    acond = (is_unordered ? Assembler::LE : Assembler::LS); break;
      case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::HS : Assembler::GE); break;
      case lir_cond_greater:      acond = (is_unordered ? Assembler::HI : Assembler::GT); break;
      default:                    ShouldNotReachHere();
        acond = Assembler::EQ;  // unreachable
      }
    } else {
      switch (op->cond()) {
        case lir_cond_equal:        acond = Assembler::EQ; break;
        case lir_cond_notEqual:     acond = Assembler::NE; break;
        case lir_cond_less:         acond = Assembler::LT; break;
        case lir_cond_lessEqual:    acond = Assembler::LE; break;
        case lir_cond_greaterEqual: acond = Assembler::GE; break;
        case lir_cond_greater:      acond = Assembler::GT; break;
        case lir_cond_belowEqual:   acond = Assembler::LS; break;
        case lir_cond_aboveEqual:   acond = Assembler::HS; break;
        default:                    ShouldNotReachHere();
          acond = Assembler::EQ;  // unreachable
      }
    }
    __ br(acond,*(op->label()));
  }
}


// Emit a primitive-type conversion (Java bytecodes i2f, l2d, f2i, i2b, ...).
// Integer narrowing uses bitfield-extract/sign-extend; float<->int uses the
// scvtf/fcvtz family which saturates per JVM semantics.
void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2f:
      {
        __ scvtfws(dest->as_float_reg(), src->as_register());
        break;
      }
    case Bytecodes::_i2d:
      {
        __ scvtfwd(dest->as_double_reg(), src->as_register());
        break;
      }
    case Bytecodes::_l2d:
      {
        __ scvtfd(dest->as_double_reg(), src->as_register_lo());
        break;
      }
    case Bytecodes::_l2f:
      {
        __ scvtfs(dest->as_float_reg(), src->as_register_lo());
        break;
      }
    case Bytecodes::_f2d:
      {
        __ fcvts(dest->as_double_reg(), src->as_float_reg());
        break;
      }
    case Bytecodes::_d2f:
      {
        __ fcvtd(dest->as_float_reg(), src->as_double_reg());
        break;
      }
    case Bytecodes::_i2c:
      {
        // char is unsigned 16-bit: zero-extend low 16 bits
        __ ubfx(dest->as_register(), src->as_register(), 0, 16);
        break;
      }
    case Bytecodes::_i2l:
      {
        __ sxtw(dest->as_register_lo(), src->as_register());
        break;
      }
    case Bytecodes::_i2s:
      {
        __ sxth(dest->as_register(), src->as_register());
        break;
      }
    case Bytecodes::_i2b:
      {
        __ sxtb(dest->as_register(), src->as_register());
        break;
      }
    case Bytecodes::_l2i:
      {
        _masm->block_comment("FIXME: This could be a no-op");
        __ uxtw(dest->as_register(), src->as_register_lo());
        break;
      }
    case Bytecodes::_d2l:
      {
        __ fcvtzd(dest->as_register_lo(), src->as_double_reg());
        break;
      }
    case Bytecodes::_f2i:
      {
        __ fcvtzsw(dest->as_register(), src->as_float_reg());
        break;
      }
    case Bytecodes::_f2l:
      {
        __ fcvtzs(dest->as_register_lo(), src->as_float_reg());
        break;
      }
    case Bytecodes::_d2i:
      {
        __ fcvtzdw(dest->as_register(), src->as_double_reg());
        break;
      }
    default: ShouldNotReachHere();
  }
}

// Fast-path object allocation. If the klass may be uninitialized, first check
// its init state and branch to the slow-path stub when not fully initialized.
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    __ ldrb(rscratch1, Address(op->klass()->as_register(),
                               InstanceKlass::init_state_offset()));
    __ cmpw(rscratch1, InstanceKlass::fully_initialized);
    // the init-state load doubles as the implicit null check of the klass
    add_debug_info_for_null_check_here(op->stub()->info());
    __ br(Assembler::NE, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

// Fast-path array allocation; falls back to the stub for disallowed fast
// paths (UseSlowPath or the matching UseFastNew* flag disabled).
void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len =  op->len()->as_register();
  // length arrives as a 32-bit value; zero-extend for address arithmetic
  __ uxtw(len, len);

  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ b(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    // make sure `len` survives in tmp3 (the stub expects the length there);
    // swap temps out of the way if the allocator aliased one with len
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::header_size(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}

// Update the receiver-type rows of an MDO entry: bump the count of the row
// matching `recv`, or claim the first empty row. Falls through when all rows
// are full (caller handles the polymorphic case).
void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ ldr(rscratch1, Address(rscratch2));
    __ cmp(recv, rscratch1);
    __ br(Assembler::NE, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ b(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    __ lea(rscratch2,
           Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    Address recv_addr(rscratch2);
    __ ldr(rscratch1, recv_addr);
    __ cbnz(rscratch1, next_test);
    // empty row: record the receiver klass and seed its counter
    __ str(recv, recv_addr);
    __ mov(rscratch1, DataLayout::counter_increment);
    __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
    __ str(rscratch1, Address(rscratch2));
    __ b(*update_done);
    __ bind(next_test);
  }
}

// Core of checkcast/instanceof: branch to `success`, `failure` or
// `obj_is_null` according to whether obj is a subtype of op->klass().
// When profiling, success/failure first route through MDO-update blocks.
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  CodeStub* stub = op->stub();
  // NOTE(review): `stub` appears unused below — failure routing goes through
  // the `failure` label parameter instead.
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;

  // check if it needs to be profiled
  ciMethodData* md;
  ciProfileData* data;

  const bool should_profile = op->should_profile();

  if (should_profile) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != NULL,                "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  Label profile_cast_success, profile_cast_failure;
  Label *success_target = should_profile ? &profile_cast_success : success;
  Label *failure_target = should_profile ? &profile_cast_failure : failure;

  // disentangle register aliases the allocator may have introduced
  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded() && !UseCompressedClassPointers) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  if (should_profile) {
    Label not_null;
    __ cbnz(obj, not_null);
    // Object is null; update MDO and exit
    Register mdo  = klass_RInfo;
    __ mov_metadata(mdo, md->constant_encoding());
    Address data_addr
      = __ form_address(rscratch2, mdo,
                        md->byte_offset_of_slot(data, DataLayout::flags_offset()),
                        0);
    // set the null_seen bit in the MDO flags byte
    __ ldrb(rscratch1, data_addr);
    __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
    __ strb(rscratch1, data_addr);
    __ b(*obj_is_null);
    __ bind(not_null);
  } else {
    __ cbz(obj, *obj_is_null);
  }

  if (!k->is_loaded()) {
    // klass constant not resolved yet: go through the patching stub
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
    __ mov_metadata(k_RInfo, k->constant_encoding());
  }
  __ verify_oop(obj);

  if (op->fast_check()) {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(rscratch1, obj);
    __ cmp( rscratch1, k_RInfo);

    __ br(Assembler::NE, *failure_target);
    // successful cast, fall through to profile or jump
  } else {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(klass_RInfo, obj);
    if (k->is_loaded()) {
      // See if we get an immediate positive hit
      __ ldr(rscratch1, Address(klass_RInfo, long(k->super_check_offset())));
      __ cmp(k_RInfo, rscratch1);
      if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
        __ br(Assembler::NE, *failure_target);
        // successful cast, fall through to profile or jump
      } else {
        // See if we get an immediate positive hit
        __ br(Assembler::EQ, *success_target);
        // check for self
        __ cmp(klass_RInfo, k_RInfo);
        __ br(Assembler::EQ, *success_target);

        // slow path: Runtime1 subtype check; args passed on the stack
        __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
        __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
        __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
        // result is a boolean
        __ cbzw(klass_RInfo, *failure_target);
        // successful cast, fall through to profile or jump
      }
    } else {
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
      __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
      // result is a boolean
      __ cbz(k_RInfo, *failure_target);
      // successful cast, fall through to profile or jump
    }
  }
  if (should_profile) {
    Register mdo  = klass_RInfo, recv = k_RInfo;
    __ bind(profile_cast_success);
    __ mov_metadata(mdo, md->constant_encoding());
    __ load_klass(recv, obj);
    // NOTE(review): update_done is declared but `success` is passed to
    // type_profile_helper instead — confirm this matches other ports.
    Label update_done;
    type_profile_helper(mdo, md, data, recv, success);
    __ b(*success);

    __ bind(profile_cast_failure);
    __ mov_metadata(mdo, md->constant_encoding());
    Address counter_addr
      = __ form_address(rscratch2, mdo,
                        md->byte_offset_of_slot(data, CounterData::count_offset()),
                        0);
    // decrement the counter: the trip into this path will retake it
    __ ldr(rscratch1, counter_addr);
    __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
    __ str(rscratch1, counter_addr);
    __ b(*failure);
  }
  __ b(*success);
}
1434 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { 1435 const bool should_profile = op->should_profile(); 1436 1437 LIR_Code code = op->code(); 1438 if (code == lir_store_check) { 1439 Register value = op->object()->as_register(); 1440 Register array = op->array()->as_register(); 1441 Register k_RInfo = op->tmp1()->as_register(); 1442 Register klass_RInfo = op->tmp2()->as_register(); 1443 Register Rtmp1 = op->tmp3()->as_register(); 1444 1445 CodeStub* stub = op->stub(); 1446 1447 // check if it needs to be profiled 1448 ciMethodData* md; 1449 ciProfileData* data; 1450 1451 if (should_profile) { 1452 ciMethod* method = op->profiled_method(); 1453 assert(method != NULL, "Should have method"); 1454 int bci = op->profiled_bci(); 1455 md = method->method_data_or_null(); 1456 assert(md != NULL, "Sanity"); 1457 data = md->bci_to_data(bci); 1458 assert(data != NULL, "need data for type check"); 1459 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 1460 } 1461 Label profile_cast_success, profile_cast_failure, done; 1462 Label *success_target = should_profile ? &profile_cast_success : &done; 1463 Label *failure_target = should_profile ? 
&profile_cast_failure : stub->entry(); 1464 1465 if (should_profile) { 1466 Label not_null; 1467 __ cbnz(value, not_null); 1468 // Object is null; update MDO and exit 1469 Register mdo = klass_RInfo; 1470 __ mov_metadata(mdo, md->constant_encoding()); 1471 Address data_addr 1472 = __ form_address(rscratch2, mdo, 1473 md->byte_offset_of_slot(data, DataLayout::flags_offset()), 1474 0); 1475 __ ldrb(rscratch1, data_addr); 1476 __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant()); 1477 __ strb(rscratch1, data_addr); 1478 __ b(done); 1479 __ bind(not_null); 1480 } else { 1481 __ cbz(value, done); 1482 } 1483 1484 add_debug_info_for_null_check_here(op->info_for_exception()); 1485 __ load_klass(k_RInfo, array); 1486 __ load_klass(klass_RInfo, value); 1487 1488 // get instance klass (it's already uncompressed) 1489 __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset())); 1490 // perform the fast part of the checking logic 1491 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL); 1492 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 1493 __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize))); 1494 __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); 1495 __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize))); 1496 // result is a boolean 1497 __ cbzw(k_RInfo, *failure_target); 1498 // fall through to the success case 1499 1500 if (should_profile) { 1501 Register mdo = klass_RInfo, recv = k_RInfo; 1502 __ bind(profile_cast_success); 1503 __ mov_metadata(mdo, md->constant_encoding()); 1504 __ load_klass(recv, value); 1505 Label update_done; 1506 type_profile_helper(mdo, md, data, recv, &done); 1507 __ b(done); 1508 1509 __ bind(profile_cast_failure); 1510 __ mov_metadata(mdo, md->constant_encoding()); 1511 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 1512 __ lea(rscratch2, counter_addr); 
1513 __ ldr(rscratch1, Address(rscratch2)); 1514 __ sub(rscratch1, rscratch1, DataLayout::counter_increment); 1515 __ str(rscratch1, Address(rscratch2)); 1516 __ b(*stub->entry()); 1517 } 1518 1519 __ bind(done); 1520 } else if (code == lir_checkcast) { 1521 Register obj = op->object()->as_register(); 1522 Register dst = op->result_opr()->as_register(); 1523 Label success; 1524 emit_typecheck_helper(op, &success, op->stub()->entry(), &success); 1525 __ bind(success); 1526 if (dst != obj) { 1527 __ mov(dst, obj); 1528 } 1529 } else if (code == lir_instanceof) { 1530 Register obj = op->object()->as_register(); 1531 Register dst = op->result_opr()->as_register(); 1532 Label success, failure, done; 1533 emit_typecheck_helper(op, &success, &failure, &failure); 1534 __ bind(failure); 1535 __ mov(dst, zr); 1536 __ b(done); 1537 __ bind(success); 1538 __ mov(dst, 1); 1539 __ bind(done); 1540 } else { 1541 ShouldNotReachHere(); 1542 } 1543 } 1544 1545 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) { 1546 __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1); 1547 __ cset(rscratch1, Assembler::NE); 1548 __ membar(__ AnyAny); 1549 } 1550 1551 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) { 1552 __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1); 1553 __ cset(rscratch1, Assembler::NE); 1554 __ membar(__ AnyAny); 1555 } 1556 1557 1558 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { 1559 assert(VM_Version::supports_cx8(), "wrong machine"); 1560 Register addr; 1561 if (op->addr()->is_register()) { 1562 addr = as_reg(op->addr()); 1563 } else { 1564 assert(op->addr()->is_address(), "what else?"); 1565 LIR_Address* addr_ptr = op->addr()->as_address_ptr(); 1566 assert(addr_ptr->disp() == 0, "need 0 disp"); 1567 assert(addr_ptr->index() == LIR_OprDesc::illegalOpr(), "need 0 index"); 
1568 addr = as_reg(addr_ptr->base()); 1569 } 1570 Register newval = as_reg(op->new_value()); 1571 Register cmpval = as_reg(op->cmp_value()); 1572 1573 if (op->code() == lir_cas_obj) { 1574 if (UseCompressedOops) { 1575 Register t1 = op->tmp1()->as_register(); 1576 assert(op->tmp1()->is_valid(), "must be"); 1577 __ encode_heap_oop(t1, cmpval); 1578 cmpval = t1; 1579 __ encode_heap_oop(rscratch2, newval); 1580 newval = rscratch2; 1581 casw(addr, newval, cmpval); 1582 } else { 1583 casl(addr, newval, cmpval); 1584 } 1585 } else if (op->code() == lir_cas_int) { 1586 casw(addr, newval, cmpval); 1587 } else { 1588 casl(addr, newval, cmpval); 1589 } 1590 } 1591 1592 1593 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) { 1594 1595 Assembler::Condition acond, ncond; 1596 switch (condition) { 1597 case lir_cond_equal: acond = Assembler::EQ; ncond = Assembler::NE; break; 1598 case lir_cond_notEqual: acond = Assembler::NE; ncond = Assembler::EQ; break; 1599 case lir_cond_less: acond = Assembler::LT; ncond = Assembler::GE; break; 1600 case lir_cond_lessEqual: acond = Assembler::LE; ncond = Assembler::GT; break; 1601 case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break; 1602 case lir_cond_greater: acond = Assembler::GT; ncond = Assembler::LE; break; 1603 case lir_cond_belowEqual: 1604 case lir_cond_aboveEqual: 1605 default: ShouldNotReachHere(); 1606 acond = Assembler::EQ; ncond = Assembler::NE; // unreachable 1607 } 1608 1609 assert(result->is_single_cpu() || result->is_double_cpu(), 1610 "expect single register for result"); 1611 if (opr1->is_constant() && opr2->is_constant() 1612 && opr1->type() == T_INT && opr2->type() == T_INT) { 1613 jint val1 = opr1->as_jint(); 1614 jint val2 = opr2->as_jint(); 1615 if (val1 == 0 && val2 == 1) { 1616 __ cset(result->as_register(), ncond); 1617 return; 1618 } else if (val1 == 1 && val2 == 0) { 1619 __ cset(result->as_register(), acond); 1620 return; 
1621 } 1622 } 1623 1624 if (opr1->is_constant() && opr2->is_constant() 1625 && opr1->type() == T_LONG && opr2->type() == T_LONG) { 1626 jlong val1 = opr1->as_jlong(); 1627 jlong val2 = opr2->as_jlong(); 1628 if (val1 == 0 && val2 == 1) { 1629 __ cset(result->as_register_lo(), ncond); 1630 return; 1631 } else if (val1 == 1 && val2 == 0) { 1632 __ cset(result->as_register_lo(), acond); 1633 return; 1634 } 1635 } 1636 1637 if (opr1->is_stack()) { 1638 stack2reg(opr1, FrameMap::rscratch1_opr, result->type()); 1639 opr1 = FrameMap::rscratch1_opr; 1640 } else if (opr1->is_constant()) { 1641 LIR_Opr tmp 1642 = opr1->type() == T_LONG ? FrameMap::rscratch1_long_opr : FrameMap::rscratch1_opr; 1643 const2reg(opr1, tmp, lir_patch_none, NULL); 1644 opr1 = tmp; 1645 } 1646 1647 if (opr2->is_stack()) { 1648 stack2reg(opr2, FrameMap::rscratch2_opr, result->type()); 1649 opr2 = FrameMap::rscratch2_opr; 1650 } else if (opr2->is_constant()) { 1651 LIR_Opr tmp 1652 = opr2->type() == T_LONG ? FrameMap::rscratch2_long_opr : FrameMap::rscratch2_opr; 1653 const2reg(opr2, tmp, lir_patch_none, NULL); 1654 opr2 = tmp; 1655 } 1656 1657 if (result->type() == T_LONG) 1658 __ csel(result->as_register_lo(), opr1->as_register_lo(), opr2->as_register_lo(), acond); 1659 else 1660 __ csel(result->as_register(), opr1->as_register(), opr2->as_register(), acond); 1661 } 1662 1663 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) { 1664 assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method"); 1665 1666 if (left->is_single_cpu()) { 1667 Register lreg = left->as_register(); 1668 Register dreg = as_reg(dest); 1669 1670 if (right->is_single_cpu()) { 1671 // cpu register - cpu register 1672 1673 assert(left->type() == T_INT && right->type() == T_INT && dest->type() == T_INT, 1674 "should be"); 1675 Register rreg = right->as_register(); 1676 switch (code) { 1677 case lir_add: __ addw 
(dest->as_register(), lreg, rreg); break; 1678 case lir_sub: __ subw (dest->as_register(), lreg, rreg); break; 1679 case lir_mul: __ mulw (dest->as_register(), lreg, rreg); break; 1680 default: ShouldNotReachHere(); 1681 } 1682 1683 } else if (right->is_double_cpu()) { 1684 Register rreg = right->as_register_lo(); 1685 // single_cpu + double_cpu: can happen with obj+long 1686 assert(code == lir_add || code == lir_sub, "mismatched arithmetic op"); 1687 switch (code) { 1688 case lir_add: __ add(dreg, lreg, rreg); break; 1689 case lir_sub: __ sub(dreg, lreg, rreg); break; 1690 default: ShouldNotReachHere(); 1691 } 1692 } else if (right->is_constant()) { 1693 // cpu register - constant 1694 jlong c; 1695 1696 // FIXME. This is fugly: we really need to factor all this logic. 1697 switch(right->type()) { 1698 case T_LONG: 1699 c = right->as_constant_ptr()->as_jlong(); 1700 break; 1701 case T_INT: 1702 case T_ADDRESS: 1703 c = right->as_constant_ptr()->as_jint(); 1704 break; 1705 default: 1706 ShouldNotReachHere(); 1707 c = 0; // unreachable 1708 break; 1709 } 1710 1711 assert(code == lir_add || code == lir_sub, "mismatched arithmetic op"); 1712 if (c == 0 && dreg == lreg) { 1713 COMMENT("effective nop elided"); 1714 return; 1715 } 1716 switch(left->type()) { 1717 case T_INT: 1718 switch (code) { 1719 case lir_add: __ addw(dreg, lreg, c); break; 1720 case lir_sub: __ subw(dreg, lreg, c); break; 1721 default: ShouldNotReachHere(); 1722 } 1723 break; 1724 case T_OBJECT: 1725 case T_ADDRESS: 1726 switch (code) { 1727 case lir_add: __ add(dreg, lreg, c); break; 1728 case lir_sub: __ sub(dreg, lreg, c); break; 1729 default: ShouldNotReachHere(); 1730 } 1731 break; 1732 ShouldNotReachHere(); 1733 } 1734 } else { 1735 ShouldNotReachHere(); 1736 } 1737 1738 } else if (left->is_double_cpu()) { 1739 Register lreg_lo = left->as_register_lo(); 1740 1741 if (right->is_double_cpu()) { 1742 // cpu register - cpu register 1743 Register rreg_lo = right->as_register_lo(); 1744 switch 
(code) { 1745 case lir_add: __ add (dest->as_register_lo(), lreg_lo, rreg_lo); break; 1746 case lir_sub: __ sub (dest->as_register_lo(), lreg_lo, rreg_lo); break; 1747 case lir_mul: __ mul (dest->as_register_lo(), lreg_lo, rreg_lo); break; 1748 case lir_div: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, false, rscratch1); break; 1749 case lir_rem: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, true, rscratch1); break; 1750 default: 1751 ShouldNotReachHere(); 1752 } 1753 1754 } else if (right->is_constant()) { 1755 jlong c = right->as_constant_ptr()->as_jlong_bits(); 1756 Register dreg = as_reg(dest); 1757 assert(code == lir_add || code == lir_sub, "mismatched arithmetic op"); 1758 if (c == 0 && dreg == lreg_lo) { 1759 COMMENT("effective nop elided"); 1760 return; 1761 } 1762 switch (code) { 1763 case lir_add: __ add(dreg, lreg_lo, c); break; 1764 case lir_sub: __ sub(dreg, lreg_lo, c); break; 1765 default: 1766 ShouldNotReachHere(); 1767 } 1768 } else { 1769 ShouldNotReachHere(); 1770 } 1771 } else if (left->is_single_fpu()) { 1772 assert(right->is_single_fpu(), "right hand side of float arithmetics needs to be float register"); 1773 switch (code) { 1774 case lir_add: __ fadds (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break; 1775 case lir_sub: __ fsubs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break; 1776 case lir_mul: __ fmuls (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break; 1777 case lir_div: __ fdivs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break; 1778 default: 1779 ShouldNotReachHere(); 1780 } 1781 } else if (left->is_double_fpu()) { 1782 if (right->is_double_fpu()) { 1783 // cpu register - cpu register 1784 switch (code) { 1785 case lir_add: __ faddd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break; 1786 case lir_sub: __ fsubd (dest->as_double_reg(), left->as_double_reg(), 
right->as_double_reg()); break; 1787 case lir_mul: __ fmuld (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break; 1788 case lir_div: __ fdivd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break; 1789 default: 1790 ShouldNotReachHere(); 1791 } 1792 } else { 1793 if (right->is_constant()) { 1794 ShouldNotReachHere(); 1795 } 1796 ShouldNotReachHere(); 1797 } 1798 } else if (left->is_single_stack() || left->is_address()) { 1799 assert(left == dest, "left and dest must be equal"); 1800 ShouldNotReachHere(); 1801 } else { 1802 ShouldNotReachHere(); 1803 } 1804 } 1805 1806 void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) { Unimplemented(); } 1807 1808 1809 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) { 1810 switch(code) { 1811 case lir_abs : __ fabsd(dest->as_double_reg(), value->as_double_reg()); break; 1812 case lir_sqrt: __ fsqrtd(dest->as_double_reg(), value->as_double_reg()); break; 1813 default : ShouldNotReachHere(); 1814 } 1815 } 1816 1817 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) { 1818 1819 assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register"); 1820 Register Rleft = left->is_single_cpu() ? left->as_register() : 1821 left->as_register_lo(); 1822 if (dst->is_single_cpu()) { 1823 Register Rdst = dst->as_register(); 1824 if (right->is_constant()) { 1825 switch (code) { 1826 case lir_logic_and: __ andw (Rdst, Rleft, right->as_jint()); break; 1827 case lir_logic_or: __ orrw (Rdst, Rleft, right->as_jint()); break; 1828 case lir_logic_xor: __ eorw (Rdst, Rleft, right->as_jint()); break; 1829 default: ShouldNotReachHere(); break; 1830 } 1831 } else { 1832 Register Rright = right->is_single_cpu() ? 
right->as_register() :
                        right->as_register_lo();
      switch (code) {
        case lir_logic_and: __ andw (Rdst, Rleft, Rright); break;
        case lir_logic_or:  __ orrw (Rdst, Rleft, Rright); break;
        case lir_logic_xor: __ eorw (Rdst, Rleft, Rright); break;
        default: ShouldNotReachHere(); break;
      }
    }
  } else {
    // 64-bit destination: use the full-width logical instructions.
    Register Rdst = dst->as_register_lo();
    if (right->is_constant()) {
      switch (code) {
        case lir_logic_and: __ andr (Rdst, Rleft, right->as_jlong()); break;
        case lir_logic_or:  __ orr (Rdst, Rleft, right->as_jlong()); break;
        case lir_logic_xor: __ eor (Rdst, Rleft, right->as_jlong()); break;
        default: ShouldNotReachHere(); break;
      }
    } else {
      Register Rright = right->is_single_cpu() ? right->as_register() :
                                                 right->as_register_lo();
      switch (code) {
        case lir_logic_and: __ andr (Rdst, Rleft, Rright); break;
        case lir_logic_or:  __ orr (Rdst, Rleft, Rright); break;
        case lir_logic_xor: __ eor (Rdst, Rleft, Rright); break;
        default: ShouldNotReachHere(); break;
      }
    }
  }
}



// Not used on AArch64; integer division is emitted in arith_op via
// corrected_idivl/corrected_idivq.
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { Unimplemented(); }


// Emit a compare of opr1 against opr2, setting the integer condition flags
// or the FPU flags. The constant/single-cpu pairing is a tableswitch whose
// parameters were recorded earlier in the `switches` array.
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_constant() && opr2->is_single_cpu()) {
    // tableswitch
    Register reg = as_reg(opr2);
    struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
    __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
  } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
    Register reg1 = as_reg(opr1);
    if (opr2->is_single_cpu()) {
      // cpu register - cpu register
      Register reg2 = opr2->as_register();
      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
        // Oop compare goes through cmpoop (handles compressed oops).
        __ cmpoop(reg1, reg2);
      } else {
        assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
        __ cmpw(reg1, reg2);
      }
      return;
    }
    if (opr2->is_double_cpu()) {
      // cpu register - cpu register, 64-bit compare
      Register reg2 = opr2->as_register_lo();
      __ cmp(reg1, reg2);
      return;
    }

    if (opr2->is_constant()) {
      bool is_32bit = false; // width of register operand
      jlong imm;

      switch(opr2->type()) {
      case T_INT:
        imm = opr2->as_constant_ptr()->as_jint();
        is_32bit = true;
        break;
      case T_LONG:
        imm = opr2->as_constant_ptr()->as_jlong();
        break;
      case T_ADDRESS:
        imm = opr2->as_constant_ptr()->as_jint();
        break;
      case T_OBJECT:
      case T_ARRAY:
        // Materialize the oop constant, then compare as oops.
        jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
        __ cmpoop(reg1, rscratch1);
        return;
      default:
        ShouldNotReachHere();
        imm = 0;  // unreachable
        break;
      }

      if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
        if (is_32bit)
          __ cmpw(reg1, imm);
        else
          __ subs(zr, reg1, imm);
        return;
      } else {
        // Immediate does not fit an add/sub encoding: materialize it first.
        __ mov(rscratch1, imm);
        if (is_32bit)
          __ cmpw(reg1, rscratch1);
        else
          __ cmp(reg1, rscratch1);
        return;
      }
    } else
      ShouldNotReachHere();
  } else if (opr1->is_single_fpu()) {
    FloatRegister reg1 = opr1->as_float_reg();
    assert(opr2->is_single_fpu(), "expect single float register");
    FloatRegister reg2 = opr2->as_float_reg();
    __ fcmps(reg1, reg2);
  } else if (opr1->is_double_fpu()) {
    FloatRegister reg1 = opr1->as_double_reg();
    assert(opr2->is_double_fpu(), "expect double float register");
    FloatRegister reg2 = opr2->as_double_reg();
    __ fcmpd(reg1, reg2);
  } else {
    ShouldNotReachHere();
  }
}

// Materialize a three-way compare result (-1/0/1) into an integer register:
// float/double (fcmp*_2i) or long (cmp_l2i).
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    bool
is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
    } else if (left->is_double_fpu()) {
      __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
    } else {
      ShouldNotReachHere();
    }
  } else if (code == lir_cmp_l2i) {
    Label done;
    __ cmp(left->as_register_lo(), right->as_register_lo());
    // dst = -1 if LT; otherwise csinc yields 0 when EQ, 1 when GT.
    __ mov(dst->as_register(), (u_int64_t)-1L);
    __ br(Assembler::LT, done);
    __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}


// Calls need no extra alignment on AArch64; nothing to do.
void LIR_Assembler::align_call(LIR_Code code) {  }


// Static/optimized Java call through a trampoline (reachable far call).
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  address call = __ trampoline_call(Address(op->addr(), rtype));
  if (call == NULL) {
    // No room left in the code buffer for the trampoline: give up on
    // this compilation rather than emit an unreachable call.
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info());
}


// Inline-cache dispatched Java call.
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  address call = __ ic_call(op->addr());
  if (call == NULL) {
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info());
}


/* Currently, vtable-dispatch is only enabled for sparc platforms */
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  ShouldNotReachHere();
}


// Emit the stub a static call is later patched to jump through. The stub
// consists of placeholders for the callee Method* and entry point, both
// filled in at patch time.
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  __ relocate(static_stub_Relocation::spec(call_pc));
  __ mov_metadata(rmethod, (Metadata*)NULL);  // patched with callee Method*
  __ movptr(rscratch1, 0);                    // patched with entry point
  __ br(rscratch1);

  assert(__ offset() - start <= call_stub_size(), "stub too big");
  __ end_a_stub();
}


// lir_throw: hand the exception (r0) and throwing pc (r3) to Runtime1's
// exception handler lookup.
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == r0, "must match");
  assert(exceptionPC->as_register() == r3, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  Runtime1::StubID unwind_id;

  // get current pc information
  // pc is only needed if the method has an exception handler, the unwind code does not need it.
  int pc_for_athrow_offset = __ offset();
  InternalAddress pc_for_athrow(__ pc());
  __ adr(exceptionPC->as_register(), pc_for_athrow);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ verify_not_null_oop(r0);
  // search an exception handler (r0: exception oop, r3: throwing pc)
  if (compilation()->has_fpu_code()) {
    unwind_id = Runtime1::handle_exception_id;
  } else {
    // FPU-free variant avoids saving/restoring float state.
    unwind_id = Runtime1::handle_exception_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));

  // FIXME: enough room for two byte trap ????
  __ nop();
}


// lir_unwind: jump to the method's shared unwind handler (exception in r0).
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == r0, "must match");

  __ b(_unwind_handler_entry);
}


// Variable shift: shift lreg by the count register into dreg, using the
// 32-bit (T_INT) or 64-bit instruction forms according to the operand type.
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
  Register dreg = dest->is_single_cpu() ?
dest->as_register() : dest->as_register_lo();

  switch (left->type()) {
    // NOTE(review): the '{' after 'case T_INT:' is unusual — it spans all
    // remaining cases and is closed just before the end of the outer switch.
    case T_INT: {
      switch (code) {
      case lir_shl:  __ lslvw (dreg, lreg, count->as_register()); break;
      case lir_shr:  __ asrvw (dreg, lreg, count->as_register()); break;
      case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;
      default:
        ShouldNotReachHere();
        break;
      }
      break;
    case T_LONG:
    case T_ADDRESS:
    case T_OBJECT:
      switch (code) {
      case lir_shl:  __ lslv (dreg, lreg, count->as_register()); break;
      case lir_shr:  __ asrv (dreg, lreg, count->as_register()); break;
      case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;
      default:
        ShouldNotReachHere();
        break;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }
}


// Immediate shift: shift lreg by the compile-time constant count into dreg.
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
  Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();

  switch (left->type()) {
    // NOTE(review): same brace pattern as the register-count shift_op above.
    case T_INT: {
      switch (code) {
      case lir_shl:  __ lslw (dreg, lreg, count); break;
      case lir_shr:  __ asrw (dreg, lreg, count); break;
      case lir_ushr: __ lsrw (dreg, lreg, count); break;
      default:
        ShouldNotReachHere();
        break;
      }
      break;
    case T_LONG:
    case T_ADDRESS:
    case T_OBJECT:
      switch (code) {
      case lir_shl:  __ lsl (dreg, lreg, count); break;
      case lir_shr:  __ asr (dreg, lreg, count); break;
      case lir_ushr: __ lsr (dreg, lreg, count); break;
      default:
        ShouldNotReachHere();
        break;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }
}


// Store a register into the frame's reserved outgoing-argument area.
void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ str (r, Address(sp, offset_from_rsp_in_bytes));
}


// Store an int constant into the reserved outgoing-argument area
// (materialized through rscratch1).
void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ mov (rscratch1, c);
  __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
}


// Dead on AArch64 — note the leading ShouldNotReachHere; the body below it
// is never executed.
void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
  ShouldNotReachHere();
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ lea(rscratch1, __
constant_oop_address(o)); 2153 __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes)); 2154 } 2155 2156 2157 // This code replaces a call to arraycopy; no exception may 2158 // be thrown in this code, they must be thrown in the System.arraycopy 2159 // activation frame; we could save some checks if this would not be the case 2160 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { 2161 ciArrayKlass* default_type = op->expected_type(); 2162 Register src = op->src()->as_register(); 2163 Register dst = op->dst()->as_register(); 2164 Register src_pos = op->src_pos()->as_register(); 2165 Register dst_pos = op->dst_pos()->as_register(); 2166 Register length = op->length()->as_register(); 2167 Register tmp = op->tmp()->as_register(); 2168 2169 __ resolve(ACCESS_READ, src); 2170 __ resolve(ACCESS_WRITE, dst); 2171 2172 CodeStub* stub = op->stub(); 2173 int flags = op->flags(); 2174 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL; 2175 if (basic_type == T_ARRAY) basic_type = T_OBJECT; 2176 2177 // if we don't know anything, just go through the generic arraycopy 2178 if (default_type == NULL // || basic_type == T_OBJECT 2179 ) { 2180 Label done; 2181 assert(src == r1 && src_pos == r2, "mismatch in calling convention"); 2182 2183 // Save the arguments in case the generic arraycopy fails and we 2184 // have to fall back to the JNI stub 2185 __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord)); 2186 __ stp(length, src_pos, Address(sp, 2*BytesPerWord)); 2187 __ str(src, Address(sp, 4*BytesPerWord)); 2188 2189 address copyfunc_addr = StubRoutines::generic_arraycopy(); 2190 assert(copyfunc_addr != NULL, "generic arraycopy stub required"); 2191 2192 // The arguments are in java calling convention so we shift them 2193 // to C convention 2194 assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4); 2195 __ mov(c_rarg0, j_rarg0); 2196 assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4); 2197 __ 
mov(c_rarg1, j_rarg1); 2198 assert_different_registers(c_rarg2, j_rarg3, j_rarg4); 2199 __ mov(c_rarg2, j_rarg2); 2200 assert_different_registers(c_rarg3, j_rarg4); 2201 __ mov(c_rarg3, j_rarg3); 2202 __ mov(c_rarg4, j_rarg4); 2203 #ifndef PRODUCT 2204 if (PrintC1Statistics) { 2205 __ incrementw(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt)); 2206 } 2207 #endif 2208 __ far_call(RuntimeAddress(copyfunc_addr)); 2209 2210 __ cbz(r0, *stub->continuation()); 2211 2212 // Reload values from the stack so they are where the stub 2213 // expects them. 2214 __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord)); 2215 __ ldp(length, src_pos, Address(sp, 2*BytesPerWord)); 2216 __ ldr(src, Address(sp, 4*BytesPerWord)); 2217 2218 // r0 is -1^K where K == partial copied count 2219 __ eonw(rscratch1, r0, 0); 2220 // adjust length down and src/end pos up by partial copied count 2221 __ subw(length, length, rscratch1); 2222 __ addw(src_pos, src_pos, rscratch1); 2223 __ addw(dst_pos, dst_pos, rscratch1); 2224 __ b(*stub->entry()); 2225 2226 __ bind(*stub->continuation()); 2227 return; 2228 } 2229 2230 assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point"); 2231 2232 int elem_size = type2aelembytes(basic_type); 2233 int shift_amount; 2234 int scale = exact_log2(elem_size); 2235 2236 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes()); 2237 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes()); 2238 Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes()); 2239 Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes()); 2240 2241 // test for NULL 2242 if (flags & LIR_OpArrayCopy::src_null_check) { 2243 __ cbz(src, *stub->entry()); 2244 } 2245 if (flags & LIR_OpArrayCopy::dst_null_check) { 2246 __ cbz(dst, *stub->entry()); 2247 } 2248 2249 // If the compiler was not able to prove that exact type of the source or the destination 
2250 // of the arraycopy is an array type, check at runtime if the source or the destination is 2251 // an instance type. 2252 if (flags & LIR_OpArrayCopy::type_check) { 2253 if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::dst_objarray)) { 2254 __ load_klass(tmp, dst); 2255 __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset()))); 2256 __ cmpw(rscratch1, Klass::_lh_neutral_value); 2257 __ br(Assembler::GE, *stub->entry()); 2258 } 2259 2260 if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::src_objarray)) { 2261 __ load_klass(tmp, src); 2262 __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset()))); 2263 __ cmpw(rscratch1, Klass::_lh_neutral_value); 2264 __ br(Assembler::GE, *stub->entry()); 2265 } 2266 } 2267 2268 // check if negative 2269 if (flags & LIR_OpArrayCopy::src_pos_positive_check) { 2270 __ cmpw(src_pos, 0); 2271 __ br(Assembler::LT, *stub->entry()); 2272 } 2273 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) { 2274 __ cmpw(dst_pos, 0); 2275 __ br(Assembler::LT, *stub->entry()); 2276 } 2277 2278 if (flags & LIR_OpArrayCopy::length_positive_check) { 2279 __ cmpw(length, 0); 2280 __ br(Assembler::LT, *stub->entry()); 2281 } 2282 2283 if (flags & LIR_OpArrayCopy::src_range_check) { 2284 __ addw(tmp, src_pos, length); 2285 __ ldrw(rscratch1, src_length_addr); 2286 __ cmpw(tmp, rscratch1); 2287 __ br(Assembler::HI, *stub->entry()); 2288 } 2289 if (flags & LIR_OpArrayCopy::dst_range_check) { 2290 __ addw(tmp, dst_pos, length); 2291 __ ldrw(rscratch1, dst_length_addr); 2292 __ cmpw(tmp, rscratch1); 2293 __ br(Assembler::HI, *stub->entry()); 2294 } 2295 2296 if (flags & LIR_OpArrayCopy::type_check) { 2297 // We don't know the array types are compatible 2298 if (basic_type != T_OBJECT) { 2299 // Simple test for basic type arrays 2300 if (UseCompressedClassPointers) { 2301 __ ldrw(tmp, src_klass_addr); 2302 __ ldrw(rscratch1, dst_klass_addr); 2303 __ cmpw(tmp, rscratch1); 2304 } else { 2305 __ ldr(tmp, src_klass_addr); 2306 __ 
ldr(rscratch1, dst_klass_addr); 2307 __ cmp(tmp, rscratch1); 2308 } 2309 __ br(Assembler::NE, *stub->entry()); 2310 } else { 2311 // For object arrays, if src is a sub class of dst then we can 2312 // safely do the copy. 2313 Label cont, slow; 2314 2315 #define PUSH(r1, r2) \ 2316 stp(r1, r2, __ pre(sp, -2 * wordSize)); 2317 2318 #define POP(r1, r2) \ 2319 ldp(r1, r2, __ post(sp, 2 * wordSize)); 2320 2321 __ PUSH(src, dst); 2322 2323 __ load_klass(src, src); 2324 __ load_klass(dst, dst); 2325 2326 __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL); 2327 2328 __ PUSH(src, dst); 2329 __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); 2330 __ POP(src, dst); 2331 2332 __ cbnz(src, cont); 2333 2334 __ bind(slow); 2335 __ POP(src, dst); 2336 2337 address copyfunc_addr = StubRoutines::checkcast_arraycopy(); 2338 if (copyfunc_addr != NULL) { // use stub if available 2339 // src is not a sub class of dst so we have to do a 2340 // per-element check. 2341 2342 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray; 2343 if ((flags & mask) != mask) { 2344 // Check that at least both of them object arrays. 2345 assert(flags & mask, "one of the two should be known to be an object array"); 2346 2347 if (!(flags & LIR_OpArrayCopy::src_objarray)) { 2348 __ load_klass(tmp, src); 2349 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) { 2350 __ load_klass(tmp, dst); 2351 } 2352 int lh_offset = in_bytes(Klass::layout_helper_offset()); 2353 Address klass_lh_addr(tmp, lh_offset); 2354 jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2355 __ ldrw(rscratch1, klass_lh_addr); 2356 __ mov(rscratch2, objArray_lh); 2357 __ eorw(rscratch1, rscratch1, rscratch2); 2358 __ cbnzw(rscratch1, *stub->entry()); 2359 } 2360 2361 // Spill because stubs can use any register they like and it's 2362 // easier to restore just those that we care about. 
__ stp(dst,    dst_pos, Address(sp, 0*BytesPerWord));
        __ stp(length, src_pos, Address(sp, 2*BytesPerWord));
        __ str(src,             Address(sp, 4*BytesPerWord));

        // Set up checkcast_arraycopy C arguments: raw from/to element
        // addresses and an unsigned element count.
        __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
        __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
        assert_different_registers(c_rarg0, dst, dst_pos, length);
        __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
        __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
        assert_different_registers(c_rarg1, dst, length);
        __ uxtw(c_rarg2, length);
        assert_different_registers(c_rarg2, dst);

        // Pass the destination element klass and its super_check_offset.
        __ load_klass(c_rarg4, dst);
        __ ldr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
        __ ldrw(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
        __ far_call(RuntimeAddress(copyfunc_addr));

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ cbnz(r0, failed);
          __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
          __ bind(failed);
        }
#endif

        __ cbz(r0, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
        }
#endif
        assert_different_registers(dst, dst_pos, length, src_pos, src, r0, rscratch1);

        // Restore previously spilled arguments
        __ ldp(dst,    dst_pos, Address(sp, 0*BytesPerWord));
        __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
        __ ldr(src,             Address(sp, 4*BytesPerWord));

        // return value is -1^K where K is partial copied count
        __ eonw(rscratch1, r0, zr);
        // adjust length down and src/end pos up by partial copied count
        __ subw(length, length, rscratch1);
        __ addw(src_pos, src_pos, rscratch1);
        __ addw(dst_pos, dst_pos, rscratch1);
      }

      __ b(*stub->entry());

      __ bind(cont);
      __ POP(src, dst);
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    __ mov_metadata(tmp, default_type->constant_encoding());
    if (UseCompressedClassPointers) {
      __ encode_klass_not_null(tmp);
    }

    if (basic_type != T_OBJECT) {

      if (UseCompressedClassPointers) {
        __ ldrw(rscratch1, dst_klass_addr);
        __ cmpw(tmp, rscratch1);
      } else {
        __ ldr(rscratch1, dst_klass_addr);
        __ cmp(tmp, rscratch1);
      }
      __ br(Assembler::NE, halt);
      if (UseCompressedClassPointers) {
        __ ldrw(rscratch1, src_klass_addr);
        __ cmpw(tmp, rscratch1);
      } else {
        __ ldr(rscratch1, src_klass_addr);
        __ cmp(tmp, rscratch1);
      }
      __ br(Assembler::EQ, known_ok);
    } else {
      if (UseCompressedClassPointers) {
        __ ldrw(rscratch1, dst_klass_addr);
        __ cmpw(tmp, rscratch1);
      } else {
        __ ldr(rscratch1, dst_klass_addr);
        __ cmp(tmp, rscratch1);
      }
      __ br(Assembler::EQ, known_ok);
      __ cmp(src, dst);
      __ br(Assembler::EQ, known_ok);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ incrementw(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
  }
#endif

  // Set up C arguments (from, to, count) and dispatch to the best stub
  // for this element type / alignment / overlap combination.
  __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
  __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
  assert_different_registers(c_rarg0, dst, dst_pos, length);
  __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
  __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
  assert_different_registers(c_rarg1, dst, length);
  __ uxtw(c_rarg2, length);
  assert_different_registers(c_rarg2, dst);

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);

  CodeBlob *cb = CodeCache::find_blob(entry);
  if (cb) {
    __ far_call(RuntimeAddress(entry));
  } else {
    __ call_VM_leaf(entry, 3);
  }

  __ bind(*stub->continuation());
}




// lir_lock / lir_unlock: fast-path monitor enter/exit, falling back to the
// slow-path stub when fast locking is disabled or the fast path fails.
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (!UseFastLocking) {
    __ b(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    Register scratch = noreg;
    if (UseBiasedLocking) {
      scratch = op->scratch_opr()->as_register();
    }
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ resolve(ACCESS_READ | ACCESS_WRITE, obj);
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
    if (op->info() != NULL) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  }
else {
    Unimplemented();
  }
  __ bind(*op->stub()->continuation());
}


// Profile a call site in the method's MDO: bump the call counter and, for
// virtual/interface calls, record the receiver type.
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
  Register mdo  = op->mdo()->as_register();
  __ mov_metadata(mdo, md->constant_encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          // Row for this klass exists: just bump its count.
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          __ mov_metadata(rscratch1, known_klass->constant_encoding());
          __ lea(rscratch2, recv_addr);
          __ str(rscratch1, Address(rscratch2));
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }
    } else {
      // Receiver type only known at runtime: emit a dynamic type profile.
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
__ addptr(counter_addr, DataLayout::counter_increment);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ addptr(counter_addr, DataLayout::counter_increment);
  }
}


// Delay slots are a SPARC concept; not used on AArch64.
void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}


// Load the stack address of the given monitor slot into dst.
void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
  __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
}

// CRC32.update intrinsic: res = ~update_byte_crc32(~crc, val), using the
// static CRC table published by StubRoutines.
void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);
  unsigned long offset;
  // adrp + optional add materializes the page-relative table address.
  __ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset);
  if (offset) __ add(res, res, offset);

  __ mvnw(crc, crc); // ~crc
  __ update_byte_crc32(crc, val, res);
  __ mvnw(res, crc); // ~crc
}

// Record the observed type of `obj` in the MDO slot at mdo_addr, degrading
// the profile to "unknown" on conflicts. The do_null/do_update flags are
// derived from what the profile already contains (current_klass).
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  COMMENT("emit_profile_type {");
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
  assert(mdo_addr.base() != rscratch1, "wrong register");

  __ verify_oop(obj);

  if (tmp != obj) {
    __ mov(tmp, obj);
  }
  if (do_null) {
    __ cbnz(tmp, update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      // Set the null_seen bit in the profile word.
      __ ldr(rscratch2, mdo_addr);
      __ orr(rscratch2, rscratch2, TypeEntries::null_seen);
      __ str(rscratch2, mdo_addr);
    }
    if (do_update) {
      // NOTE(review): the #ifndef/#else below intentionally emits the same
      // branch either way; the debug build additionally traps on an
      // unexpected null when do_null is false.
#ifndef ASSERT
      __ b(next);
    }
#else
      __ b(next);
    }
  } else {
    __ cbnz(tmp, update);
    __ stop("unexpected null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != NULL) {
      Label ok;
      __ load_klass(tmp, tmp);
      __ mov_metadata(rscratch1, exact_klass->constant_encoding());
      __ eor(rscratch1, tmp, rscratch1);
      __ cbz(rscratch1, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
    }
#endif
    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        if (exact_klass != NULL) {
          __ mov_metadata(tmp, exact_klass->constant_encoding());
        } else {
          __ load_klass(tmp, tmp);
        }

        __ ldr(rscratch2, mdo_addr);
        __ eor(tmp, tmp, rscratch2);
        __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
        // klass seen before, nothing to do. The unknown bit may have been
        // set already but no need to check.
        __ cbz(rscratch1, next);

        __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2708 2709 if (TypeEntries::is_type_none(current_klass)) { 2710 __ cbz(rscratch2, none); 2711 __ cmp(rscratch2, (u1)TypeEntries::null_seen); 2712 __ br(Assembler::EQ, none); 2713 // There is a chance that the checks above (re-reading profiling 2714 // data from memory) fail if another thread has just set the 2715 // profiling to this obj's klass 2716 __ dmb(Assembler::ISHLD); 2717 __ ldr(rscratch2, mdo_addr); 2718 __ eor(tmp, tmp, rscratch2); 2719 __ andr(rscratch1, tmp, TypeEntries::type_klass_mask); 2720 __ cbz(rscratch1, next); 2721 } 2722 } else { 2723 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && 2724 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only"); 2725 2726 __ ldr(tmp, mdo_addr); 2727 __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore. 2728 } 2729 2730 // different than before. Cannot keep accurate profile. 2731 __ ldr(rscratch2, mdo_addr); 2732 __ orr(rscratch2, rscratch2, TypeEntries::type_unknown); 2733 __ str(rscratch2, mdo_addr); 2734 2735 if (TypeEntries::is_type_none(current_klass)) { 2736 __ b(next); 2737 2738 __ bind(none); 2739 // first time here. Set profile type. 
2740 __ str(tmp, mdo_addr); 2741 } 2742 } else { 2743 // There's a single possible klass at this profile point 2744 assert(exact_klass != NULL, "should be"); 2745 if (TypeEntries::is_type_none(current_klass)) { 2746 __ mov_metadata(tmp, exact_klass->constant_encoding()); 2747 __ ldr(rscratch2, mdo_addr); 2748 __ eor(tmp, tmp, rscratch2); 2749 __ andr(rscratch1, tmp, TypeEntries::type_klass_mask); 2750 __ cbz(rscratch1, next); 2751 #ifdef ASSERT 2752 { 2753 Label ok; 2754 __ ldr(rscratch1, mdo_addr); 2755 __ cbz(rscratch1, ok); 2756 __ cmp(rscratch1, (u1)TypeEntries::null_seen); 2757 __ br(Assembler::EQ, ok); 2758 // may have been set by another thread 2759 __ dmb(Assembler::ISHLD); 2760 __ mov_metadata(rscratch1, exact_klass->constant_encoding()); 2761 __ ldr(rscratch2, mdo_addr); 2762 __ eor(rscratch2, rscratch1, rscratch2); 2763 __ andr(rscratch2, rscratch2, TypeEntries::type_mask); 2764 __ cbz(rscratch2, ok); 2765 2766 __ stop("unexpected profiling mismatch"); 2767 __ bind(ok); 2768 } 2769 #endif 2770 // first time here. Set profile type. 2771 __ ldr(tmp, mdo_addr); 2772 } else { 2773 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && 2774 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent"); 2775 2776 __ ldr(tmp, mdo_addr); 2777 __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore. 2778 2779 __ orr(tmp, tmp, TypeEntries::type_unknown); 2780 __ str(tmp, mdo_addr); 2781 // FIXME: Write barrier needed here? 
2782 } 2783 } 2784 2785 __ bind(next); 2786 } 2787 COMMENT("} emit_profile_type"); 2788 } 2789 2790 2791 void LIR_Assembler::align_backward_branch_target() { 2792 } 2793 2794 2795 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) { 2796 if (left->is_single_cpu()) { 2797 assert(dest->is_single_cpu(), "expect single result reg"); 2798 __ negw(dest->as_register(), left->as_register()); 2799 } else if (left->is_double_cpu()) { 2800 assert(dest->is_double_cpu(), "expect double result reg"); 2801 __ neg(dest->as_register_lo(), left->as_register_lo()); 2802 } else if (left->is_single_fpu()) { 2803 assert(dest->is_single_fpu(), "expect single float result reg"); 2804 __ fnegs(dest->as_float_reg(), left->as_float_reg()); 2805 } else { 2806 assert(left->is_double_fpu(), "expect double float operand reg"); 2807 assert(dest->is_double_fpu(), "expect double float result reg"); 2808 __ fnegd(dest->as_double_reg(), left->as_double_reg()); 2809 } 2810 } 2811 2812 2813 void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 2814 assert(patch_code == lir_patch_none, "Patch code not supported"); 2815 __ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr())); 2816 } 2817 2818 2819 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { 2820 assert(!tmp->is_valid(), "don't need temporary"); 2821 2822 CodeBlob *cb = CodeCache::find_blob(dest); 2823 if (cb) { 2824 __ far_call(RuntimeAddress(dest)); 2825 } else { 2826 __ mov(rscratch1, RuntimeAddress(dest)); 2827 int len = args->length(); 2828 int type = 0; 2829 if (! 
result->is_illegal()) { 2830 switch (result->type()) { 2831 case T_VOID: 2832 type = 0; 2833 break; 2834 case T_INT: 2835 case T_LONG: 2836 case T_OBJECT: 2837 type = 1; 2838 break; 2839 case T_FLOAT: 2840 type = 2; 2841 break; 2842 case T_DOUBLE: 2843 type = 3; 2844 break; 2845 default: 2846 ShouldNotReachHere(); 2847 break; 2848 } 2849 } 2850 int num_gpargs = 0; 2851 int num_fpargs = 0; 2852 for (int i = 0; i < args->length(); i++) { 2853 LIR_Opr arg = args->at(i); 2854 if (arg->type() == T_FLOAT || arg->type() == T_DOUBLE) { 2855 num_fpargs++; 2856 } else { 2857 num_gpargs++; 2858 } 2859 } 2860 __ blrt(rscratch1, num_gpargs, num_fpargs, type); 2861 } 2862 2863 if (info != NULL) { 2864 add_call_info_here(info); 2865 } 2866 __ maybe_isb(); 2867 } 2868 2869 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { 2870 if (dest->is_address() || src->is_address()) { 2871 move_op(src, dest, type, lir_patch_none, info, 2872 /*pop_fpu_stack*/false, /*unaligned*/false, /*wide*/false); 2873 } else { 2874 ShouldNotReachHere(); 2875 } 2876 } 2877 2878 #ifdef ASSERT 2879 // emit run-time assertion 2880 void LIR_Assembler::emit_assert(LIR_OpAssert* op) { 2881 assert(op->code() == lir_assert, "must be"); 2882 2883 if (op->in_opr1()->is_valid()) { 2884 assert(op->in_opr2()->is_valid(), "both operands must be valid"); 2885 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op); 2886 } else { 2887 assert(op->in_opr2()->is_illegal(), "both operands must be illegal"); 2888 assert(op->condition() == lir_cond_always, "no other conditions allowed"); 2889 } 2890 2891 Label ok; 2892 if (op->condition() != lir_cond_always) { 2893 Assembler::Condition acond = Assembler::AL; 2894 switch (op->condition()) { 2895 case lir_cond_equal: acond = Assembler::EQ; break; 2896 case lir_cond_notEqual: acond = Assembler::NE; break; 2897 case lir_cond_less: acond = Assembler::LT; break; 2898 case lir_cond_lessEqual: acond = Assembler::LE; break; 2899 case 
lir_cond_greaterEqual: acond = Assembler::GE; break; 2900 case lir_cond_greater: acond = Assembler::GT; break; 2901 case lir_cond_belowEqual: acond = Assembler::LS; break; 2902 case lir_cond_aboveEqual: acond = Assembler::HS; break; 2903 default: ShouldNotReachHere(); 2904 } 2905 __ br(acond, ok); 2906 } 2907 if (op->halt()) { 2908 const char* str = __ code_string(op->msg()); 2909 __ stop(str); 2910 } else { 2911 breakpoint(); 2912 } 2913 __ bind(ok); 2914 } 2915 #endif 2916 2917 #ifndef PRODUCT 2918 #define COMMENT(x) do { __ block_comment(x); } while (0) 2919 #else 2920 #define COMMENT(x) 2921 #endif 2922 2923 void LIR_Assembler::membar() { 2924 COMMENT("membar"); 2925 __ membar(MacroAssembler::AnyAny); 2926 } 2927 2928 void LIR_Assembler::membar_acquire() { 2929 __ membar(Assembler::LoadLoad|Assembler::LoadStore); 2930 } 2931 2932 void LIR_Assembler::membar_release() { 2933 __ membar(Assembler::LoadStore|Assembler::StoreStore); 2934 } 2935 2936 void LIR_Assembler::membar_loadload() { 2937 __ membar(Assembler::LoadLoad); 2938 } 2939 2940 void LIR_Assembler::membar_storestore() { 2941 __ membar(MacroAssembler::StoreStore); 2942 } 2943 2944 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); } 2945 2946 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); } 2947 2948 void LIR_Assembler::on_spin_wait() { 2949 Unimplemented(); 2950 } 2951 2952 void LIR_Assembler::get_thread(LIR_Opr result_reg) { 2953 __ mov(result_reg->as_register(), rthread); 2954 } 2955 2956 2957 void LIR_Assembler::peephole(LIR_List *lir) { 2958 #if 0 2959 if (tableswitch_count >= max_tableswitches) 2960 return; 2961 2962 /* 2963 This finite-state automaton recognizes sequences of compare-and- 2964 branch instructions. We will turn them into a tableswitch. You 2965 could argue that C1 really shouldn't be doing this sort of 2966 optimization, but without it the code is really horrible. 
2967 */ 2968 2969 enum { start_s, cmp1_s, beq_s, cmp_s } state; 2970 int first_key, last_key = -2147483648; 2971 int next_key = 0; 2972 int start_insn = -1; 2973 int last_insn = -1; 2974 Register reg = noreg; 2975 LIR_Opr reg_opr; 2976 state = start_s; 2977 2978 LIR_OpList* inst = lir->instructions_list(); 2979 for (int i = 0; i < inst->length(); i++) { 2980 LIR_Op* op = inst->at(i); 2981 switch (state) { 2982 case start_s: 2983 first_key = -1; 2984 start_insn = i; 2985 switch (op->code()) { 2986 case lir_cmp: 2987 LIR_Opr opr1 = op->as_Op2()->in_opr1(); 2988 LIR_Opr opr2 = op->as_Op2()->in_opr2(); 2989 if (opr1->is_cpu_register() && opr1->is_single_cpu() 2990 && opr2->is_constant() 2991 && opr2->type() == T_INT) { 2992 reg_opr = opr1; 2993 reg = opr1->as_register(); 2994 first_key = opr2->as_constant_ptr()->as_jint(); 2995 next_key = first_key + 1; 2996 state = cmp_s; 2997 goto next_state; 2998 } 2999 break; 3000 } 3001 break; 3002 case cmp_s: 3003 switch (op->code()) { 3004 case lir_branch: 3005 if (op->as_OpBranch()->cond() == lir_cond_equal) { 3006 state = beq_s; 3007 last_insn = i; 3008 goto next_state; 3009 } 3010 } 3011 state = start_s; 3012 break; 3013 case beq_s: 3014 switch (op->code()) { 3015 case lir_cmp: { 3016 LIR_Opr opr1 = op->as_Op2()->in_opr1(); 3017 LIR_Opr opr2 = op->as_Op2()->in_opr2(); 3018 if (opr1->is_cpu_register() && opr1->is_single_cpu() 3019 && opr1->as_register() == reg 3020 && opr2->is_constant() 3021 && opr2->type() == T_INT 3022 && opr2->as_constant_ptr()->as_jint() == next_key) { 3023 last_key = next_key; 3024 next_key++; 3025 state = cmp_s; 3026 goto next_state; 3027 } 3028 } 3029 } 3030 last_key = next_key; 3031 state = start_s; 3032 break; 3033 default: 3034 assert(false, "impossible state"); 3035 } 3036 if (state == start_s) { 3037 if (first_key < last_key - 5L && reg != noreg) { 3038 { 3039 // printf("found run register %d starting at insn %d low value %d high value %d\n", 3040 // reg->encoding(), 3041 // start_insn, first_key, 
// Emit an atomic read-modify-write (lir_xadd = fetch-and-add,
// lir_xchg = exchange) on the memory operand `src`, leaving the previous
// value in `dest`. Uses the sequentially-consistent ("al" = acquire+release)
// MacroAssembler atomics, selected by operand width.
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
  Address addr = as_Address(src->as_address_ptr());
  BasicType type = src->type();
  bool is_oop = type == T_OBJECT || type == T_ARRAY;

  // Member-function pointers to the width-appropriate atomic helpers;
  // the "w" variants operate on 32-bit values.
  void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
  void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);

  switch(type) {
  case T_INT:
    xchg = &MacroAssembler::atomic_xchgalw;
    add = &MacroAssembler::atomic_addalw;
    break;
  case T_LONG:
    xchg = &MacroAssembler::atomic_xchgal;
    add = &MacroAssembler::atomic_addal;
    break;
  case T_OBJECT:
  case T_ARRAY:
    // Compressed oops are 32-bit in memory, so use the narrow helpers.
    if (UseCompressedOops) {
      xchg = &MacroAssembler::atomic_xchgalw;
      add = &MacroAssembler::atomic_addalw;
    } else {
      xchg = &MacroAssembler::atomic_xchgal;
      add = &MacroAssembler::atomic_addal;
    }
    break;
  default:
    ShouldNotReachHere();
    // Dummy assignments keep the compiler's "maybe uninitialized"
    // analysis quiet; this path never executes.
    xchg = &MacroAssembler::atomic_xchgal;
    add = &MacroAssembler::atomic_addal; // unreachable
  }

  switch (code) {
  case lir_xadd:
    {
      RegisterOrConstant inc;
      Register tmp = as_reg(tmp_op);
      Register dst = as_reg(dest);
      if (data->is_constant()) {
        inc = RegisterOrConstant(as_long(data));
        assert_different_registers(dst, addr.base(), tmp,
                                   rscratch1, rscratch2);
      } else {
        inc = RegisterOrConstant(as_reg(data));
        assert_different_registers(inc.as_register(), dst, addr.base(), tmp,
                                   rscratch1, rscratch2);
      }
      // Form the effective address in tmp, then do the atomic add.
      __ lea(tmp, addr);
      (_masm->*add)(dst, inc, tmp);
      break;
    }
  case lir_xchg:
    {
      Register tmp = tmp_op->as_register();
      Register obj = as_reg(data);
      Register dst = as_reg(dest);
      // Narrow the new oop into rscratch2 before the 32-bit exchange.
      if (is_oop && UseCompressedOops) {
        __ encode_heap_oop(rscratch2, obj);
        obj = rscratch2;
      }
      assert_different_registers(obj, addr.base(), tmp, rscratch1, dst);
      __ lea(tmp, addr);
      (_masm->*xchg)(dst, obj, tmp);
      // The exchange returned a narrow oop; widen it for the caller.
      if (is_oop && UseCompressedOops) {
        __ decode_heap_oop(dst);
      }
    }
    break;
  default:
    ShouldNotReachHere();
  }
  // Full fence after the atomic, matching the memory-model expectations
  // of the LIR atomic ops.
  __ membar(__ AnyAny);
}

#undef __