1 /* 2 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2014, Red Hat Inc. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 * 24 */ 25 26 #include "precompiled.hpp" 27 #include "asm/assembler.hpp" 28 #include "c1/c1_CodeStubs.hpp" 29 #include "c1/c1_Compilation.hpp" 30 #include "c1/c1_LIRAssembler.hpp" 31 #include "c1/c1_MacroAssembler.hpp" 32 #include "c1/c1_Runtime1.hpp" 33 #include "c1/c1_ValueStack.hpp" 34 #include "ci/ciArrayKlass.hpp" 35 #include "ci/ciInstance.hpp" 36 #include "gc/shared/barrierSet.hpp" 37 #include "gc/shared/cardTableModRefBS.hpp" 38 #include "gc/shared/collectedHeap.hpp" 39 #include "nativeInst_aarch64.hpp" 40 #include "oops/objArrayKlass.hpp" 41 #include "runtime/sharedRuntime.hpp" 42 #include "vmreg_aarch64.inline.hpp" 43 44 45 46 #ifndef PRODUCT 47 #define COMMENT(x) do { __ block_comment(x); } while (0) 48 #else 49 #define COMMENT(x) 50 #endif 51 52 NEEDS_CLEANUP // remove this definitions ? 
// Fixed register conventions used throughout this file.
NEEDS_CLEANUP // remove this definitions ?
const Register IC_Klass    = rscratch2;  // where the IC klass is cached
const Register SYNC_header = r0;         // synchronization header
const Register SHIFT_count = r0;         // where count for shift operations must be

#define __ _masm->


// If tmp1 or tmp2 aliases 'preserve', substitute 'extra' for the
// aliasing temp so the caller ends up with temps distinct from
// 'preserve'.
static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp2 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2);
}



// Three-temp variant of the above: at most one temp can alias
// 'preserve', and it is replaced by 'extra'.
static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2,
                                       Register &tmp3) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp2 = extra;
  } else if (tmp3 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp3 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2, tmp3);
}


bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}

// The OSR buffer pointer arrives in the same register as the receiver.
LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}

//--------------fpu register translations-----------------------


// Emit 'f' into the constant section and return its address.  On
// constant-section overflow the compilation is bailed out; the start of
// the section is returned only so the caller has a valid address until
// the bailout takes effect.
address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}


// Same as float_constant(), for a double.
address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

// Same as float_constant(), for a 64-bit integer.
address LIR_Assembler::int_constant(jlong n) {
  address const_addr = __ long_constant(n);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

// x87-style FPU stack management has no equivalent on AArch64.
void LIR_Assembler::set_24bit_FPU() { Unimplemented(); }

void LIR_Assembler::reset_FPU() { Unimplemented(); }

void LIR_Assembler::fpop() { Unimplemented(); }

void LIR_Assembler::fxch(int i) { Unimplemented(); }

void LIR_Assembler::fld(int i) { Unimplemented(); }

void LIR_Assembler::ffree(int i) { Unimplemented(); }

void LIR_Assembler::breakpoint() { Unimplemented(); }

void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }

void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }

bool LIR_Assembler::is_literal_address(LIR_Address* addr) { Unimplemented(); return false; }
//-------------------------------------------

// Extract the (low) general-purpose register from a single- or
// double-word CPU operand.
static Register as_reg(LIR_Opr op) {
  return op->is_double_cpu() ? op->as_register_lo() : op->as_register();
}

// Widen an integral constant operand (T_INT or T_LONG) to jlong.
static jlong as_long(LIR_Opr data) {
  jlong result;
  switch (data->type()) {
  case T_INT:
    result = (data->as_jint());
    break;
  case T_LONG:
    result = (data->as_jlong());
    break;
  default:
    ShouldNotReachHere();
    result = 0;  // unreachable
  }
  return result;
}

// Convert a LIR_Address into an assembler Address.  A register index
// uses a scaled (sxtw for 32-bit, lsl for 64-bit) register-offset form
// and requires disp == 0; an immediate displacement that does not fit
// the scaled-immediate encoding is materialized into 'tmp' and used as
// a register offset instead.
Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  Register base = addr->base()->as_pointer_register();
  LIR_Opr opr = addr->index();
  if (opr->is_cpu_register()) {
    Register index;
    if (opr->is_single_cpu())
      index = opr->as_register();
    else
      index = opr->as_register_lo();
    assert(addr->disp() == 0, "must be");
    switch(opr->type()) {
      case T_INT:
        return Address(base, index, Address::sxtw(addr->scale()));
      case T_LONG:
        return Address(base, index, Address::lsl(addr->scale()));
      default:
        ShouldNotReachHere();
      }
  } else {
    intptr_t addr_offset = intptr_t(addr->disp());
    if (Address::offset_ok_for_immed(addr_offset, addr->scale()))
      return Address(base, addr_offset, Address::lsl(addr->scale()));
    else {
      // Displacement too large for the immediate form; use tmp as an index.
      __ mov(tmp, addr_offset);
      return Address(base, tmp, Address::lsl(addr->scale()));
    }
  }
  return Address();
}

Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  ShouldNotReachHere();
  return Address();
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}

Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr, rscratch1);  // Ouch
  // FIXME: This needs to be much more clever.  See x86.
}


void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  // r2: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame so in the osr buffer
  // so first slot in the local array is the last local from the interpreter
  // and last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
  // in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  // r2: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ldr(rscratch1, Address(OSR_buf, slot_offset + 1*BytesPerWord));
        __ cbnz(rscratch1, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif
      // Copy the lock word and then the object into this frame's
      // monitor slots.
      __ ldr(r19, Address(OSR_buf, slot_offset + 0));
      __ str(r19, frame_map()->address_for_monitor_lock(i));
      __ ldr(r19, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ str(r19, frame_map()->address_for_monitor_object(i));
    }
  }
}


// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
  Register receiver = FrameMap::receiver_opr->as_register();
  Register ic_klass = IC_Klass;
  int start_offset = __ offset();
  __ inline_cache_check(receiver, ic_klass);

  // if icache check fails, then jump to runtime routine
  // Note: RECEIVER must still contain the receiver!
  Label dont;
  __ br(Assembler::EQ, dont);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  // We align the verified entry point unless the method body
  // (including its inline cache check) will fit in a single 64-byte
  // icache line.
  if (! method()->is_accessor() || __ offset() - start_offset > 4 * 4) {
    // force alignment after the cache check.
    __ align(CodeEntryAlignment);
  }

  __ bind(dont);
  return start_offset;
}


// Materialize a jobject (or NULL) into 'reg'.
void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ mov(reg, zr);
  } else {
    __ movoop(reg, o, /*immediate*/true);
  }
}

// Emit a call to the appropriate Runtime1 patching stub for this
// patch site, recording debug info.  Used instead of inline patching
// on AArch64: the site deoptimizes and re-executes in the interpreter.
void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;

  switch (patching_id(info)) {
  case PatchingStub::access_field_id:
    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
    reloc_type = relocInfo::section_word_type;
    break;
  case PatchingStub::load_klass_id:
    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
    reloc_type = relocInfo::metadata_type;
    break;
  case PatchingStub::load_mirror_id:
    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  case PatchingStub::load_appendix_id:
    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  default: ShouldNotReachHere();
  }

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  deoptimize_trap(info);
}


// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!

  // The frame_map records size in slots (32bit word)

  // subtract two words to account for return address and link
  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}


int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in r0, and r3
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(r0);

  // search an exception handler (r0: exception oop, r3: throwing pc)
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(r0);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(r19, r0);  // Preserve the exception across the unlock/dtrace calls
  }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::r0_opr);
    stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
    __ unlock_object(r5, r4, r0, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    // NOTE(review): dtrace method-exit probes are not yet implemented
    // on this port; the x86 sequence is kept below for reference.
    __ call_Unimplemented();
#if 0
    __ movptr(Address(rsp, 0), rax);
    __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
#endif
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(r0, r19);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ block_comment("remove_frame and dispatch to the unwind handler");
  __ remove_frame(initial_frame_size_in_bytes());
  __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  address handler_base = __ start_a_stub(deopt_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

  // lr must point at the deopt handler itself when the deopt blob runs.
  __ adr(lr, pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}

// Record debug info (including the oop map) for a branch at 'adr',
// marking the site as a polling point.
void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
  _masm->code_section()->relocate(adr, relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}

// Rather than take a segfault when the polling page is protected,
// explicitly check for a safepoint in progress and if there is one,
// fake a call to the handler as if a segfault had been caught.
500 void LIR_Assembler::poll_for_safepoint(relocInfo::relocType rtype, CodeEmitInfo* info) { 501 __ mov(rscratch1, SafepointSynchronize::address_of_state()); 502 __ ldrb(rscratch1, Address(rscratch1)); 503 Label nope, poll; 504 __ cbz(rscratch1, nope); 505 __ block_comment("safepoint"); 506 __ enter(); 507 __ push(0x3, sp); // r0 & r1 508 __ push(0x3ffffffc, sp); // integer registers except lr & sp & r0 & r1 509 __ adr(r0, poll); 510 __ str(r0, Address(rthread, JavaThread::saved_exception_pc_offset())); 511 __ mov(rscratch1, CAST_FROM_FN_PTR(address, SharedRuntime::get_poll_stub)); 512 __ blrt(rscratch1, 1, 0, 1); 513 __ maybe_isb(); 514 __ pop(0x3ffffffc, sp); // integer registers except lr & sp & r0 & r1 515 __ mov(rscratch1, r0); 516 __ pop(0x3, sp); // r0 & r1 517 __ leave(); 518 __ br(rscratch1); 519 address polling_page(os::get_polling_page()); 520 assert(os::is_poll_address(polling_page), "should be"); 521 unsigned long off; 522 __ adrp(rscratch1, Address(polling_page, rtype), off); 523 __ bind(poll); 524 if (info) 525 add_debug_info_for_branch(info); // This isn't just debug info: 526 // it's the oop map 527 else 528 __ code_section()->relocate(pc(), rtype); 529 __ ldrw(zr, Address(rscratch1, off)); 530 __ bind(nope); 531 } 532 533 void LIR_Assembler::return_op(LIR_Opr result) { 534 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,"); 535 // Pop the stack before the safepoint code 536 __ remove_frame(initial_frame_size_in_bytes()); 537 address polling_page(os::get_polling_page()); 538 __ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type); 539 __ ret(lr); 540 } 541 542 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) { 543 address polling_page(os::get_polling_page()); 544 guarantee(info != NULL, "Shouldn't be NULL"); 545 assert(os::is_poll_address(polling_page), "should be"); 546 unsigned long off; 547 __ adrp(rscratch1, Address(polling_page, 
relocInfo::poll_type), off); 548 assert(off == 0, "must be"); 549 add_debug_info_for_branch(info); // This isn't just debug info: 550 // it's the oop map 551 __ read_polling_page(rscratch1, relocInfo::poll_type); 552 return __ offset(); 553 } 554 555 556 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) { 557 if (from_reg == r31_sp) 558 from_reg = sp; 559 if (to_reg == r31_sp) 560 to_reg = sp; 561 __ mov(to_reg, from_reg); 562 } 563 564 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); } 565 566 567 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 568 assert(src->is_constant(), "should not call otherwise"); 569 assert(dest->is_register(), "should not call otherwise"); 570 LIR_Const* c = src->as_constant_ptr(); 571 572 switch (c->type()) { 573 case T_INT: { 574 assert(patch_code == lir_patch_none, "no patching handled here"); 575 __ movw(dest->as_register(), c->as_jint()); 576 break; 577 } 578 579 case T_ADDRESS: { 580 assert(patch_code == lir_patch_none, "no patching handled here"); 581 __ mov(dest->as_register(), c->as_jint()); 582 break; 583 } 584 585 case T_LONG: { 586 assert(patch_code == lir_patch_none, "no patching handled here"); 587 __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong()); 588 break; 589 } 590 591 case T_OBJECT: { 592 if (patch_code == lir_patch_none) { 593 jobject2reg(c->as_jobject(), dest->as_register()); 594 } else { 595 jobject2reg_with_patching(dest->as_register(), info); 596 } 597 break; 598 } 599 600 case T_METADATA: { 601 if (patch_code != lir_patch_none) { 602 klass2reg_with_patching(dest->as_register(), info); 603 } else { 604 __ mov_metadata(dest->as_register(), c->as_metadata()); 605 } 606 break; 607 } 608 609 case T_FLOAT: { 610 if (__ operand_valid_for_float_immediate(c->as_jfloat())) { 611 __ fmovs(dest->as_float_reg(), (c->as_jfloat())); 612 } else { 613 __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat()))); 614 __ 
ldrs(dest->as_float_reg(), Address(rscratch1)); 615 } 616 break; 617 } 618 619 case T_DOUBLE: { 620 if (__ operand_valid_for_float_immediate(c->as_jdouble())) { 621 __ fmovd(dest->as_double_reg(), (c->as_jdouble())); 622 } else { 623 __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble()))); 624 __ ldrd(dest->as_double_reg(), Address(rscratch1)); 625 } 626 break; 627 } 628 629 default: 630 ShouldNotReachHere(); 631 } 632 } 633 634 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) { 635 LIR_Const* c = src->as_constant_ptr(); 636 switch (c->type()) { 637 case T_OBJECT: 638 { 639 if (! c->as_jobject()) 640 __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix())); 641 else { 642 const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL); 643 reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false); 644 } 645 } 646 break; 647 case T_ADDRESS: 648 { 649 const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL); 650 reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false); 651 } 652 case T_INT: 653 case T_FLOAT: 654 { 655 Register reg = zr; 656 if (c->as_jint_bits() == 0) 657 __ strw(zr, frame_map()->address_for_slot(dest->single_stack_ix())); 658 else { 659 __ movw(rscratch1, c->as_jint_bits()); 660 __ strw(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix())); 661 } 662 } 663 break; 664 case T_LONG: 665 case T_DOUBLE: 666 { 667 Register reg = zr; 668 if (c->as_jlong_bits() == 0) 669 __ str(zr, frame_map()->address_for_slot(dest->double_stack_ix(), 670 lo_word_offset_in_bytes)); 671 else { 672 __ mov(rscratch1, (intptr_t)c->as_jlong_bits()); 673 __ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(), 674 lo_word_offset_in_bytes)); 675 } 676 } 677 break; 678 default: 679 ShouldNotReachHere(); 680 } 681 } 682 683 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) { 684 assert(src->is_constant(), "should not call otherwise"); 685 LIR_Const* c = 
src->as_constant_ptr(); 686 LIR_Address* to_addr = dest->as_address_ptr(); 687 688 void (Assembler::* insn)(Register Rt, const Address &adr); 689 690 switch (type) { 691 case T_ADDRESS: 692 assert(c->as_jint() == 0, "should be"); 693 insn = &Assembler::str; 694 break; 695 case T_LONG: 696 assert(c->as_jlong() == 0, "should be"); 697 insn = &Assembler::str; 698 break; 699 case T_INT: 700 assert(c->as_jint() == 0, "should be"); 701 insn = &Assembler::strw; 702 break; 703 case T_OBJECT: 704 case T_ARRAY: 705 assert(c->as_jobject() == 0, "should be"); 706 if (UseCompressedOops && !wide) { 707 insn = &Assembler::strw; 708 } else { 709 insn = &Assembler::str; 710 } 711 break; 712 case T_CHAR: 713 case T_SHORT: 714 assert(c->as_jint() == 0, "should be"); 715 insn = &Assembler::strh; 716 break; 717 case T_BOOLEAN: 718 case T_BYTE: 719 assert(c->as_jint() == 0, "should be"); 720 insn = &Assembler::strb; 721 break; 722 default: 723 ShouldNotReachHere(); 724 insn = &Assembler::str; // unreachable 725 } 726 727 if (info) add_debug_info_for_null_check_here(info); 728 (_masm->*insn)(zr, as_Address(to_addr, rscratch1)); 729 } 730 731 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) { 732 assert(src->is_register(), "should not call otherwise"); 733 assert(dest->is_register(), "should not call otherwise"); 734 735 // move between cpu-registers 736 if (dest->is_single_cpu()) { 737 if (src->type() == T_LONG) { 738 // Can do LONG -> OBJECT 739 move_regs(src->as_register_lo(), dest->as_register()); 740 return; 741 } 742 assert(src->is_single_cpu(), "must match"); 743 if (src->type() == T_OBJECT) { 744 __ verify_oop(src->as_register()); 745 } 746 move_regs(src->as_register(), dest->as_register()); 747 748 } else if (dest->is_double_cpu()) { 749 if (src->type() == T_OBJECT || src->type() == T_ARRAY) { 750 // Surprising to me but we can see move of a long to t_object 751 __ verify_oop(src->as_register()); 752 move_regs(src->as_register(), dest->as_register_lo()); 753 return; 754 } 
755 assert(src->is_double_cpu(), "must match"); 756 Register f_lo = src->as_register_lo(); 757 Register f_hi = src->as_register_hi(); 758 Register t_lo = dest->as_register_lo(); 759 Register t_hi = dest->as_register_hi(); 760 assert(f_hi == f_lo, "must be same"); 761 assert(t_hi == t_lo, "must be same"); 762 move_regs(f_lo, t_lo); 763 764 } else if (dest->is_single_fpu()) { 765 __ fmovs(dest->as_float_reg(), src->as_float_reg()); 766 767 } else if (dest->is_double_fpu()) { 768 __ fmovd(dest->as_double_reg(), src->as_double_reg()); 769 770 } else { 771 ShouldNotReachHere(); 772 } 773 } 774 775 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) { 776 if (src->is_single_cpu()) { 777 if (type == T_ARRAY || type == T_OBJECT) { 778 __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix())); 779 __ verify_oop(src->as_register()); 780 } else if (type == T_METADATA || type == T_DOUBLE) { 781 __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix())); 782 } else { 783 __ strw(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix())); 784 } 785 786 } else if (src->is_double_cpu()) { 787 Address dest_addr_LO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes); 788 __ str(src->as_register_lo(), dest_addr_LO); 789 790 } else if (src->is_single_fpu()) { 791 Address dest_addr = frame_map()->address_for_slot(dest->single_stack_ix()); 792 __ strs(src->as_float_reg(), dest_addr); 793 794 } else if (src->is_double_fpu()) { 795 Address dest_addr = frame_map()->address_for_slot(dest->double_stack_ix()); 796 __ strd(src->as_double_reg(), dest_addr); 797 798 } else { 799 ShouldNotReachHere(); 800 } 801 802 } 803 804 805 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) { 806 LIR_Address* to_addr = dest->as_address_ptr(); 807 
PatchingStub* patch = NULL; 808 Register compressed_src = rscratch1; 809 810 if (patch_code != lir_patch_none) { 811 deoptimize_trap(info); 812 return; 813 } 814 815 if (type == T_ARRAY || type == T_OBJECT) { 816 __ verify_oop(src->as_register()); 817 818 if (UseCompressedOops && !wide) { 819 __ encode_heap_oop(compressed_src, src->as_register()); 820 } else { 821 compressed_src = src->as_register(); 822 } 823 } 824 825 int null_check_here = code_offset(); 826 switch (type) { 827 case T_FLOAT: { 828 __ strs(src->as_float_reg(), as_Address(to_addr)); 829 break; 830 } 831 832 case T_DOUBLE: { 833 __ strd(src->as_double_reg(), as_Address(to_addr)); 834 break; 835 } 836 837 case T_ARRAY: // fall through 838 case T_OBJECT: // fall through 839 if (UseCompressedOops && !wide) { 840 __ strw(compressed_src, as_Address(to_addr, rscratch2)); 841 } else { 842 __ str(compressed_src, as_Address(to_addr)); 843 } 844 break; 845 case T_METADATA: 846 // We get here to store a method pointer to the stack to pass to 847 // a dtrace runtime call. This can't work on 64 bit with 848 // compressed klass ptrs: T_METADATA can be a compressed klass 849 // ptr or a 64 bit method pointer. 
850 ShouldNotReachHere(); 851 __ str(src->as_register(), as_Address(to_addr)); 852 break; 853 case T_ADDRESS: 854 __ str(src->as_register(), as_Address(to_addr)); 855 break; 856 case T_INT: 857 __ strw(src->as_register(), as_Address(to_addr)); 858 break; 859 860 case T_LONG: { 861 __ str(src->as_register_lo(), as_Address_lo(to_addr)); 862 break; 863 } 864 865 case T_BYTE: // fall through 866 case T_BOOLEAN: { 867 __ strb(src->as_register(), as_Address(to_addr)); 868 break; 869 } 870 871 case T_CHAR: // fall through 872 case T_SHORT: 873 __ strh(src->as_register(), as_Address(to_addr)); 874 break; 875 876 default: 877 ShouldNotReachHere(); 878 } 879 if (info != NULL) { 880 add_debug_info_for_null_check(null_check_here, info); 881 } 882 } 883 884 885 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) { 886 assert(src->is_stack(), "should not call otherwise"); 887 assert(dest->is_register(), "should not call otherwise"); 888 889 if (dest->is_single_cpu()) { 890 if (type == T_ARRAY || type == T_OBJECT) { 891 __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); 892 __ verify_oop(dest->as_register()); 893 } else if (type == T_METADATA) { 894 __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); 895 } else { 896 __ ldrw(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); 897 } 898 899 } else if (dest->is_double_cpu()) { 900 Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes); 901 __ ldr(dest->as_register_lo(), src_addr_LO); 902 903 } else if (dest->is_single_fpu()) { 904 Address src_addr = frame_map()->address_for_slot(src->single_stack_ix()); 905 __ ldrs(dest->as_float_reg(), src_addr); 906 907 } else if (dest->is_double_fpu()) { 908 Address src_addr = frame_map()->address_for_slot(src->double_stack_ix()); 909 __ ldrd(dest->as_double_reg(), src_addr); 910 911 } else { 912 ShouldNotReachHere(); 913 } 914 } 915 916 917 
// Emit a call to the appropriate Runtime1 patching stub; after patching,
// deoptimization/re-execution leaves the resolved klass/mirror/appendix in
// place.  NOTE(review): the selected 'reloc_type' and the 'reg' parameter
// are not used after the switch in this implementation — the far_call to
// the patching stub does all the work; verify against other platforms.
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;

  switch (patching_id(info)) {
  case PatchingStub::access_field_id:
    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
    reloc_type = relocInfo::section_word_type;
    break;
  case PatchingStub::load_klass_id:
    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
    reloc_type = relocInfo::metadata_type;
    break;
  case PatchingStub::load_mirror_id:
    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  case PatchingStub::load_appendix_id:
    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  default: ShouldNotReachHere();
  }

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}

// Copy a stack slot to another stack slot by bouncing through a scratch
// register (aarch64 has no memory-to-memory move).  Uses the operands' own
// types for the component moves, not the 'type' parameter.
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {

  LIR_Opr temp;
  if (type == T_LONG || type == T_DOUBLE)
    temp = FrameMap::rscratch1_long_opr;
  else
    temp = FrameMap::rscratch1_opr;

  stack2reg(src, temp, src->type());
  reg2stack(temp, dest, dest->type(), false);
}

// Load from memory into a register, with width/sign-extension selected by
// 'type'.  Patched loads are turned into a deoptimization trap instead of
// being emitted inline.
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
  LIR_Address* addr = src->as_address_ptr();
  LIR_Address* from_addr = src->as_address_ptr();

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(addr->base()->as_pointer_register());
  }

  if (patch_code != lir_patch_none) {
    // Field not yet resolved: deoptimize and let the interpreter do it.
    deoptimize_trap(info);
    return;
  }

  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }
  // NOTE(review): null_check_here is recorded but unused below; debug info
  // was already attached by add_debug_info_for_null_check_here.
  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      __ ldrs(dest->as_float_reg(), as_Address(from_addr));
      break;
    }
// (mem2reg switch, continued)
    case T_DOUBLE: {
      __ ldrd(dest->as_double_reg(), as_Address(from_addr));
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      // Narrow oop load when compressed oops are on and the operand is not
      // explicitly wide; decoded after the switch.
      if (UseCompressedOops && !wide) {
        __ ldrw(dest->as_register(), as_Address(from_addr));
      } else {
        __ ldr(dest->as_register(), as_Address(from_addr));
      }
      break;
    case T_METADATA:
      // We get here to store a method pointer to the stack to pass to
      // a dtrace runtime call. This can't work on 64 bit with
      // compressed klass ptrs: T_METADATA can be a compressed klass
      // ptr or a 64 bit method pointer.
      ShouldNotReachHere();
      __ ldr(dest->as_register(), as_Address(from_addr));
      break;
    case T_ADDRESS:
      // FIXME: OMG this is a horrible kludge.  Any offset from an
      // address that matches klass_offset_in_bytes() will be loaded
      // as a word, not a long.
      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
        __ ldrw(dest->as_register(), as_Address(from_addr));
      } else {
        __ ldr(dest->as_register(), as_Address(from_addr));
      }
      break;
    case T_INT:
      __ ldrw(dest->as_register(), as_Address(from_addr));
      break;

    case T_LONG: {
      __ ldr(dest->as_register_lo(), as_Address_lo(from_addr));
      break;
    }

    case T_BYTE:
      __ ldrsb(dest->as_register(), as_Address(from_addr));   // sign-extend
      break;
    case T_BOOLEAN: {
      __ ldrb(dest->as_register(), as_Address(from_addr));    // zero-extend
      break;
    }

    case T_CHAR:
      __ ldrh(dest->as_register(), as_Address(from_addr));    // zero-extend
      break;
    case T_SHORT:
      __ ldrsh(dest->as_register(), as_Address(from_addr));   // sign-extend
      break;

    default:
      ShouldNotReachHere();
  }

  if (type == T_ARRAY || type == T_OBJECT) {
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }
    __ verify_oop(dest->as_register());
  } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
    // Companion of the T_ADDRESS kludge above: decode the narrow klass.
    if (UseCompressedClassPointers) {
      __ decode_klass_not_null(dest->as_register());
    }
  }
}


// log2 of the element size for 'type', used as a shift amount when
// computing array element addresses.
int LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  return exact_log2(elem_size);
}

// Integer div/rem.  rscratch1 is used as the temporary instead of the
// supplied tmp operand (see TODO below).
void LIR_Assembler::emit_op3(LIR_Op3* op) {
  Register Rdividend = op->in_opr1()->as_register();
  Register Rdivisor  = op->in_opr2()->as_register();
  Register Rscratch  = op->in_opr3()->as_register();
  Register Rresult   = op->result_opr()->as_register();
  int divisor = -1;

  /*
  TODO: For some reason, using the Rscratch that gets passed in is
  not possible because the register allocator does not see the tmp reg
  as used, and assigns it the same register as Rdividend. We use rscratch1
  instead.

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor  != Rscratch, "");
  */

  // NOTE(review): 'divisor' is always -1 here, so this power-of-two
  // strength-reduction block is dead code; confirm against upstream.
  if (Rdivisor == noreg && is_power_of_2(divisor)) {
    // convert division by a power of two into some shifts and logical operations
  }

  if (op->code() == lir_irem) {
    __ corrected_idivl(Rresult, Rdividend, Rdivisor, true, rscratch1);
  } else if (op->code() == lir_idiv) {
    __ corrected_idivl(Rresult, Rdividend, Rdivisor, false, rscratch1);
  } else
    ShouldNotReachHere();
}

// Conditional and unconditional branches.  For float branches the LIR
// condition is mapped to an A64 condition that gives the required
// ordered/unordered behavior; an extra VS (unordered) branch is emitted
// for EQ/NE, which cannot encode unorderedness themselves.
void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != NULL) add_debug_info_for_branch(op->info());
    __ b(*(op->label()));
  } else {
    Assembler::Condition acond;
    if (op->code() == lir_cond_float_branch) {
      // The unordered target coincides with the taken target iff the
      // unordered case must be treated as "taken".
      bool is_unordered = (op->ublock() == op->block());
      // Assembler::EQ does not permit unordered branches, so we add
      // another branch here.  Likewise, Assembler::NE does not permit
      // ordered branches.
      if (is_unordered && op->cond() == lir_cond_equal
          || !is_unordered && op->cond() == lir_cond_notEqual)
        __ br(Assembler::VS, *(op->ublock()->label()));
      switch(op->cond()) {
      case lir_cond_equal:        acond = Assembler::EQ; break;
      case lir_cond_notEqual:     acond = Assembler::NE; break;
      case lir_cond_less:         acond = (is_unordered ? Assembler::LT : Assembler::LO); break;
      case lir_cond_lessEqual:    acond = (is_unordered ? Assembler::LE : Assembler::LS); break;
      case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::HS : Assembler::GE); break;
      case lir_cond_greater:      acond = (is_unordered ? Assembler::HI : Assembler::GT); break;
      default:                    ShouldNotReachHere();
        acond = Assembler::EQ;  // unreachable
      }
    } else {
      switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::EQ; break;
      case lir_cond_notEqual:     acond = Assembler::NE; break;
      case lir_cond_less:         acond = Assembler::LT; break;
      case lir_cond_lessEqual:    acond = Assembler::LE; break;
      case lir_cond_greaterEqual: acond = Assembler::GE; break;
      case lir_cond_greater:      acond = Assembler::GT; break;
      case lir_cond_belowEqual:   acond = Assembler::LS; break;
      case lir_cond_aboveEqual:   acond = Assembler::HS; break;
      default:                    ShouldNotReachHere();
        acond = Assembler::EQ;  // unreachable
      }
    }
    __ br(acond,*(op->label()));
  }
}


// Primitive-type conversions, one A64 instruction each.
void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
  case Bytecodes::_i2f:
    {
      __ scvtfws(dest->as_float_reg(), src->as_register());
      break;
    }
  case Bytecodes::_i2d:
    {
      __ scvtfwd(dest->as_double_reg(), src->as_register());
      break;
    }
// (emit_opConvert switch, continued)
  case Bytecodes::_l2d:
    {
      __ scvtfd(dest->as_double_reg(), src->as_register_lo());
      break;
    }
  case Bytecodes::_l2f:
    {
      __ scvtfs(dest->as_float_reg(), src->as_register_lo());
      break;
    }
  case Bytecodes::_f2d:
    {
      __ fcvts(dest->as_double_reg(), src->as_float_reg());
      break;
    }
  case Bytecodes::_d2f:
    {
      __ fcvtd(dest->as_float_reg(), src->as_double_reg());
      break;
    }
  case Bytecodes::_i2c:
    {
      // char is unsigned 16-bit: zero-extend the low half-word.
      __ ubfx(dest->as_register(), src->as_register(), 0, 16);
      break;
    }
  case Bytecodes::_i2l:
    {
      __ sxtw(dest->as_register_lo(), src->as_register());
      break;
    }
  case Bytecodes::_i2s:
    {
      __ sxth(dest->as_register(), src->as_register());
      break;
    }
  case Bytecodes::_i2b:
    {
      __ sxtb(dest->as_register(), src->as_register());
      break;
    }
  case Bytecodes::_l2i:
    {
      _masm->block_comment("FIXME: This could be a no-op");
      __ uxtw(dest->as_register(), src->as_register_lo());
      break;
    }
  case Bytecodes::_d2l:
    {
      // Saturating convert-to-signed matches Java semantics (fcvtzs).
      __ fcvtzd(dest->as_register_lo(), src->as_double_reg());
      break;
    }
  case Bytecodes::_f2i:
    {
      __ fcvtzsw(dest->as_register(), src->as_float_reg());
      break;
    }
  case Bytecodes::_f2l:
    {
      __ fcvtzs(dest->as_register_lo(), src->as_float_reg());
      break;
    }
  case Bytecodes::_d2i:
    {
      __ fcvtzdw(dest->as_register(), src->as_double_reg());
      break;
    }
  default: ShouldNotReachHere();
  }
}

// Allocate an instance.  If the klass may not be initialized yet, check its
// init state first and take the slow path stub when not fully initialized.
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    __ ldrb(rscratch1, Address(op->klass()->as_register(),
                               InstanceKlass::init_state_offset()));
    __ cmpw(rscratch1, InstanceKlass::fully_initialized);
    add_debug_info_for_null_check_here(op->stub()->info());
    __ br(Assembler::NE, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

// Allocate an array.  The length register must survive as the third temp,
// so the temps are shuffled to keep 'len' live across allocate_array.
void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len =  op->len()->as_register();
  __ uxtw(len, len);   // array length is an unsigned 32-bit value

  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ b(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::header_size(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}

// Update the receiver-type profile rows in 'mdo' for receiver klass 'recv'.
// First pass: bump the count of a matching row.  Second pass: claim the
// first empty row for this receiver.  Branches to 'update_done' when the
// profile has been updated; falls through when all rows are taken.
void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ ldr(rscratch1, Address(rscratch2));
    __ cmp(recv, rscratch1);
    __ br(Assembler::NE, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ b(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    __ lea(rscratch2,
           Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    Address recv_addr(rscratch2);
    __ ldr(rscratch1, recv_addr);
    __ cbnz(rscratch1, next_test);
    __ str(recv, recv_addr);
    __ mov(rscratch1, DataLayout::counter_increment);
    __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
    __ str(rscratch1, Address(rscratch2));
    __ b(*update_done);
    __ bind(next_test);
  }
}

// Shared type-check emitter for checkcast/instanceof.  Branches to
// 'success', 'failure' or 'obj_is_null'; when profiling, MDO updates are
// interposed on the success/failure paths.
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
// (emit_typecheck_helper body, continued)
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;

  // check if it needs to be profiled
  ciMethodData* md;
  ciProfileData* data;

  const bool should_profile = op->should_profile();

  if (should_profile) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != NULL,                "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  // When profiling, success/failure first go through the MDO-update
  // blocks below before reaching the caller-supplied labels.
  Label profile_cast_success, profile_cast_failure;
  Label *success_target = should_profile ? &profile_cast_success : success;
  Label *failure_target = should_profile ? &profile_cast_failure : failure;

  // Resolve register conflicts: obj must stay distinct from the temps.
  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded() && !UseCompressedClassPointers) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  if (should_profile) {
    Label not_null;
    __ cbnz(obj, not_null);
    // Object is null; update MDO and exit
    Register mdo  = klass_RInfo;
    __ mov_metadata(mdo, md->constant_encoding());
    // NOTE(review): redundant qualification DataLayout::DataLayout:: is
    // legal C++ (injected-class-name) but should read DataLayout::.
    // Also note this uses a 64-bit ldr/orr/str with LogBytesPerWord while
    // the same header update in emit_opTypeCheck uses 32-bit accesses with
    // LogBytesPerInt — verify the intended header width.
    Address data_addr
      = __ form_address(rscratch2, mdo,
                        md->byte_offset_of_slot(data, DataLayout::DataLayout::header_offset()),
                        LogBytesPerWord);
    int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
    __ ldr(rscratch1, data_addr);
    __ orr(rscratch1, rscratch1, header_bits);
    __ str(rscratch1, data_addr);
    __ b(*obj_is_null);
    __ bind(not_null);
  } else {
    __ cbz(obj, *obj_is_null);
  }

  if (!k->is_loaded()) {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
    __ mov_metadata(k_RInfo, k->constant_encoding());
  }
  __ verify_oop(obj);

  if (op->fast_check()) {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(rscratch1, obj);
    __ cmp( rscratch1, k_RInfo);

    __ br(Assembler::NE, *failure_target);
    // successful cast, fall through to profile or jump
  } else {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(klass_RInfo, obj);
    if (k->is_loaded()) {
      // See if we get an immediate positive hit
      __ ldr(rscratch1, Address(klass_RInfo, long(k->super_check_offset())));
      __ cmp(k_RInfo, rscratch1);
      if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
        __ br(Assembler::NE, *failure_target);
        // successful cast, fall through to profile or jump
      } else {
        // See if we get an immediate positive hit
        __ br(Assembler::EQ, *success_target);
        // check for self
        __ cmp(klass_RInfo, k_RInfo);
        __ br(Assembler::EQ, *success_target);

        // Slow path: push the pair, call the subtype-check stub, pop result.
        __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
        __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
        __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
        // result is a boolean
        __ cbzw(klass_RInfo, *failure_target);
        // successful cast, fall through to profile or jump
      }
    } else {
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
      __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
      // result is a boolean
      __ cbz(k_RInfo, *failure_target);
      // successful cast, fall through to profile or jump
    }
  }
  if (should_profile) {
    Register mdo  = klass_RInfo, recv = k_RInfo;
    __ bind(profile_cast_success);
    __ mov_metadata(mdo, md->constant_encoding());
    __ load_klass(recv, obj);
    Label update_done;   // NOTE(review): declared but unused; kept as-is
    type_profile_helper(mdo, md, data, recv, success);
    __ b(*success);

    // Failure: decrement the taken count that the interpreter bumped.
    __ bind(profile_cast_failure);
    __ mov_metadata(mdo, md->constant_encoding());
    Address counter_addr
      = __ form_address(rscratch2, mdo,
                        md->byte_offset_of_slot(data, CounterData::count_offset()),
                        LogBytesPerWord);
    __ ldr(rscratch1, counter_addr);
    __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
    __ str(rscratch1, counter_addr);
    __ b(*failure);
  }
  __ b(*success);
}


// Array store check, checkcast and instanceof dispatch.
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  const bool should_profile = op->should_profile();

  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();

    CodeStub* stub = op->stub();

    // check if it needs to be profiled
    ciMethodData* md;
    ciProfileData* data;

    if (should_profile) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      int bci = op->profiled_bci();
      md = method->method_data_or_null();
      assert(md != NULL, "Sanity");
      data = md->bci_to_data(bci);
      assert(data != NULL,                "need data for type check");
      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
    }
    Label profile_cast_success, profile_cast_failure, done;
    Label *success_target = should_profile ? &profile_cast_success : &done;
    Label *failure_target = should_profile ? &profile_cast_failure : stub->entry();

    if (should_profile) {
      Label not_null;
      __ cbnz(value, not_null);
      // Object is null; update MDO and exit
      Register mdo  = klass_RInfo;
      __ mov_metadata(mdo, md->constant_encoding());
      Address data_addr
        = __ form_address(rscratch2, mdo,
                          md->byte_offset_of_slot(data, DataLayout::header_offset()),
                          LogBytesPerInt);
      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
      __ ldrw(rscratch1, data_addr);
      __ orrw(rscratch1, rscratch1, header_bits);
      __ strw(rscratch1, data_addr);
      __ b(done);
      __ bind(not_null);
    } else {
      // Storing null is always allowed.
      __ cbz(value, done);
    }

    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(k_RInfo, array);
    __ load_klass(klass_RInfo, value);

    // get instance klass (it's already uncompressed)
    __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
    __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
    __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
    __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
    // result is a boolean
    __ cbzw(k_RInfo, *failure_target);
    // fall through to the success case

    if (should_profile) {
      Register mdo  = klass_RInfo, recv = k_RInfo;
      __ bind(profile_cast_success);
      __ mov_metadata(mdo, md->constant_encoding());
      __ load_klass(recv, value);
      Label update_done;   // NOTE(review): declared but unused; kept as-is
      type_profile_helper(mdo, md, data, recv, &done);
      __ b(done);

      __ bind(profile_cast_failure);
      __ mov_metadata(mdo, md->constant_encoding());
      Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
      __ lea(rscratch2, counter_addr);
      __ ldr(rscratch1, Address(rscratch2));
      __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
      __ str(rscratch1, Address(rscratch2));
      __ b(*stub->entry());
    }

    __ bind(done);
  } else if (code == lir_checkcast) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success;
    // For checkcast a null object succeeds trivially.
    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
    __ bind(success);
    if (dst != obj) {
      __ mov(dst, obj);
    }
  } else if (code == lir_instanceof) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    // For instanceof a null object yields false.
    emit_typecheck_helper(op, &success, &failure, &failure);
    __ bind(failure);
    __ mov(dst, zr);
    __ b(done);
    __ bind(success);
    __ mov(dst, 1);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}

// 32-bit compare-and-swap; rscratch1 != 0 on failure, full barrier after.
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}

// 64-bit compare-and-swap; rscratch1 != 0 on failure, full barrier after.
void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}


// CAS on object fields (with oop compression), ints and longs.
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  assert(VM_Version::supports_cx8(), "wrong machine");
  Register addr = as_reg(op->addr());
  Register newval = as_reg(op->new_value());
  Register cmpval = as_reg(op->cmp_value());
  Label succeed, fail, around;

  if (op->code() == lir_cas_obj) {
    if (UseCompressedOops) {
// (emit_compare_and_swap, compressed-oop branch, continued)
      Register t1 = op->tmp1()->as_register();
      assert(op->tmp1()->is_valid(), "must be");
      // Compress both values, then CAS on the 32-bit narrow oops.
      __ encode_heap_oop(t1, cmpval);
      cmpval = t1;
      __ encode_heap_oop(rscratch2, newval);
      newval = rscratch2;
      casw(addr, newval, cmpval);
    } else {
      casl(addr, newval, cmpval);
    }
  } else if (op->code() == lir_cas_int) {
    casw(addr, newval, cmpval);
  } else {
    casl(addr, newval, cmpval);
  }
}


// Conditional move.  Constant 0/1 pairs are strength-reduced to a single
// CSET; otherwise both operands are materialized in registers and a CSEL
// selects between them.
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {

  Assembler::Condition acond, ncond;
  switch (condition) {
  case lir_cond_equal:        acond = Assembler::EQ; ncond = Assembler::NE; break;
  case lir_cond_notEqual:     acond = Assembler::NE; ncond = Assembler::EQ; break;
  case lir_cond_less:         acond = Assembler::LT; ncond = Assembler::GE; break;
  case lir_cond_lessEqual:    acond = Assembler::LE; ncond = Assembler::GT; break;
  case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break;
  case lir_cond_greater:      acond = Assembler::GT; ncond = Assembler::LE; break;
  case lir_cond_belowEqual:
  case lir_cond_aboveEqual:
  default:                    ShouldNotReachHere();
    acond = Assembler::EQ; ncond = Assembler::NE;  // unreachable
  }

  assert(result->is_single_cpu() || result->is_double_cpu(),
         "expect single register for result");
  if (opr1->is_constant() && opr2->is_constant()
      && opr1->type() == T_INT && opr2->type() == T_INT) {
    jint val1 = opr1->as_jint();
    jint val2 = opr2->as_jint();
    if (val1 == 0 && val2 == 1) {
      // result = cond ? 0 : 1  ==>  CSET on the negated condition.
      __ cset(result->as_register(), ncond);
      return;
    } else if (val1 == 1 && val2 == 0) {
      __ cset(result->as_register(), acond);
      return;
    }
  }

  if (opr1->is_constant() && opr2->is_constant()
      && opr1->type() == T_LONG && opr2->type() == T_LONG) {
    jlong val1 = opr1->as_jlong();
    jlong val2 = opr2->as_jlong();
    if (val1 == 0 && val2 == 1) {
      __ cset(result->as_register_lo(), ncond);
      return;
    } else if (val1 == 1 && val2 == 0) {
      __ cset(result->as_register_lo(), acond);
      return;
    }
  }

  // Materialize opr1 into rscratch1 if it is not already in a register.
  if (opr1->is_stack()) {
    stack2reg(opr1, FrameMap::rscratch1_opr, result->type());
    opr1 = FrameMap::rscratch1_opr;
  } else if (opr1->is_constant()) {
    LIR_Opr tmp
      = opr1->type() == T_LONG ? FrameMap::rscratch1_long_opr : FrameMap::rscratch1_opr;
    const2reg(opr1, tmp, lir_patch_none, NULL);
    opr1 = tmp;
  }

  // Materialize opr2 into rscratch2 if it is not already in a register.
  if (opr2->is_stack()) {
    stack2reg(opr2, FrameMap::rscratch2_opr, result->type());
    opr2 = FrameMap::rscratch2_opr;
  } else if (opr2->is_constant()) {
    LIR_Opr tmp
      = opr2->type() == T_LONG ? FrameMap::rscratch2_long_opr : FrameMap::rscratch2_opr;
    const2reg(opr2, tmp, lir_patch_none, NULL);
    opr2 = tmp;
  }

  if (result->type() == T_LONG)
    __ csel(result->as_register_lo(), opr1->as_register_lo(), opr2->as_register_lo(), acond);
  else
    __ csel(result->as_register(), opr1->as_register(), opr2->as_register(), acond);
}

// Integer and floating-point arithmetic for all operand/register shapes.
// idiv/irem and ldiv/lrem never reach this method (see assert below).
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");

  if (left->is_single_cpu()) {
    Register lreg = left->as_register();
    Register dreg = as_reg(dest);

    if (right->is_single_cpu()) {
      // cpu register - cpu register

      assert(left->type() == T_INT && right->type() == T_INT && dest->type() == T_INT,
             "should be");
      Register rreg = right->as_register();
      switch (code) {
      case lir_add: __ addw (dest->as_register(), lreg, rreg); break;
      case lir_sub: __ subw (dest->as_register(), lreg, rreg); break;
      case lir_mul: __ mulw (dest->as_register(), lreg, rreg); break;
      default:      ShouldNotReachHere();
// (arith_op, continued)
      }

    } else if (right->is_double_cpu()) {
      Register rreg = right->as_register_lo();
      // single_cpu + double_cpu: can happen with obj+long
      assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
      switch (code) {
      case lir_add: __ add(dreg, lreg, rreg); break;
      case lir_sub: __ sub(dreg, lreg, rreg); break;
      default: ShouldNotReachHere();
      }
    } else if (right->is_constant()) {
      // cpu register - constant
      jlong c;

      // FIXME.  This is fugly: we really need to factor all this logic.
      switch(right->type()) {
      case T_LONG:
        c = right->as_constant_ptr()->as_jlong();
        break;
      case T_INT:
      case T_ADDRESS:
        c = right->as_constant_ptr()->as_jint();
        break;
      default:
        ShouldNotReachHere();
        c = 0;  // unreachable
        break;
      }

      assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
      if (c == 0 && dreg == lreg) {
        COMMENT("effective nop elided");
        return;
      }
      switch(left->type()) {
      case T_INT:
        switch (code) {
        case lir_add: __ addw(dreg, lreg, c); break;
        case lir_sub: __ subw(dreg, lreg, c); break;
        default: ShouldNotReachHere();
        }
        break;
      case T_OBJECT:
      case T_ADDRESS:
        switch (code) {
        case lir_add: __ add(dreg, lreg, c); break;
        case lir_sub: __ sub(dreg, lreg, c); break;
        default: ShouldNotReachHere();
        }
        break;
      // FIX: this guard used to be a bare statement after the preceding
      // break (dead code), so an unexpected left->type() silently fell
      // through the switch.  Restore the default: label so it fires.
      default:
        ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_double_cpu()) {
    Register lreg_lo = left->as_register_lo();

    if (right->is_double_cpu()) {
      // cpu register - cpu register
      Register rreg_lo = right->as_register_lo();
      switch (code) {
      case lir_add: __ add (dest->as_register_lo(), lreg_lo, rreg_lo); break;
      case lir_sub: __ sub (dest->as_register_lo(), lreg_lo, rreg_lo); break;
      case lir_mul: __ mul (dest->as_register_lo(), lreg_lo, rreg_lo); break;
      case lir_div: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, false, rscratch1); break;
      case lir_rem: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, true, rscratch1); break;
      default:
        ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      jlong c = right->as_constant_ptr()->as_jlong_bits();
      Register dreg = as_reg(dest);
      assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
      if (c == 0 && dreg == lreg_lo) {
        COMMENT("effective nop elided");
        return;
      }
      switch (code) {
      case lir_add: __ add(dreg, lreg_lo, c); break;
      case lir_sub: __ sub(dreg, lreg_lo, c); break;
      default:
        ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (left->is_single_fpu()) {
    assert(right->is_single_fpu(), "right hand side of float arithmetics needs to be float register");
    switch (code) {
    case lir_add: __ fadds (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    case lir_sub: __ fsubs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    case lir_mul: __ fmuls (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    case lir_div: __ fdivs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    default:
      ShouldNotReachHere();
    }
  } else if (left->is_double_fpu()) {
    if (right->is_double_fpu()) {
      // cpu register - cpu register
      switch (code) {
      case lir_add: __ faddd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      case lir_sub: __ fsubd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      case lir_mul: __ fmuld (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      case lir_div: __ fdivd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
// (arith_op double_fpu switch, continued)
      default:
        ShouldNotReachHere();
      }
    } else {
      if (right->is_constant()) {
        ShouldNotReachHere();
      }
      ShouldNotReachHere();
    }
  } else if (left->is_single_stack() || left->is_address()) {
    assert(left == dest, "left and dest must be equal");
    ShouldNotReachHere();
  } else {
    ShouldNotReachHere();
  }
}

// FPU-stack arithmetic is an x86 concept; not used on aarch64.
void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) { Unimplemented(); }

// Double-precision abs/sqrt intrinsics.
void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
  switch(code) {
  case lir_abs : __ fabsd(dest->as_double_reg(), value->as_double_reg()); break;
  case lir_sqrt: __ fsqrtd(dest->as_double_reg(), value->as_double_reg()); break;
  default      : ShouldNotReachHere();
  }
}

// Bitwise and/or/xor for 32- and 64-bit operands, with immediate forms
// when the right-hand side is a constant.
void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {

  assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register");
  Register Rleft = left->is_single_cpu() ? left->as_register() :
                                           left->as_register_lo();
   if (dst->is_single_cpu()) {
     Register Rdst = dst->as_register();
     if (right->is_constant()) {
       switch (code) {
         case lir_logic_and: __ andw (Rdst, Rleft, right->as_jint()); break;
         case lir_logic_or:  __ orrw (Rdst, Rleft, right->as_jint()); break;
         case lir_logic_xor: __ eorw (Rdst, Rleft, right->as_jint()); break;
         default: ShouldNotReachHere(); break;
       }
     } else {
       Register Rright = right->is_single_cpu() ? right->as_register() :
                                                  right->as_register_lo();
       switch (code) {
         case lir_logic_and: __ andw (Rdst, Rleft, Rright); break;
         case lir_logic_or:  __ orrw (Rdst, Rleft, Rright); break;
         case lir_logic_xor: __ eorw (Rdst, Rleft, Rright); break;
         default: ShouldNotReachHere(); break;
       }
     }
   } else {
     Register Rdst = dst->as_register_lo();
     if (right->is_constant()) {
       switch (code) {
         case lir_logic_and: __ andr (Rdst, Rleft, right->as_jlong()); break;
         case lir_logic_or:  __ orr (Rdst, Rleft, right->as_jlong()); break;
         case lir_logic_xor: __ eor (Rdst, Rleft, right->as_jlong()); break;
         default: ShouldNotReachHere(); break;
       }
     } else {
       Register Rright = right->is_single_cpu() ? right->as_register() :
                                                  right->as_register_lo();
       switch (code) {
         case lir_logic_and: __ andr (Rdst, Rleft, Rright); break;
         case lir_logic_or:  __ orr (Rdst, Rleft, Rright); break;
         case lir_logic_xor: __ eor (Rdst, Rleft, Rright); break;
         default: ShouldNotReachHere(); break;
       }
     }
   }
}


// Integer division handled elsewhere on aarch64 (see emit_op3).
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { Unimplemented(); }


// Emit a compare that sets the condition flags for a following branch or
// cmove.  A constant opr1 with a cpu-register opr2 encodes a tableswitch.
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_constant() && opr2->is_single_cpu()) {
    // tableswitch
    Register reg = as_reg(opr2);
    struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
    __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
  } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
    Register reg1 = as_reg(opr1);
    if (opr2->is_single_cpu()) {
      // cpu register - cpu register
      Register reg2 = opr2->as_register();
      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
        __ cmp(reg1, reg2);
      } else {
        assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
        __ cmpw(reg1, reg2);
      }
      return;
    }
    if (opr2->is_double_cpu()) {
      // cpu register - cpu register
      Register reg2 = opr2->as_register_lo();
      __ cmp(reg1, reg2);
      return;
    }

    if (opr2->is_constant()) {
      jlong imm;
      switch(opr2->type()) {
      case T_LONG:
        imm = opr2->as_constant_ptr()->as_jlong();
        break;
      case T_INT:
      case T_ADDRESS:
        imm = opr2->as_constant_ptr()->as_jint();
        break;
      case T_OBJECT:
      case T_ARRAY:
        imm = jlong(opr2->as_constant_ptr()->as_jobject());
        break;
      default:
        ShouldNotReachHere();
        imm = 0;  // unreachable
        break;
      }

      if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
        // The constant fits the cmp immediate field; compare width is
        // chosen from the element size of opr1's type.
        if (type2aelembytes(opr1->type()) <= 4)
          __ cmpw(reg1, imm);
        else
          __ cmp(reg1, imm);
        return;
      } else {
        // Too big for an immediate: materialize in rscratch1 first.
        __ mov(rscratch1, imm);
        if (type2aelembytes(opr1->type()) <= 4)
          __ cmpw(reg1, rscratch1);
        else
          __ cmp(reg1, rscratch1);
        return;
      }
    } else
      ShouldNotReachHere();
  } else if (opr1->is_single_fpu()) {
    FloatRegister reg1 = opr1->as_float_reg();
    assert(opr2->is_single_fpu(), "expect single float register");
    FloatRegister reg2 = opr2->as_float_reg();
    __ fcmps(reg1, reg2);
  } else if (opr1->is_double_fpu()) {
    FloatRegister reg1 = opr1->as_double_reg();
    assert(opr2->is_double_fpu(), "expect double float register");
    FloatRegister reg2 = opr2->as_double_reg();
    __ fcmpd(reg1, reg2);
  } else {
    ShouldNotReachHere();
  }
}

// Three-way float/double compare producing -1/0/1; the ucmp variant
// treats unordered as less-than.
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ float_cmp(true, is_unordered_less ?
-1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register()); 1955 } else if (left->is_double_fpu()) { 1956 __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register()); 1957 } else { 1958 ShouldNotReachHere(); 1959 } 1960 } else if (code == lir_cmp_l2i) { 1961 Label done; 1962 __ cmp(left->as_register_lo(), right->as_register_lo()); 1963 __ mov(dst->as_register(), (u_int64_t)-1L); 1964 __ br(Assembler::LT, done); 1965 __ csinc(dst->as_register(), zr, zr, Assembler::EQ); 1966 __ bind(done); 1967 } else { 1968 ShouldNotReachHere(); 1969 } 1970 } 1971 1972 1973 void LIR_Assembler::align_call(LIR_Code code) { } 1974 1975 1976 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) { 1977 address call = __ trampoline_call(Address(op->addr(), rtype)); 1978 if (call == NULL) { 1979 bailout("trampoline stub overflow"); 1980 return; 1981 } 1982 add_call_info(code_offset(), op->info()); 1983 } 1984 1985 1986 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) { 1987 address call = __ ic_call(op->addr()); 1988 if (call == NULL) { 1989 bailout("trampoline stub overflow"); 1990 return; 1991 } 1992 add_call_info(code_offset(), op->info()); 1993 } 1994 1995 1996 /* Currently, vtable-dispatch is only enabled for sparc platforms */ 1997 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) { 1998 ShouldNotReachHere(); 1999 } 2000 2001 2002 void LIR_Assembler::emit_static_call_stub() { 2003 address call_pc = __ pc(); 2004 address stub = __ start_a_stub(call_stub_size); 2005 if (stub == NULL) { 2006 bailout("static call stub overflow"); 2007 return; 2008 } 2009 2010 int start = __ offset(); 2011 2012 __ relocate(static_stub_Relocation::spec(call_pc)); 2013 __ mov_metadata(rmethod, (Metadata*)NULL); 2014 __ movptr(rscratch1, 0); 2015 __ br(rscratch1); 2016 2017 assert(__ offset() - start <= call_stub_size, "stub too big"); 2018 __ end_a_stub(); 2019 } 2020 2021 2022 void LIR_Assembler::throw_op(LIR_Opr 
exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { 2023 assert(exceptionOop->as_register() == r0, "must match"); 2024 assert(exceptionPC->as_register() == r3, "must match"); 2025 2026 // exception object is not added to oop map by LinearScan 2027 // (LinearScan assumes that no oops are in fixed registers) 2028 info->add_register_oop(exceptionOop); 2029 Runtime1::StubID unwind_id; 2030 2031 // get current pc information 2032 // pc is only needed if the method has an exception handler, the unwind code does not need it. 2033 int pc_for_athrow_offset = __ offset(); 2034 InternalAddress pc_for_athrow(__ pc()); 2035 __ adr(exceptionPC->as_register(), pc_for_athrow); 2036 add_call_info(pc_for_athrow_offset, info); // for exception handler 2037 2038 __ verify_not_null_oop(r0); 2039 // search an exception handler (r0: exception oop, r3: throwing pc) 2040 if (compilation()->has_fpu_code()) { 2041 unwind_id = Runtime1::handle_exception_id; 2042 } else { 2043 unwind_id = Runtime1::handle_exception_nofpu_id; 2044 } 2045 __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id))); 2046 2047 // FIXME: enough room for two byte trap ???? 2048 __ nop(); 2049 } 2050 2051 2052 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { 2053 assert(exceptionOop->as_register() == r0, "must match"); 2054 2055 __ b(_unwind_handler_entry); 2056 } 2057 2058 2059 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { 2060 Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo(); 2061 Register dreg = dest->is_single_cpu() ? 
dest->as_register() : dest->as_register_lo(); 2062 2063 switch (left->type()) { 2064 case T_INT: { 2065 switch (code) { 2066 case lir_shl: __ lslvw (dreg, lreg, count->as_register()); break; 2067 case lir_shr: __ asrvw (dreg, lreg, count->as_register()); break; 2068 case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break; 2069 default: 2070 ShouldNotReachHere(); 2071 break; 2072 } 2073 break; 2074 case T_LONG: 2075 case T_ADDRESS: 2076 case T_OBJECT: 2077 switch (code) { 2078 case lir_shl: __ lslv (dreg, lreg, count->as_register()); break; 2079 case lir_shr: __ asrv (dreg, lreg, count->as_register()); break; 2080 case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break; 2081 default: 2082 ShouldNotReachHere(); 2083 break; 2084 } 2085 break; 2086 default: 2087 ShouldNotReachHere(); 2088 break; 2089 } 2090 } 2091 } 2092 2093 2094 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { 2095 Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo(); 2096 Register lreg = left->is_single_cpu() ? 
left->as_register() : left->as_register_lo(); 2097 2098 switch (left->type()) { 2099 case T_INT: { 2100 switch (code) { 2101 case lir_shl: __ lslw (dreg, lreg, count); break; 2102 case lir_shr: __ asrw (dreg, lreg, count); break; 2103 case lir_ushr: __ lsrw (dreg, lreg, count); break; 2104 default: 2105 ShouldNotReachHere(); 2106 break; 2107 } 2108 break; 2109 case T_LONG: 2110 case T_ADDRESS: 2111 case T_OBJECT: 2112 switch (code) { 2113 case lir_shl: __ lsl (dreg, lreg, count); break; 2114 case lir_shr: __ asr (dreg, lreg, count); break; 2115 case lir_ushr: __ lsr (dreg, lreg, count); break; 2116 default: 2117 ShouldNotReachHere(); 2118 break; 2119 } 2120 break; 2121 default: 2122 ShouldNotReachHere(); 2123 break; 2124 } 2125 } 2126 } 2127 2128 2129 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) { 2130 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 2131 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 2132 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 2133 __ str (r, Address(sp, offset_from_rsp_in_bytes)); 2134 } 2135 2136 2137 void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) { 2138 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 2139 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 2140 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 2141 __ mov (rscratch1, c); 2142 __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes)); 2143 } 2144 2145 2146 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) { 2147 ShouldNotReachHere(); 2148 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 2149 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 2150 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 2151 __ lea(rscratch1, __ 
constant_oop_address(o)); 2152 __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes)); 2153 } 2154 2155 2156 // This code replaces a call to arraycopy; no exception may 2157 // be thrown in this code, they must be thrown in the System.arraycopy 2158 // activation frame; we could save some checks if this would not be the case 2159 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { 2160 ciArrayKlass* default_type = op->expected_type(); 2161 Register src = op->src()->as_register(); 2162 Register dst = op->dst()->as_register(); 2163 Register src_pos = op->src_pos()->as_register(); 2164 Register dst_pos = op->dst_pos()->as_register(); 2165 Register length = op->length()->as_register(); 2166 Register tmp = op->tmp()->as_register(); 2167 2168 CodeStub* stub = op->stub(); 2169 int flags = op->flags(); 2170 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL; 2171 if (basic_type == T_ARRAY) basic_type = T_OBJECT; 2172 2173 // if we don't know anything, just go through the generic arraycopy 2174 if (default_type == NULL // || basic_type == T_OBJECT 2175 ) { 2176 Label done; 2177 assert(src == r1 && src_pos == r2, "mismatch in calling convention"); 2178 2179 // Save the arguments in case the generic arraycopy fails and we 2180 // have to fall back to the JNI stub 2181 __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord)); 2182 __ stp(length, src_pos, Address(sp, 2*BytesPerWord)); 2183 __ str(src, Address(sp, 4*BytesPerWord)); 2184 2185 address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy); 2186 address copyfunc_addr = StubRoutines::generic_arraycopy(); 2187 2188 // The arguments are in java calling convention so we shift them 2189 // to C convention 2190 assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4); 2191 __ mov(c_rarg0, j_rarg0); 2192 assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4); 2193 __ mov(c_rarg1, j_rarg1); 2194 assert_different_registers(c_rarg2, j_rarg3, 
j_rarg4); 2195 __ mov(c_rarg2, j_rarg2); 2196 assert_different_registers(c_rarg3, j_rarg4); 2197 __ mov(c_rarg3, j_rarg3); 2198 __ mov(c_rarg4, j_rarg4); 2199 if (copyfunc_addr == NULL) { // Use C version if stub was not generated 2200 __ mov(rscratch1, RuntimeAddress(C_entry)); 2201 __ blrt(rscratch1, 5, 0, 1); 2202 } else { 2203 #ifndef PRODUCT 2204 if (PrintC1Statistics) { 2205 __ incrementw(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt)); 2206 } 2207 #endif 2208 __ far_call(RuntimeAddress(copyfunc_addr)); 2209 } 2210 2211 __ cbz(r0, *stub->continuation()); 2212 2213 // Reload values from the stack so they are where the stub 2214 // expects them. 2215 __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord)); 2216 __ ldp(length, src_pos, Address(sp, 2*BytesPerWord)); 2217 __ ldr(src, Address(sp, 4*BytesPerWord)); 2218 2219 if (copyfunc_addr != NULL) { 2220 // r0 is -1^K where K == partial copied count 2221 __ eonw(rscratch1, r0, 0); 2222 // adjust length down and src/end pos up by partial copied count 2223 __ subw(length, length, rscratch1); 2224 __ addw(src_pos, src_pos, rscratch1); 2225 __ addw(dst_pos, dst_pos, rscratch1); 2226 } 2227 __ b(*stub->entry()); 2228 2229 __ bind(*stub->continuation()); 2230 return; 2231 } 2232 2233 assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point"); 2234 2235 int elem_size = type2aelembytes(basic_type); 2236 int shift_amount; 2237 int scale = exact_log2(elem_size); 2238 2239 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes()); 2240 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes()); 2241 Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes()); 2242 Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes()); 2243 2244 // test for NULL 2245 if (flags & LIR_OpArrayCopy::src_null_check) { 2246 __ cbz(src, *stub->entry()); 2247 } 2248 if (flags & 
LIR_OpArrayCopy::dst_null_check) { 2249 __ cbz(dst, *stub->entry()); 2250 } 2251 2252 // check if negative 2253 if (flags & LIR_OpArrayCopy::src_pos_positive_check) { 2254 __ cmpw(src_pos, 0); 2255 __ br(Assembler::LT, *stub->entry()); 2256 } 2257 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) { 2258 __ cmpw(dst_pos, 0); 2259 __ br(Assembler::LT, *stub->entry()); 2260 } 2261 2262 if (flags & LIR_OpArrayCopy::length_positive_check) { 2263 __ cmpw(length, 0); 2264 __ br(Assembler::LT, *stub->entry()); 2265 } 2266 2267 if (flags & LIR_OpArrayCopy::src_range_check) { 2268 __ addw(tmp, src_pos, length); 2269 __ ldrw(rscratch1, src_length_addr); 2270 __ cmpw(tmp, rscratch1); 2271 __ br(Assembler::HI, *stub->entry()); 2272 } 2273 if (flags & LIR_OpArrayCopy::dst_range_check) { 2274 __ addw(tmp, dst_pos, length); 2275 __ ldrw(rscratch1, dst_length_addr); 2276 __ cmpw(tmp, rscratch1); 2277 __ br(Assembler::HI, *stub->entry()); 2278 } 2279 2280 // FIXME: The logic in LIRGenerator::arraycopy_helper clears 2281 // length_positive_check if the source of our length operand is an 2282 // arraylength. However, that arraylength might be zero, and the 2283 // stub that we're about to call contains an assertion that count != 2284 // 0 . So we make this check purely in order not to trigger an 2285 // assertion failure. 2286 __ cbzw(length, *stub->continuation()); 2287 2288 if (flags & LIR_OpArrayCopy::type_check) { 2289 // We don't know the array types are compatible 2290 if (basic_type != T_OBJECT) { 2291 // Simple test for basic type arrays 2292 if (UseCompressedClassPointers) { 2293 __ ldrw(tmp, src_klass_addr); 2294 __ ldrw(rscratch1, dst_klass_addr); 2295 __ cmpw(tmp, rscratch1); 2296 } else { 2297 __ ldr(tmp, src_klass_addr); 2298 __ ldr(rscratch1, dst_klass_addr); 2299 __ cmp(tmp, rscratch1); 2300 } 2301 __ br(Assembler::NE, *stub->entry()); 2302 } else { 2303 // For object arrays, if src is a sub class of dst then we can 2304 // safely do the copy. 
2305 Label cont, slow; 2306 2307 #define PUSH(r1, r2) \ 2308 stp(r1, r2, __ pre(sp, -2 * wordSize)); 2309 2310 #define POP(r1, r2) \ 2311 ldp(r1, r2, __ post(sp, 2 * wordSize)); 2312 2313 __ PUSH(src, dst); 2314 2315 __ load_klass(src, src); 2316 __ load_klass(dst, dst); 2317 2318 __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL); 2319 2320 __ PUSH(src, dst); 2321 __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); 2322 __ POP(src, dst); 2323 2324 __ cbnz(src, cont); 2325 2326 __ bind(slow); 2327 __ POP(src, dst); 2328 2329 address copyfunc_addr = StubRoutines::checkcast_arraycopy(); 2330 if (copyfunc_addr != NULL) { // use stub if available 2331 // src is not a sub class of dst so we have to do a 2332 // per-element check. 2333 2334 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray; 2335 if ((flags & mask) != mask) { 2336 // Check that at least both of them object arrays. 2337 assert(flags & mask, "one of the two should be known to be an object array"); 2338 2339 if (!(flags & LIR_OpArrayCopy::src_objarray)) { 2340 __ load_klass(tmp, src); 2341 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) { 2342 __ load_klass(tmp, dst); 2343 } 2344 int lh_offset = in_bytes(Klass::layout_helper_offset()); 2345 Address klass_lh_addr(tmp, lh_offset); 2346 jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2347 __ ldrw(rscratch1, klass_lh_addr); 2348 __ mov(rscratch2, objArray_lh); 2349 __ eorw(rscratch1, rscratch1, rscratch2); 2350 __ cbnzw(rscratch1, *stub->entry()); 2351 } 2352 2353 // Spill because stubs can use any register they like and it's 2354 // easier to restore just those that we care about. 
2355 __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord)); 2356 __ stp(length, src_pos, Address(sp, 2*BytesPerWord)); 2357 __ str(src, Address(sp, 4*BytesPerWord)); 2358 2359 __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale))); 2360 __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type)); 2361 assert_different_registers(c_rarg0, dst, dst_pos, length); 2362 __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale))); 2363 __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type)); 2364 assert_different_registers(c_rarg1, dst, length); 2365 __ uxtw(c_rarg2, length); 2366 assert_different_registers(c_rarg2, dst); 2367 2368 __ load_klass(c_rarg4, dst); 2369 __ ldr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset())); 2370 __ ldrw(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset())); 2371 __ far_call(RuntimeAddress(copyfunc_addr)); 2372 2373 #ifndef PRODUCT 2374 if (PrintC1Statistics) { 2375 Label failed; 2376 __ cbnz(r0, failed); 2377 __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt)); 2378 __ bind(failed); 2379 } 2380 #endif 2381 2382 __ cbz(r0, *stub->continuation()); 2383 2384 #ifndef PRODUCT 2385 if (PrintC1Statistics) { 2386 __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt)); 2387 } 2388 #endif 2389 assert_different_registers(dst, dst_pos, length, src_pos, src, r0, rscratch1); 2390 2391 // Restore previously spilled arguments 2392 __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord)); 2393 __ ldp(length, src_pos, Address(sp, 2*BytesPerWord)); 2394 __ ldr(src, Address(sp, 4*BytesPerWord)); 2395 2396 // return value is -1^K where K is partial copied count 2397 __ eonw(rscratch1, r0, zr); 2398 // adjust length down and src/end pos up by partial copied count 2399 __ subw(length, length, rscratch1); 2400 __ addw(src_pos, src_pos, rscratch1); 2401 __ addw(dst_pos, dst_pos, rscratch1); 2402 } 2403 2404 __ b(*stub->entry()); 2405 2406 __ bind(cont); 2407 __ 
POP(src, dst); 2408 } 2409 } 2410 2411 #ifdef ASSERT 2412 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) { 2413 // Sanity check the known type with the incoming class. For the 2414 // primitive case the types must match exactly with src.klass and 2415 // dst.klass each exactly matching the default type. For the 2416 // object array case, if no type check is needed then either the 2417 // dst type is exactly the expected type and the src type is a 2418 // subtype which we can't check or src is the same array as dst 2419 // but not necessarily exactly of type default_type. 2420 Label known_ok, halt; 2421 __ mov_metadata(tmp, default_type->constant_encoding()); 2422 if (UseCompressedClassPointers) { 2423 __ encode_klass_not_null(tmp); 2424 } 2425 2426 if (basic_type != T_OBJECT) { 2427 2428 if (UseCompressedClassPointers) { 2429 __ ldrw(rscratch1, dst_klass_addr); 2430 __ cmpw(tmp, rscratch1); 2431 } else { 2432 __ ldr(rscratch1, dst_klass_addr); 2433 __ cmp(tmp, rscratch1); 2434 } 2435 __ br(Assembler::NE, halt); 2436 if (UseCompressedClassPointers) { 2437 __ ldrw(rscratch1, src_klass_addr); 2438 __ cmpw(tmp, rscratch1); 2439 } else { 2440 __ ldr(rscratch1, src_klass_addr); 2441 __ cmp(tmp, rscratch1); 2442 } 2443 __ br(Assembler::EQ, known_ok); 2444 } else { 2445 if (UseCompressedClassPointers) { 2446 __ ldrw(rscratch1, dst_klass_addr); 2447 __ cmpw(tmp, rscratch1); 2448 } else { 2449 __ ldr(rscratch1, dst_klass_addr); 2450 __ cmp(tmp, rscratch1); 2451 } 2452 __ br(Assembler::EQ, known_ok); 2453 __ cmp(src, dst); 2454 __ br(Assembler::EQ, known_ok); 2455 } 2456 __ bind(halt); 2457 __ stop("incorrect type information in arraycopy"); 2458 __ bind(known_ok); 2459 } 2460 #endif 2461 2462 #ifndef PRODUCT 2463 if (PrintC1Statistics) { 2464 __ incrementw(ExternalAddress(Runtime1::arraycopy_count_address(basic_type))); 2465 } 2466 #endif 2467 2468 __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale))); 2469 __ add(c_rarg0, c_rarg0, 
arrayOopDesc::base_offset_in_bytes(basic_type)); 2470 assert_different_registers(c_rarg0, dst, dst_pos, length); 2471 __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale))); 2472 __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type)); 2473 assert_different_registers(c_rarg1, dst, length); 2474 __ uxtw(c_rarg2, length); 2475 assert_different_registers(c_rarg2, dst); 2476 2477 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0; 2478 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0; 2479 const char *name; 2480 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false); 2481 2482 CodeBlob *cb = CodeCache::find_blob(entry); 2483 if (cb) { 2484 __ far_call(RuntimeAddress(entry)); 2485 } else { 2486 __ call_VM_leaf(entry, 3); 2487 } 2488 2489 __ bind(*stub->continuation()); 2490 } 2491 2492 2493 2494 2495 void LIR_Assembler::emit_lock(LIR_OpLock* op) { 2496 Register obj = op->obj_opr()->as_register(); // may not be an oop 2497 Register hdr = op->hdr_opr()->as_register(); 2498 Register lock = op->lock_opr()->as_register(); 2499 if (!UseFastLocking) { 2500 __ b(*op->stub()->entry()); 2501 } else if (op->code() == lir_lock) { 2502 Register scratch = noreg; 2503 if (UseBiasedLocking) { 2504 scratch = op->scratch_opr()->as_register(); 2505 } 2506 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2507 // add debug info for NullPointerException only if one is possible 2508 int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry()); 2509 if (op->info() != NULL) { 2510 add_debug_info_for_null_check(null_check_offset, op->info()); 2511 } 2512 // done 2513 } else if (op->code() == lir_unlock) { 2514 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2515 __ unlock_object(hdr, obj, lock, *op->stub()->entry()); 2516 } else { 2517 Unimplemented(); 2518 } 2519 __ 
bind(*op->stub()->continuation()); 2520 } 2521 2522 2523 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { 2524 ciMethod* method = op->profiled_method(); 2525 int bci = op->profiled_bci(); 2526 ciMethod* callee = op->profiled_callee(); 2527 2528 // Update counter for all call types 2529 ciMethodData* md = method->method_data_or_null(); 2530 assert(md != NULL, "Sanity"); 2531 ciProfileData* data = md->bci_to_data(bci); 2532 assert(data->is_CounterData(), "need CounterData for calls"); 2533 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 2534 Register mdo = op->mdo()->as_register(); 2535 __ mov_metadata(mdo, md->constant_encoding()); 2536 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 2537 Bytecodes::Code bc = method->java_code_at_bci(bci); 2538 const bool callee_is_static = callee->is_loaded() && callee->is_static(); 2539 // Perform additional virtual call profiling for invokevirtual and 2540 // invokeinterface bytecodes 2541 if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) && 2542 !callee_is_static && // required for optimized MH invokes 2543 C1ProfileVirtualCalls) { 2544 assert(op->recv()->is_single_cpu(), "recv must be allocated"); 2545 Register recv = op->recv()->as_register(); 2546 assert_different_registers(mdo, recv); 2547 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); 2548 ciKlass* known_klass = op->known_holder(); 2549 if (C1OptimizeVirtualCallProfiling && known_klass != NULL) { 2550 // We know the type that will be seen at this call site; we can 2551 // statically update the MethodData* rather than needing to do 2552 // dynamic tests on the receiver type 2553 2554 // NOTE: we should probably put a lock around this search to 2555 // avoid collisions by concurrent compilations 2556 ciVirtualCallData* vc_data = (ciVirtualCallData*) data; 2557 uint i; 2558 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2559 ciKlass* receiver = 
vc_data->receiver(i); 2560 if (known_klass->equals(receiver)) { 2561 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 2562 __ addptr(data_addr, DataLayout::counter_increment); 2563 return; 2564 } 2565 } 2566 2567 // Receiver type not found in profile data; select an empty slot 2568 2569 // Note that this is less efficient than it should be because it 2570 // always does a write to the receiver part of the 2571 // VirtualCallData rather than just the first time 2572 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2573 ciKlass* receiver = vc_data->receiver(i); 2574 if (receiver == NULL) { 2575 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))); 2576 __ mov_metadata(rscratch1, known_klass->constant_encoding()); 2577 __ lea(rscratch2, recv_addr); 2578 __ str(rscratch1, Address(rscratch2)); 2579 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 2580 __ addptr(data_addr, DataLayout::counter_increment); 2581 return; 2582 } 2583 } 2584 } else { 2585 __ load_klass(recv, recv); 2586 Label update_done; 2587 type_profile_helper(mdo, md, data, recv, &update_done); 2588 // Receiver did not match any saved receiver and there is no empty row for it. 2589 // Increment total counter to indicate polymorphic case. 
2590 __ addptr(counter_addr, DataLayout::counter_increment); 2591 2592 __ bind(update_done); 2593 } 2594 } else { 2595 // Static call 2596 __ addptr(counter_addr, DataLayout::counter_increment); 2597 } 2598 } 2599 2600 2601 void LIR_Assembler::emit_delay(LIR_OpDelay*) { 2602 Unimplemented(); 2603 } 2604 2605 2606 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) { 2607 __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no)); 2608 } 2609 2610 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { 2611 assert(op->crc()->is_single_cpu(), "crc must be register"); 2612 assert(op->val()->is_single_cpu(), "byte value must be register"); 2613 assert(op->result_opr()->is_single_cpu(), "result must be register"); 2614 Register crc = op->crc()->as_register(); 2615 Register val = op->val()->as_register(); 2616 Register res = op->result_opr()->as_register(); 2617 2618 assert_different_registers(val, crc, res); 2619 unsigned long offset; 2620 __ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset); 2621 if (offset) __ add(res, res, offset); 2622 2623 __ ornw(crc, zr, crc); // ~crc 2624 __ update_byte_crc32(crc, val, res); 2625 __ ornw(res, zr, crc); // ~crc 2626 } 2627 2628 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { 2629 COMMENT("emit_profile_type {"); 2630 Register obj = op->obj()->as_register(); 2631 Register tmp = op->tmp()->as_pointer_register(); 2632 Address mdo_addr = as_Address(op->mdp()->as_address_ptr()); 2633 ciKlass* exact_klass = op->exact_klass(); 2634 intptr_t current_klass = op->current_klass(); 2635 bool not_null = op->not_null(); 2636 bool no_conflict = op->no_conflict(); 2637 2638 Label update, next, none; 2639 2640 bool do_null = !not_null; 2641 bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass; 2642 bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set; 2643 2644 assert(do_null || do_update, "why are 
we here?"); 2645 assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?"); 2646 assert(mdo_addr.base() != rscratch1, "wrong register"); 2647 2648 __ verify_oop(obj); 2649 2650 if (tmp != obj) { 2651 __ mov(tmp, obj); 2652 } 2653 if (do_null) { 2654 __ cbnz(tmp, update); 2655 if (!TypeEntries::was_null_seen(current_klass)) { 2656 __ ldr(rscratch2, mdo_addr); 2657 __ orr(rscratch2, rscratch2, TypeEntries::null_seen); 2658 __ str(rscratch2, mdo_addr); 2659 } 2660 if (do_update) { 2661 #ifndef ASSERT 2662 __ b(next); 2663 } 2664 #else 2665 __ b(next); 2666 } 2667 } else { 2668 __ cbnz(tmp, update); 2669 __ stop("unexpected null obj"); 2670 #endif 2671 } 2672 2673 __ bind(update); 2674 2675 if (do_update) { 2676 #ifdef ASSERT 2677 if (exact_klass != NULL) { 2678 Label ok; 2679 __ load_klass(tmp, tmp); 2680 __ mov_metadata(rscratch1, exact_klass->constant_encoding()); 2681 __ eor(rscratch1, tmp, rscratch1); 2682 __ cbz(rscratch1, ok); 2683 __ stop("exact klass and actual klass differ"); 2684 __ bind(ok); 2685 } 2686 #endif 2687 if (!no_conflict) { 2688 if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) { 2689 if (exact_klass != NULL) { 2690 __ mov_metadata(tmp, exact_klass->constant_encoding()); 2691 } else { 2692 __ load_klass(tmp, tmp); 2693 } 2694 2695 __ ldr(rscratch2, mdo_addr); 2696 __ eor(tmp, tmp, rscratch2); 2697 __ andr(rscratch1, tmp, TypeEntries::type_klass_mask); 2698 // klass seen before, nothing to do. The unknown bit may have been 2699 // set already but no need to check. 2700 __ cbz(rscratch1, next); 2701 2702 __ andr(rscratch1, tmp, TypeEntries::type_unknown); 2703 __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore. 
2704 2705 if (TypeEntries::is_type_none(current_klass)) { 2706 __ cbz(rscratch2, none); 2707 __ cmp(rscratch2, TypeEntries::null_seen); 2708 __ br(Assembler::EQ, none); 2709 // There is a chance that the checks above (re-reading profiling 2710 // data from memory) fail if another thread has just set the 2711 // profiling to this obj's klass 2712 __ dmb(Assembler::ISHLD); 2713 __ ldr(rscratch2, mdo_addr); 2714 __ eor(tmp, tmp, rscratch2); 2715 __ andr(rscratch1, tmp, TypeEntries::type_klass_mask); 2716 __ cbz(rscratch1, next); 2717 } 2718 } else { 2719 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && 2720 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only"); 2721 2722 __ ldr(tmp, mdo_addr); 2723 __ andr(rscratch1, tmp, TypeEntries::type_unknown); 2724 __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore. 2725 } 2726 2727 // different than before. Cannot keep accurate profile. 2728 __ ldr(rscratch2, mdo_addr); 2729 __ orr(rscratch2, rscratch2, TypeEntries::type_unknown); 2730 __ str(rscratch2, mdo_addr); 2731 2732 if (TypeEntries::is_type_none(current_klass)) { 2733 __ b(next); 2734 2735 __ bind(none); 2736 // first time here. Set profile type. 
2737 __ str(tmp, mdo_addr); 2738 } 2739 } else { 2740 // There's a single possible klass at this profile point 2741 assert(exact_klass != NULL, "should be"); 2742 if (TypeEntries::is_type_none(current_klass)) { 2743 __ mov_metadata(tmp, exact_klass->constant_encoding()); 2744 __ ldr(rscratch2, mdo_addr); 2745 __ eor(tmp, tmp, rscratch2); 2746 __ andr(rscratch1, tmp, TypeEntries::type_klass_mask); 2747 __ cbz(rscratch1, next); 2748 #ifdef ASSERT 2749 { 2750 Label ok; 2751 __ ldr(rscratch1, mdo_addr); 2752 __ cbz(rscratch1, ok); 2753 __ cmp(rscratch1, TypeEntries::null_seen); 2754 __ br(Assembler::EQ, ok); 2755 // may have been set by another thread 2756 __ dmb(Assembler::ISHLD); 2757 __ mov_metadata(rscratch1, exact_klass->constant_encoding()); 2758 __ ldr(rscratch2, mdo_addr); 2759 __ eor(rscratch2, rscratch1, rscratch2); 2760 __ andr(rscratch2, rscratch2, TypeEntries::type_mask); 2761 __ cbz(rscratch2, ok); 2762 2763 __ stop("unexpected profiling mismatch"); 2764 __ bind(ok); 2765 } 2766 #endif 2767 // first time here. Set profile type. 2768 __ ldr(tmp, mdo_addr); 2769 } else { 2770 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && 2771 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent"); 2772 2773 __ ldr(tmp, mdo_addr); 2774 __ andr(rscratch1, tmp, TypeEntries::type_unknown); 2775 __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore. 2776 2777 __ orr(tmp, tmp, TypeEntries::type_unknown); 2778 __ str(tmp, mdo_addr); 2779 // FIXME: Write barrier needed here? 
      }
    }

    __ bind(next);
  }
  COMMENT("} emit_profile_type");
}


// Backward branch targets get no extra alignment in this port: the hook
// intentionally emits nothing.
void LIR_Assembler::align_backward_branch_target() {
}


// Arithmetic negation. Operand and result must be the same kind of
// register (int, long, float or double); each case asserts that.
void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  if (left->is_single_cpu()) {
    assert(dest->is_single_cpu(), "expect single result reg");
    __ negw(dest->as_register(), left->as_register());        // 32-bit negate
  } else if (left->is_double_cpu()) {
    assert(dest->is_double_cpu(), "expect double result reg");
    __ neg(dest->as_register_lo(), left->as_register_lo());   // 64-bit negate
  } else if (left->is_single_fpu()) {
    assert(dest->is_single_fpu(), "expect single float result reg");
    __ fnegs(dest->as_float_reg(), left->as_float_reg());
  } else {
    assert(left->is_double_fpu(), "expect double float operand reg");
    assert(dest->is_double_fpu(), "expect double float result reg");
    __ fnegd(dest->as_double_reg(), left->as_double_reg());
  }
}


// Materialize the effective address described by 'addr' into the
// destination register.
void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
  __ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr()));
}


// Call the runtime routine at 'dest'.  If the target lies inside a
// CodeBlob in the code cache it is reached with a (possibly trampolined)
// far_call; otherwise the address is loaded into rscratch1 and the call
// goes through blrt, which is also told how many integer and floating
// point arguments the callee takes and what kind of result it returns.
void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");

  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    __ far_call(RuntimeAddress(dest));
  } else {
    __ mov(rscratch1, RuntimeAddress(dest));
    int len = args->length();  // NOTE(review): unused — args->length() is re-read in the loop below
    // Result-kind code passed to blrt: 0 = void, 1 = int/long/oop,
    // 2 = float, 3 = double (see the switch below).
    int type = 0;
    if (! result->is_illegal()) {
      switch (result->type()) {
      case T_VOID:
        type = 0;
        break;
      case T_INT:
      case T_LONG:
      case T_OBJECT:
        type = 1;
        break;
      case T_FLOAT:
        type = 2;
        break;
      case T_DOUBLE:
        type = 3;
        break;
      default:
        ShouldNotReachHere();
        break;
      }
    }
    // Count integer vs. floating-point arguments for blrt.
    int num_gpargs = 0;
    int num_fpargs = 0;
    for (int i = 0; i < args->length(); i++) {
      LIR_Opr arg = args->at(i);
      if (arg->type() == T_FLOAT || arg->type() == T_DOUBLE) {
        num_fpargs++;
      } else {
        num_gpargs++;
      }
    }
    __ blrt(rscratch1, num_gpargs, num_fpargs, type);
  }

  if (info != NULL) {
    add_call_info_here(info);
  }
  __ maybe_isb();
}

// Volatile moves are delegated to the ordinary move_op; only forms with a
// memory operand on at least one side are expected here.
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  if (dest->is_address() || src->is_address()) {
    move_op(src, dest, type, lir_patch_none, info,
            /*pop_fpu_stack*/false, /*unaligned*/false, /*wide*/false);
  } else {
    ShouldNotReachHere();
  }
}

#ifdef ASSERT
// emit run-time assertion: compare the two input operands (when present)
// and, unless the assertion condition holds, either stop the VM with the
// op's message or hit a breakpoint.
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    // No operands: only the unconditional form is legal.
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    // Map the LIR condition onto the AArch64 condition code and branch
    // around the failure code when the assertion holds.
    Assembler::Condition acond = Assembler::AL;
    switch (op->condition()) {
      case lir_cond_equal:        acond = Assembler::EQ;  break;
      case lir_cond_notEqual:     acond = Assembler::NE;  break;
      case lir_cond_less:         acond = Assembler::LT;  break;
      case lir_cond_lessEqual:    acond = Assembler::LE;  break;
      case lir_cond_greaterEqual: acond = Assembler::GE;  break;
      case lir_cond_greater:      acond = Assembler::GT;  break;
      case lir_cond_belowEqual:   acond = Assembler::LS;  break;
      case lir_cond_aboveEqual:   acond = Assembler::HS;  break;
      default:                    ShouldNotReachHere();
    }
    __ br(acond, ok);
  }
  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif

// Identical redefinition of COMMENT from the top of the file (an
// identical macro redefinition is legal C++ and produces no warning).
#ifndef PRODUCT
#define COMMENT(x)   do { __ block_comment(x); } while (0)
#else
#define COMMENT(x)
#endif

// Full two-way barrier.
void LIR_Assembler::membar() {
  COMMENT("membar");
  __ membar(MacroAssembler::AnyAny);
}

// Acquire: earlier loads may not be reordered with later loads or stores.
void LIR_Assembler::membar_acquire() {
  __ membar(Assembler::LoadLoad|Assembler::LoadStore);
}

// Release: earlier loads and stores may not be reordered with later stores.
void LIR_Assembler::membar_release() {
  __ membar(Assembler::LoadStore|Assembler::StoreStore);
}

void LIR_Assembler::membar_loadload() {
  __ membar(Assembler::LoadLoad);
}

void LIR_Assembler::membar_storestore() {
  __ membar(MacroAssembler::StoreStore);
}

void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }

void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }

void LIR_Assembler::on_spin_wait() {
  Unimplemented();
}

// Copy the current thread pointer (rthread) into the result register.
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  __ mov(result_reg->as_register(), rthread);
}


// Peephole pass over the LIR.  The tableswitch-recognizing implementation
// below is entirely compiled out (#if 0), so this is currently a no-op.
void LIR_Assembler::peephole(LIR_List *lir) {
#if 0
  if (tableswitch_count >= max_tableswitches)
    return;

  /*
     This finite-state automaton recognizes sequences of compare-and-
     branch instructions.  We will turn them into a tableswitch.  You
     could argue that C1 really shouldn't be doing this sort of
     optimization, but without it the code is really horrible.
  */

  // States of the recognizer: start, first compare seen, branch-if-equal
  // seen, compare seen.
  enum { start_s, cmp1_s, beq_s, cmp_s } state;
  int first_key, last_key = -2147483648;
  int next_key = 0;
  int start_insn = -1;
  int last_insn = -1;
  Register reg = noreg;
  LIR_Opr reg_opr;
  state = start_s;

  LIR_OpList* inst = lir->instructions_list();
  for (int i = 0; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);
    switch (state) {
    case start_s:
      // Looking for "cmp reg, #int-constant" to start a run.
      first_key = -1;
      start_insn = i;
      switch (op->code()) {
      case lir_cmp:
        LIR_Opr opr1 = op->as_Op2()->in_opr1();
        LIR_Opr opr2 = op->as_Op2()->in_opr2();
        if (opr1->is_cpu_register() && opr1->is_single_cpu()
            && opr2->is_constant()
            && opr2->type() == T_INT) {
          reg_opr = opr1;
          reg = opr1->as_register();
          first_key = opr2->as_constant_ptr()->as_jint();
          next_key = first_key + 1;
          state = cmp_s;
          goto next_state;
        }
        break;
      }
      break;
    case cmp_s:
      // After a compare we need a branch-if-equal to stay in the run.
      switch (op->code()) {
      case lir_branch:
        if (op->as_OpBranch()->cond() == lir_cond_equal) {
          state = beq_s;
          last_insn = i;
          goto next_state;
        }
      }
      state = start_s;
      break;
    case beq_s:
      // After the branch, the run continues only with a compare of the
      // same register against the next consecutive key.
      switch (op->code()) {
      case lir_cmp: {
        LIR_Opr opr1 = op->as_Op2()->in_opr1();
        LIR_Opr opr2 = op->as_Op2()->in_opr2();
        if (opr1->is_cpu_register() && opr1->is_single_cpu()
            && opr1->as_register() == reg
            && opr2->is_constant()
            && opr2->type() == T_INT
            && opr2->as_constant_ptr()->as_jint() == next_key) {
          last_key = next_key;
          next_key++;
          state = cmp_s;
          goto next_state;
        }
      }
      }
      last_key = next_key;
      state = start_s;
      break;
    default:
      assert(false, "impossible state");
    }
    if (state == start_s) {
      // A run just ended; rewrite it as a tableswitch if it is long
      // enough (more than 5 consecutive keys).
      if (first_key < last_key - 5L && reg != noreg) {
        {
          // printf("found run register %d starting at insn %d low value %d high value %d\n",
          //        reg->encoding(),
          //        start_insn, first_key, last_key);
          // for (int i = 0; i < inst->length(); i++) {
          //   inst->at(i)->print();
          //   tty->print("\n");
          // }
          // tty->print("\n");
        }

        struct tableswitch *sw = &switches[tableswitch_count];
        sw->_insn_index = start_insn, sw->_first_key = first_key,
          sw->_last_key = last_key, sw->_reg = reg;
        inst->insert_before(last_insn + 1, new LIR_OpLabel(&sw->_after));
        {
          // Insert the new table of branches
          int offset = last_insn;
          for (int n = first_key; n < last_key; n++) {
            inst->insert_before
              (last_insn + 1,
               new LIR_OpBranch(lir_cond_always, T_ILLEGAL,
                                inst->at(offset)->as_OpBranch()->label()));
            offset -= 2, i++;
          }
        }
        // Delete all the old compare-and-branch instructions
        for (int n = first_key; n < last_key; n++) {
          inst->remove_at(start_insn);
          inst->remove_at(start_insn);
        }
        // Insert the tableswitch instruction
        inst->insert_before(start_insn,
                            new LIR_Op2(lir_cmp, lir_cond_always,
                                        LIR_OprFact::intConst(tableswitch_count),
                                        reg_opr));
        inst->insert_before(start_insn + 1, new LIR_OpLabel(&sw->_branches));
        tableswitch_count++;
      }
      reg = noreg;
      last_key = -2147483648;
    }
  next_state:
    ;
  }
#endif
}

// Emit LIR atomic operations:
//  - lir_xadd: atomic fetch-and-add; previous memory value ends up in dest
//  - lir_xchg: atomic exchange; previous memory value ends up in dest
// A 32-bit (...alw) or 64-bit (...al) MacroAssembler primitive is chosen
// by operand type; the 'al' suffix presumably denotes acquire+release
// ordering (Arm LSE-style naming) — confirm against MacroAssembler.
// Oops use the narrow 32-bit form when UseCompressedOops is set.
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
  Address addr = as_Address(src->as_address_ptr(), noreg);
  BasicType type = src->type();
  bool is_oop = type == T_OBJECT || type == T_ARRAY;

  // Member-function pointers to the selected 32/64-bit primitives.
  void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
  void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);

  switch(type) {
  case T_INT:
    xchg = &MacroAssembler::atomic_xchgalw;
    add = &MacroAssembler::atomic_addalw;
    break;
  case T_LONG:
    xchg = &MacroAssembler::atomic_xchgal;
    add = &MacroAssembler::atomic_addal;
    break;
  case T_OBJECT:
  case T_ARRAY:
    // Narrow oops are 32 bits wide in memory; full oops are 64 bits.
    if (UseCompressedOops) {
      xchg = &MacroAssembler::atomic_xchgalw;
      add = &MacroAssembler::atomic_addalw;
    } else {
      xchg = &MacroAssembler::atomic_xchgal;
      add = &MacroAssembler::atomic_addal;
    }
    break;
  default:
    ShouldNotReachHere();
    // Keep the pointers initialized so release builds stay well-defined.
    xchg = &MacroAssembler::atomic_xchgal;
    add = &MacroAssembler::atomic_addal; // unreachable
  }

  switch (code) {
  case lir_xadd:
    {
      RegisterOrConstant inc;
      Register tmp = as_reg(tmp_op);
      Register dst = as_reg(dest);
      if (data->is_constant()) {
        inc = RegisterOrConstant(as_long(data));
        assert_different_registers(dst, addr.base(), tmp,
                                   rscratch1, rscratch2);
      } else {
        inc = RegisterOrConstant(as_reg(data));
        assert_different_registers(inc.as_register(), dst, addr.base(), tmp,
                                   rscratch1, rscratch2);
      }
      // Form the address in tmp, then atomically add; old value -> dst.
      __ lea(tmp, addr);
      (_masm->*add)(dst, inc, tmp);
      break;
    }
  case lir_xchg:
    {
      Register tmp = tmp_op->as_register();
      Register obj = as_reg(data);
      Register dst = as_reg(dest);
      if (is_oop && UseCompressedOops) {
        // Compress the new oop into rscratch2 before the 32-bit exchange.
        __ encode_heap_oop(rscratch2, obj);
        obj = rscratch2;
      }
      assert_different_registers(obj, addr.base(), tmp, rscratch1, dst);
      __ lea(tmp, addr);
      (_masm->*xchg)(dst, obj, tmp);
      if (is_oop && UseCompressedOops) {
        // The old value came back compressed; widen it to a full oop.
        __ decode_heap_oop(dst);
      }
    }
    break;
  default:
    ShouldNotReachHere();
  }
  // Full barrier after the atomic operation.
  __ membar(__ AnyAny);
}

#undef __