/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"


#ifndef PRODUCT
#define COMMENT(x)   do { __ block_comment(x); } while (0)
#else
#define COMMENT(x)
#endif

NEEDS_CLEANUP // remove these definitions?
const Register IC_Klass    = rscratch2;   // where the IC klass is cached
const Register SYNC_header = r0;   // synchronization header
const Register SHIFT_count = r0;   // where count for shift operations must be

#define __ _masm->

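// If one of the requested temporaries aliases the register that must be
// preserved, substitute the spare 'extra' register so that the returned
// temps are all distinct from 'preserve'.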
static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp2 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2);
}



static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2,
                                       Register &tmp3) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp2 = extra;
  } else if (tmp3 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp3 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2, tmp3);
}

bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}

//--------------fpu register translations-----------------------


address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}


address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

address LIR_Assembler::int_constant(jlong n) {
  address const_addr = __ long_constant(n);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

void LIR_Assembler::set_24bit_FPU() { Unimplemented(); }

void LIR_Assembler::reset_FPU() { Unimplemented(); }

void LIR_Assembler::fpop() { Unimplemented(); }

void LIR_Assembler::fxch(int i) { Unimplemented(); }

void LIR_Assembler::fld(int i) { Unimplemented(); }

void LIR_Assembler::ffree(int i) { Unimplemented(); }

void LIR_Assembler::breakpoint() { Unimplemented(); }

void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }

void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }

bool LIR_Assembler::is_literal_address(LIR_Address* addr) { Unimplemented(); return false; }
//-------------------------------------------

static Register as_reg(LIR_Opr op) {
  return op->is_double_cpu() ? op->as_register_lo() : op->as_register();
}

static jlong as_long(LIR_Opr data) {
  jlong result;
  switch (data->type()) {
  case T_INT:
    result = (data->as_jint());
    break;
  case T_LONG:
    result = (data->as_jlong());
    break;
  default:
    ShouldNotReachHere();
    result = 0;  // unreachable
  }
  return result;
}

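// Convert a LIR_Address into an AArch64 Address.  Register indexes are used
// directly, extended according to their type; a displacement that does not
// fit the scaled-immediate form is first materialized into 'tmp'.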
Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  Register base = addr->base()->as_pointer_register();
  LIR_Opr opr = addr->index();
  if (opr->is_cpu_register()) {
    Register index;
    if (opr->is_single_cpu())
      index = opr->as_register();
    else
      index = opr->as_register_lo();
    assert(addr->disp() == 0, "must be");
    switch(opr->type()) {
      case T_INT:
        return Address(base, index, Address::sxtw(addr->scale()));
      case T_LONG:
        return Address(base, index, Address::lsl(addr->scale()));
      default:
        ShouldNotReachHere();
    }
  } else {
    intptr_t addr_offset = intptr_t(addr->disp());
    if (Address::offset_ok_for_immed(addr_offset, addr->scale()))
      return Address(base, addr_offset, Address::lsl(addr->scale()));
    else {
      __ mov(tmp, addr_offset);
      return Address(base, tmp, Address::lsl(addr->scale()));
    }
  }
  return Address();
}

Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  ShouldNotReachHere();
  return Address();
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}

Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr, rscratch1);  // Ouch
  // FIXME: This needs to be much more clever.  See x86.
}


void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers set up:
  //
  // r2: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame, so the first slot
  // in the locals array is the last local from the interpreter
  // and the last slot is local[0] (the receiver) from the interpreter
  //
  // Similarly with locks: the first lock slot in the osr buffer is the nth
  // lock from the interpreter frame, and the nth lock slot in the osr buffer
  // is the 0th lock in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  //   r2: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ldr(rscratch1, Address(OSR_buf, slot_offset + 1*BytesPerWord));
        __ cbnz(rscratch1, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif
      __ ldr(r19, Address(OSR_buf, slot_offset + 0));
      __ str(r19, frame_map()->address_for_monitor_lock(i));
      __ ldr(r19, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ str(r19, frame_map()->address_for_monitor_object(i));
    }
  }
}


// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
  Register receiver = FrameMap::receiver_opr->as_register();
  Register ic_klass = IC_Klass;
  int start_offset = __ offset();
  __ inline_cache_check(receiver, ic_klass);

  // if icache check fails, then jump to runtime routine
  // Note: RECEIVER must still contain the receiver!
  Label dont;
  __ br(Assembler::EQ, dont);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  // We align the verified entry point unless the method body
  // (including its inline cache check) will fit in a single 64-byte
  // icache line.
  if (! method()->is_accessor() || __ offset() - start_offset > 4 * 4) {
    // force alignment after the cache check.
    __ align(CodeEntryAlignment);
  }

  __ bind(dont);
  return start_offset;
}


void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ mov(reg, zr);
  } else {
    __ movoop(reg, o, /*immediate*/true);
  }
}

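// On AArch64, C1 does not patch field/klass/mirror/appendix accesses in
// place.  Instead we call the runtime patching stub, which resolves the
// constant and then deoptimizes so the access is re-executed with the
// resolved value.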
void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;

  switch (patching_id(info)) {
  case PatchingStub::access_field_id:
    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
    reloc_type = relocInfo::section_word_type;
    break;
  case PatchingStub::load_klass_id:
    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
    reloc_type = relocInfo::metadata_type;
    break;
  case PatchingStub::load_mirror_id:
    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  case PatchingStub::load_appendix_id:
    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  default: ShouldNotReachHere();
  }

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  deoptimize_trap(info);
}


// This specifies the sp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!

  // The frame_map records size in slots (32-bit words)

  // subtract two words to account for return address and link
  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}


int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in r0 and r3
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(r0);

  // search an exception handler (r0: exception oop, r3: throwing pc)
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(r0);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(r19, r0);  // Preserve the exception
  }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::r0_opr);
    stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
    __ unlock_object(r5, r4, r0, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ call_Unimplemented();
#if 0
    __ movptr(Address(rsp, 0), rax);
    __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
#endif
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(r0, r19);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ block_comment("remove_frame and dispatch to the unwind handler");
  __ remove_frame(initial_frame_size_in_bytes());
  __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for the deopt handler
  address handler_base = __ start_a_stub(deopt_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

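  // Make lr point at this deopt site; the deopt blob uses the return
  // address to identify the deoptimization pc.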
  __ adr(lr, pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}

void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
  _masm->code_section()->relocate(adr, relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}

// Rather than take a segfault when the polling page is protected,
// explicitly check for a safepoint in progress and if there is one,
// fake a call to the handler as if a segfault had been caught.
void LIR_Assembler::poll_for_safepoint(relocInfo::relocType rtype, CodeEmitInfo* info) {
  __ mov(rscratch1, SafepointSynchronize::address_of_state());
  __ ldrb(rscratch1, Address(rscratch1));
  Label nope, poll;
  __ cbz(rscratch1, nope);
  __ block_comment("safepoint");
  __ enter();
  __ push(0x3, sp);                // r0 & r1
  __ push(0x3ffffffc, sp);         // integer registers except lr & sp & r0 & r1
  __ adr(r0, poll);
  __ str(r0, Address(rthread, JavaThread::saved_exception_pc_offset()));
  __ mov(rscratch1, CAST_FROM_FN_PTR(address, SharedRuntime::get_poll_stub));
  __ blrt(rscratch1, 1, 0, 1);
  __ maybe_isb();
  __ pop(0x3ffffffc, sp);          // integer registers except lr & sp & r0 & r1
  __ mov(rscratch1, r0);
  __ pop(0x3, sp);                 // r0 & r1
  __ leave();
  __ br(rscratch1);
  address polling_page(os::get_polling_page());
  assert(os::is_poll_address(polling_page), "should be");
  unsigned long off;
  __ adrp(rscratch1, Address(polling_page, rtype), off);
  __ bind(poll);
  if (info)
    add_debug_info_for_branch(info);  // This isn't just debug info:
                                      // it's the oop map
  else
    __ code_section()->relocate(pc(), rtype);
  __ ldrw(zr, Address(rscratch1, off));
  __ bind(nope);
}

void LIR_Assembler::return_op(LIR_Opr result) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0");
  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());
  address polling_page(os::get_polling_page());
  __ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type);
  __ ret(lr);
}

int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  address polling_page(os::get_polling_page());
  guarantee(info != NULL, "Shouldn't be NULL");
  assert(os::is_poll_address(polling_page), "should be");
  unsigned long off;
  __ adrp(rscratch1, Address(polling_page, relocInfo::poll_type), off);
  assert(off == 0, "must be");
  add_debug_info_for_branch(info);  // This isn't just debug info:
                                    // it's the oop map
  __ read_polling_page(rscratch1, relocInfo::poll_type);
  return __ offset();
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg == r31_sp)
    from_reg = sp;
  if (to_reg == r31_sp)
    to_reg = sp;
  __ mov(to_reg, from_reg);
}

void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movw(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
      break;
    }

    case T_OBJECT: {
      if (patch_code == lir_patch_none) {
        jobject2reg(c->as_jobject(), dest->as_register());
      } else {
        jobject2reg_with_patching(dest->as_register(), info);
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
      if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
        __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
      } else {
        __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
        __ ldrs(dest->as_float_reg(), Address(rscratch1));
      }
      break;
    }

    case T_DOUBLE: {
      if (__ operand_valid_for_float_immediate(c->as_jdouble())) {
        __ fmovd(dest->as_double_reg(), (c->as_jdouble()));
      } else {
        __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));
        __ ldrd(dest->as_double_reg(), Address(rscratch1));
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
  case T_OBJECT:
    {
      if (! c->as_jobject())
        __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
      else {
        const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
        reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
      }
    }
    break;
  case T_ADDRESS:
    {
      const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
      reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
    }
    break;
  case T_INT:
  case T_FLOAT:
    {
      Register reg = zr;
      if (c->as_jint_bits() == 0)
        __ strw(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
      else {
        __ movw(rscratch1, c->as_jint_bits());
        __ strw(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix()));
      }
    }
    break;
  case T_LONG:
  case T_DOUBLE:
    {
      Register reg = zr;
      if (c->as_jlong_bits() == 0)
        __ str(zr, frame_map()->address_for_slot(dest->double_stack_ix(),
                                                 lo_word_offset_in_bytes));
      else {
        __ mov(rscratch1, (intptr_t)c->as_jlong_bits());
        __ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(),
                                                        lo_word_offset_in_bytes));
      }
    }
    break;
  default:
    ShouldNotReachHere();
  }
}

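// Store a constant into memory.  Only zero constants reach this path on
// AArch64 (asserted below), so we just pick a store of the right width
// and store zr.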
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* to_addr = dest->as_address_ptr();

  void (Assembler::* insn)(Register Rt, const Address &adr);

  switch (type) {
  case T_ADDRESS:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_LONG:
    assert(c->as_jlong() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_INT:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strw;
    break;
  case T_OBJECT:
  case T_ARRAY:
    assert(c->as_jobject() == 0, "should be");
    if (UseCompressedOops && !wide) {
      insn = &Assembler::strw;
    } else {
      insn = &Assembler::str;
    }
    break;
  case T_CHAR:
  case T_SHORT:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strh;
    break;
  case T_BOOLEAN:
  case T_BYTE:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strb;
    break;
  default:
    ShouldNotReachHere();
    insn = &Assembler::str;  // unreachable
  }

  if (info) add_debug_info_for_null_check_here(info);
  (_masm->*insn)(zr, as_Address(to_addr, rscratch1));
}

void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
    if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
      // Surprising, but we can see a move of a long to T_OBJECT
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);

  } else if (dest->is_single_fpu()) {
    __ fmovs(dest->as_float_reg(), src->as_float_reg());

  } else if (dest->is_double_fpu()) {
    __ fmovd(dest->as_double_reg(), src->as_double_reg());

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  if (src->is_single_cpu()) {
    if (type == T_ARRAY || type == T_OBJECT) {
      __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
      __ verify_oop(src->as_register());
    } else if (type == T_METADATA || type == T_DOUBLE) {
      __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
    } else {
      __ strw(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
    }

  } else if (src->is_double_cpu()) {
    Address dest_addr_LO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
    __ str(src->as_register_lo(), dest_addr_LO);

  } else if (src->is_single_fpu()) {
    Address dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    __ strs(src->as_float_reg(), dest_addr);

  } else if (src->is_double_fpu()) {
    Address dest_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    __ strd(src->as_double_reg(), dest_addr);

  } else {
    ShouldNotReachHere();
  }

}


void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
  LIR_Address* to_addr = dest->as_address_ptr();
  PatchingStub* patch = NULL;
  Register compressed_src = rscratch1;

  if (patch_code != lir_patch_none) {
    deoptimize_trap(info);
    return;
  }

  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(src->as_register());

    __ shenandoah_store_check(as_Address(to_addr), src->as_register());

    if (UseCompressedOops && !wide) {
      __ encode_heap_oop(compressed_src, src->as_register());
    } else {
      compressed_src = src->as_register();
    }
  } else {
    __ shenandoah_store_addr_check(to_addr->base()->as_pointer_register());
  }

  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      __ strs(src->as_float_reg(), as_Address(to_addr));
      break;
    }

    case T_DOUBLE: {
      __ strd(src->as_double_reg(), as_Address(to_addr));
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ strw(compressed_src, as_Address(to_addr, rscratch2));
      } else {
        __ str(compressed_src, as_Address(to_addr));
      }
      break;
    case T_METADATA:
      // We get here to store a method pointer to the stack to pass to
      // a dtrace runtime call. This can't work on 64 bit with
      // compressed klass ptrs: T_METADATA can be a compressed klass
      // ptr or a 64 bit method pointer.
      ShouldNotReachHere();
      __ str(src->as_register(), as_Address(to_addr));
      break;
    case T_ADDRESS:
      __ str(src->as_register(), as_Address(to_addr));
      break;
    case T_INT:
      __ strw(src->as_register(), as_Address(to_addr));
      break;

    case T_LONG: {
      __ str(src->as_register_lo(), as_Address_lo(to_addr));
      break;
    }

    case T_BYTE:    // fall through
    case T_BOOLEAN: {
      __ strb(src->as_register(), as_Address(to_addr));
      break;
    }

    case T_CHAR:    // fall through
    case T_SHORT:
      __ strh(src->as_register(), as_Address(to_addr));
      break;

    default:
      ShouldNotReachHere();
  }
  if (info != NULL) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
    if (type == T_ARRAY || type == T_OBJECT) {
      __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
      __ verify_oop(dest->as_register());
    } else if (type == T_METADATA) {
      __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    } else {
      __ ldrw(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    }

  } else if (dest->is_double_cpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
    __ ldr(dest->as_register_lo(), src_addr_LO);

  } else if (dest->is_single_fpu()) {
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ ldrs(dest->as_float_reg(), src_addr);

  } else if (dest->is_double_fpu()) {
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ ldrd(dest->as_double_reg(), src_addr);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;

  switch (patching_id(info)) {
  case PatchingStub::access_field_id:
    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
    reloc_type = relocInfo::section_word_type;
    break;
  case PatchingStub::load_klass_id:
    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
    reloc_type = relocInfo::metadata_type;
    break;
  case PatchingStub::load_mirror_id:
    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  case PatchingStub::load_appendix_id:
    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  default: ShouldNotReachHere();
  }

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}

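// Stack-to-stack moves go through rscratch1.  Two-slot types (long and
// double) use the long variant of the scratch operand so the full 64-bit
// value is copied.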
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {

  LIR_Opr temp;
  if (type == T_LONG || type == T_DOUBLE)
    temp = FrameMap::rscratch1_long_opr;
  else
    temp = FrameMap::rscratch1_opr;

  stack2reg(src, temp, src->type());
  reg2stack(temp, dest, dest->type(), false);
}


void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
  LIR_Address* addr = src->as_address_ptr();
  LIR_Address* from_addr = src->as_address_ptr();

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(addr->base()->as_pointer_register());
  }

  if (patch_code != lir_patch_none) {
    deoptimize_trap(info);
    return;
  }

  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }
  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      __ ldrs(dest->as_float_reg(), as_Address(from_addr));
      break;
    }

    case T_DOUBLE: {
      __ ldrd(dest->as_double_reg(), as_Address(from_addr));
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ ldrw(dest->as_register(), as_Address(from_addr));
      } else {
        __ ldr(dest->as_register(), as_Address(from_addr));
      }
      break;
    case T_METADATA:
      // We only get here to load a method pointer from the stack for a
      // dtrace runtime call. This can't work on 64 bit with
      // compressed klass ptrs: T_METADATA can be a compressed klass
      // ptr or a 64 bit method pointer.
      ShouldNotReachHere();
      __ ldr(dest->as_register(), as_Address(from_addr));
      break;
    case T_ADDRESS:
      // FIXME: OMG this is a horrible kludge.  Any offset from an
      // address that matches klass_offset_in_bytes() will be loaded
      // as a word, not a long.
      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
        __ ldrw(dest->as_register(), as_Address(from_addr));
      } else {
        __ ldr(dest->as_register(), as_Address(from_addr));
      }
      break;
    case T_INT:
      __ ldrw(dest->as_register(), as_Address(from_addr));
      break;

    case T_LONG: {
      __ ldr(dest->as_register_lo(), as_Address_lo(from_addr));
      break;
    }

    case T_BYTE:
      __ ldrsb(dest->as_register(), as_Address(from_addr));
      break;
    case T_BOOLEAN: {
      __ ldrb(dest->as_register(), as_Address(from_addr));
      break;
    }

    case T_CHAR:
      __ ldrh(dest->as_register(), as_Address(from_addr));
      break;
    case T_SHORT:
      __ ldrsh(dest->as_register(), as_Address(from_addr));
      break;

    default:
      ShouldNotReachHere();
  }

  if (type == T_ARRAY || type == T_OBJECT) {
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }
    __ verify_oop(dest->as_register());
  } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
    if (UseCompressedClassPointers) {
      __ decode_klass_not_null(dest->as_register());
    }
  }
}


int LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  return exact_log2(elem_size);
}

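// The only LIR_Op3 on AArch64 is integer divide/remainder.  Both go
// through corrected_idivl; rscratch1 is used as the temporary instead of
// the supplied tmp operand (see the TODO in the body).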
void LIR_Assembler::emit_op3(LIR_Op3* op) {
  Register Rdividend = op->in_opr1()->as_register();
  Register Rdivisor  = op->in_opr2()->as_register();
  Register Rscratch  = op->in_opr3()->as_register();
  Register Rresult   = op->result_opr()->as_register();
  int divisor = -1;

  /*
  TODO: For some reason, using the Rscratch that gets passed in is
  not possible because the register allocator does not see the tmp reg
  as used, and assigns it the same register as Rdividend. We use rscratch1
  instead.

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor  != Rscratch, "");
  */

  if (Rdivisor == noreg && is_power_of_2(divisor)) {
    // convert division by a power of two into some shifts and logical operations
  }

  if (op->code() == lir_irem) {
    __ corrected_idivl(Rresult, Rdividend, Rdivisor, true, rscratch1);
  } else if (op->code() == lir_idiv) {
    __ corrected_idivl(Rresult, Rdividend, Rdivisor, false, rscratch1);
  } else
    ShouldNotReachHere();
}

void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != NULL) add_debug_info_for_branch(op->info());
    __ b(*(op->label()));
  } else {
    Assembler::Condition acond;
    if (op->code() == lir_cond_float_branch) {
      bool is_unordered = (op->ublock() == op->block());
      // Assembler::EQ does not permit unordered branches, so we add
      // another branch here.  Likewise, Assembler::NE does not permit
      // ordered branches.
      if ((is_unordered && op->cond() == lir_cond_equal)
          || (!is_unordered && op->cond() == lir_cond_notEqual))
        __ br(Assembler::VS, *(op->ublock()->label()));
      switch(op->cond()) {
      case lir_cond_equal:        acond = Assembler::EQ; break;
      case lir_cond_notEqual:     acond = Assembler::NE; break;
      case lir_cond_less:         acond = (is_unordered ? Assembler::LT : Assembler::LO); break;
      case lir_cond_lessEqual:    acond = (is_unordered ? Assembler::LE : Assembler::LS); break;
      case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::HS : Assembler::GE); break;
      case lir_cond_greater:      acond = (is_unordered ? Assembler::HI : Assembler::GT); break;
      default:                    ShouldNotReachHere();
        acond = Assembler::EQ;  // unreachable
      }
    } else {
      switch (op->cond()) {
        case lir_cond_equal:        acond = Assembler::EQ; break;
        case lir_cond_notEqual:     acond = Assembler::NE; break;
        case lir_cond_less:         acond = Assembler::LT; break;
        case lir_cond_lessEqual:    acond = Assembler::LE; break;
        case lir_cond_greaterEqual: acond = Assembler::GE; break;
        case lir_cond_greater:      acond = Assembler::GT; break;
        case lir_cond_belowEqual:   acond = Assembler::LS; break;
        case lir_cond_aboveEqual:   acond = Assembler::HS; break;
        default:                    ShouldNotReachHere();
          acond = Assembler::EQ;  // unreachable
      }
    }
    __ br(acond, *(op->label()));
  }
}

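// Shenandoah write barrier: make sure the result register refers to the
// to-space copy of the object before any store into it, resolving (and,
// during evacuation, copying) the object as needed.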
void LIR_Assembler::emit_opShenandoahWriteBarrier(LIR_OpShenandoahWriteBarrier* op) {

  Register obj = op->in_opr()->as_register();
  Register res = op->result_opr()->as_register();

  Label done;

  __ block_comment("Shenandoah write barrier {");

  if (res != obj) {
    __ mov(res, obj);
  }
  // Check for null.
  if (op->need_null_check()) {
    __ cbz(res, done);
  }

  __ shenandoah_write_barrier(res);

  __ bind(done);

  __ block_comment("} Shenandoah write barrier");

}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2f:
      {
        __ scvtfws(dest->as_float_reg(), src->as_register());
        break;
      }
    case Bytecodes::_i2d:
      {
        __ scvtfwd(dest->as_double_reg(), src->as_register());
        break;
      }
    case Bytecodes::_l2d:
      {
        __ scvtfd(dest->as_double_reg(), src->as_register_lo());
        break;
      }
    case Bytecodes::_l2f:
      {
        __ scvtfs(dest->as_float_reg(), src->as_register_lo());
        break;
      }
    case Bytecodes::_f2d:
      {
        __ fcvts(dest->as_double_reg(), src->as_float_reg());
        break;
      }
    case Bytecodes::_d2f:
      {
        __ fcvtd(dest->as_float_reg(), src->as_double_reg());
        break;
      }
    case Bytecodes::_i2c:
      {
        __ ubfx(dest->as_register(), src->as_register(), 0, 16);
        break;
      }
    case Bytecodes::_i2l:
      {
        __ sxtw(dest->as_register_lo(), src->as_register());
        break;
      }
    case Bytecodes::_i2s:
      {
        __ sxth(dest->as_register(), src->as_register());
        break;
      }
    case Bytecodes::_i2b:
      {
        __ sxtb(dest->as_register(), src->as_register());
        break;
      }
    case Bytecodes::_l2i:
      {
        _masm->block_comment("FIXME: This could be a no-op");
        __ uxtw(dest->as_register(), src->as_register_lo());
        break;
      }
    case Bytecodes::_d2l:
      {
        __ fcvtzd(dest->as_register_lo(), src->as_double_reg());
        break;
      }
    case Bytecodes::_f2i:
      {
        __ fcvtzsw(dest->as_register(), src->as_float_reg());
        break;
      }
    case Bytecodes::_f2l:
      {
        __ fcvtzs(dest->as_register_lo(), src->as_float_reg());
        break;
      }
    case Bytecodes::_d2i:
      {
        __ fcvtzdw(dest->as_register(), src->as_double_reg());
        break;
      }
    default: ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    __ ldrb(rscratch1, Address(op->klass()->as_register(),
                               InstanceKlass::init_state_offset()));
    __ cmpw(rscratch1, InstanceKlass::fully_initialized);
    add_debug_info_for_null_check_here(op->stub()->info());
    __ br(Assembler::NE, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();
  __ uxtw(len, len);

  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ b(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::header_size(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}

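// Update the receiver-type profile row for klass 'recv': bump the count of
// an existing row that matches, or claim the first empty row, record the
// klass there and seed its count.  Branches to 'update_done' once a row has
// been updated; falls through if the table is full.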
void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ ldr(rscratch1, Address(rscratch2));
    __ cmp(recv, rscratch1);
    __ br(Assembler::NE, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ b(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    __ lea(rscratch2,
           Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    Address recv_addr(rscratch2);
    __ ldr(rscratch1, recv_addr);
    __ cbnz(rscratch1, next_test);
    __ str(recv, recv_addr);
    __ mov(rscratch1, DataLayout::counter_increment);
    __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
    __ str(rscratch1, Address(rscratch2));
    __ b(*update_done);
    __ bind(next_test);
  }
}

void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;

  // check if it needs to be profiled
  ciMethodData* md;
  ciProfileData* data;

  const bool should_profile = op->should_profile();

  if (should_profile) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != NULL,                "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  Label profile_cast_success, profile_cast_failure;
  Label *success_target = should_profile ? &profile_cast_success : success;
  Label *failure_target = should_profile ? &profile_cast_failure : failure;

  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded() && !UseCompressedClassPointers) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  if (should_profile) {
    Label not_null;
    __ cbnz(obj, not_null);
    // Object is null; update MDO and exit
    Register mdo  = klass_RInfo;
    __ mov_metadata(mdo, md->constant_encoding());
    Address data_addr
      = __ form_address(rscratch2, mdo,
                        md->byte_offset_of_slot(data, DataLayout::header_offset()),
                        LogBytesPerWord);
    int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
    __ ldr(rscratch1, data_addr);
    __ orr(rscratch1, rscratch1, header_bits);
    __ str(rscratch1, data_addr);
    __ b(*obj_is_null);
    __ bind(not_null);
  } else {
    __ cbz(obj, *obj_is_null);
  }

  if (!k->is_loaded()) {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
    __ mov_metadata(k_RInfo, k->constant_encoding());
  }
  __ verify_oop(obj);

  if (op->fast_check()) {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(rscratch1, obj);
    __ cmp(rscratch1, k_RInfo);

    __ br(Assembler::NE, *failure_target);
    // successful cast, fall through to profile or jump
  } else {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(klass_RInfo, obj);
    if (k->is_loaded()) {
      // See if we get an immediate positive hit
      __ ldr(rscratch1, Address(klass_RInfo, long(k->super_check_offset())));
      __ cmp(k_RInfo, rscratch1);
      if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
        __ br(Assembler::NE, *failure_target);
        // successful cast, fall through to profile or jump
      } else {
        // See if we get an immediate positive hit
        __ br(Assembler::EQ, *success_target);
        // check for self
        __ cmp(klass_RInfo, k_RInfo);
        __ br(Assembler::EQ, *success_target);

        __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
        __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
        __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
        // result is a boolean
        __ cbzw(klass_RInfo, *failure_target);
        // successful cast, fall through to profile or jump
      }
    } else {
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
      __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
      // result is a boolean
      __ cbz(k_RInfo, *failure_target);
      // successful cast, fall through to profile or jump
    }
  }
  if (should_profile) {
    Register mdo  = klass_RInfo, recv = k_RInfo;
    __ bind(profile_cast_success);
    __ mov_metadata(mdo, md->constant_encoding());
    __ load_klass(recv, obj);
    Label update_done;
    type_profile_helper(mdo, md, data, recv, success);
    __ b(*success);

    __ bind(profile_cast_failure);
    __ mov_metadata(mdo, md->constant_encoding());
    Address counter_addr
      = __ form_address(rscratch2, mdo,
                        md->byte_offset_of_slot(data, CounterData::count_offset()),
                        LogBytesPerWord);
    __ ldr(rscratch1, counter_addr);
    __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
    __ str(rscratch1, counter_addr);
    __ b(*failure);
  }
  __ b(*success);
}

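// Type checks.  lir_store_check covers aastore; checkcast and instanceof
// are built on emit_typecheck_helper above.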
1478 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
1479   const bool should_profile = op->should_profile();
1480 
1481   LIR_Code code = op->code();
1482   if (code == lir_store_check) {
1483     Register value = op->object()->as_register();
1484     Register array = op->array()->as_register();
1485     Register k_RInfo = op->tmp1()->as_register();
1486     Register klass_RInfo = op->tmp2()->as_register();
1487     Register Rtmp1 = op->tmp3()->as_register();
1488 
1489     CodeStub* stub = op->stub();
1490 
1491     // check if it needs to be profiled
1492     ciMethodData* md;
1493     ciProfileData* data;
1494 
1495     if (should_profile) {
1496       ciMethod* method = op->profiled_method();
1497       assert(method != NULL, "Should have method");
1498       int bci = op->profiled_bci();
1499       md = method->method_data_or_null();
1500       assert(md != NULL, "Sanity");
1501       data = md->bci_to_data(bci);
1502       assert(data != NULL,                "need data for type check");
1503       assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1504     }
1505     Label profile_cast_success, profile_cast_failure, done;
1506     Label *success_target = should_profile ? &profile_cast_success : &done;
1507     Label *failure_target = should_profile ? &profile_cast_failure : stub->entry();
1508 
1509     if (should_profile) {
1510       Label not_null;
1511       __ cbnz(value, not_null);
1512       // Object is null; update MDO and exit
1513       Register mdo  = klass_RInfo;
1514       __ mov_metadata(mdo, md->constant_encoding());
1515       Address data_addr
1516         = __ form_address(rscratch2, mdo,
1517                           md->byte_offset_of_slot(data, DataLayout::header_offset()),
1518                           LogBytesPerInt);
1519       int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
1520       __ ldrw(rscratch1, data_addr);
1521       __ orrw(rscratch1, rscratch1, header_bits);
1522       __ strw(rscratch1, data_addr);
1523       __ b(done);
1524       __ bind(not_null);
1525     } else {
1526       __ cbz(value, done);
1527     }
1528 
1529     add_debug_info_for_null_check_here(op->info_for_exception());
1530     __ load_klass(k_RInfo, array);
1531     __ load_klass(klass_RInfo, value);
1532 
    // get the element klass (it's already uncompressed)
1534     __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
1535     // perform the fast part of the checking logic
1536     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1537     // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1538     __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1539     __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1540     __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1541     // result is a boolean
1542     __ cbzw(k_RInfo, *failure_target);
1543     // fall through to the success case
1544 
1545     if (should_profile) {
1546       Register mdo  = klass_RInfo, recv = k_RInfo;
1547       __ bind(profile_cast_success);
1548       __ mov_metadata(mdo, md->constant_encoding());
1549       __ load_klass(recv, value);
      type_profile_helper(mdo, md, data, recv, &done);
1552       __ b(done);
1553 
1554       __ bind(profile_cast_failure);
1555       __ mov_metadata(mdo, md->constant_encoding());
1556       Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1557       __ lea(rscratch2, counter_addr);
1558       __ ldr(rscratch1, Address(rscratch2));
1559       __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
1560       __ str(rscratch1, Address(rscratch2));
1561       __ b(*stub->entry());
1562     }
1563 
1564     __ bind(done);
1565   } else if (code == lir_checkcast) {
1566     Register obj = op->object()->as_register();
1567     Register dst = op->result_opr()->as_register();
1568     Label success;
1569     emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
1570     __ bind(success);
1571     if (dst != obj) {
1572       __ mov(dst, obj);
1573     }
1574   } else if (code == lir_instanceof) {
1575     Register obj = op->object()->as_register();
1576     Register dst = op->result_opr()->as_register();
1577     Label success, failure, done;
1578     emit_typecheck_helper(op, &success, &failure, &failure);
1579     __ bind(failure);
1580     __ mov(dst, zr);
1581     __ b(done);
1582     __ bind(success);
1583     __ mov(dst, 1);
1584     __ bind(done);
1585   } else {
1586     ShouldNotReachHere();
1587   }
1588 }
1589 
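// casw/casl compare-and-swap the word (resp. doubleword) at [addr].  They
// leave 0 in rscratch1 on success and 1 on failure; callers invert that
// with eorw to get the usual 1-means-success result.  The trailing AnyAny
// membar upgrades the operation to a full two-way fence, stronger than
// the acquire/release the cmpxchg itself provides.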
1590 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1591   __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1592   __ cset(rscratch1, Assembler::NE);
1593   __ membar(__ AnyAny);
1594 }
1595 
1596 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1597   __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1598   __ cset(rscratch1, Assembler::NE);
1599   __ membar(__ AnyAny);
1600 }
1601 
1602 
1603 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1604   Register addr = as_reg(op->addr());
1605   Register newval = as_reg(op->new_value());
1606   Register cmpval = as_reg(op->cmp_value());
1607   Register res = op->result_opr()->as_register();
1608 
1609   if (op->code() == lir_cas_obj) {
1610     assert(op->tmp1()->is_valid(), "must be");
1611     Register t1 = op->tmp1()->as_register();
1612     if (UseCompressedOops) {
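      // With compressed oops the expected and new values must be encoded
      // before the CAS.  Encode into temporaries so the original
      // (uncompressed) inputs stay intact.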
1613       if (UseShenandoahGC) {
1614         __ encode_heap_oop(t1, cmpval);
1615         cmpval = t1;
1616         assert(op->tmp2()->is_valid(), "must be");
1617         Register t2 = op->tmp2()->as_register();
1618         __ encode_heap_oop(t2, newval);
1619         newval = t2;
1620         __ cmpxchg_oop_shenandoah(res, addr, cmpval, newval, true, true, true);
1621       } else {
1622         __ encode_heap_oop(t1, cmpval);
1623         cmpval = t1;
1624         __ encode_heap_oop(rscratch2, newval);
1625         newval = rscratch2;
1626         casw(addr, newval, cmpval);
        __ eorw(res, rscratch1, 1); // rscratch1 (r8): 0 on success, 1 on failure
1628       }
1629     } else {
1630       if (UseShenandoahGC) {
1631         __ cmpxchg_oop_shenandoah(res, addr, cmpval, newval, false, true, true);
1632       } else {
1633         casl(addr, newval, cmpval);
        __ eorw(res, rscratch1, 1);
1635       }
1636     }
1637   } else if (op->code() == lir_cas_int) {
1638     casw(addr, newval, cmpval);
    __ eorw(res, rscratch1, 1);
1640   } else {
1641     casl(addr, newval, cmpval);
    __ eorw(res, rscratch1, 1);
1643   }
1644 }
1645 
1646 
1647 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
1648 
1649   Assembler::Condition acond, ncond;
1650   switch (condition) {
1651   case lir_cond_equal:        acond = Assembler::EQ; ncond = Assembler::NE; break;
1652   case lir_cond_notEqual:     acond = Assembler::NE; ncond = Assembler::EQ; break;
1653   case lir_cond_less:         acond = Assembler::LT; ncond = Assembler::GE; break;
1654   case lir_cond_lessEqual:    acond = Assembler::LE; ncond = Assembler::GT; break;
1655   case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break;
1656   case lir_cond_greater:      acond = Assembler::GT; ncond = Assembler::LE; break;
1657   case lir_cond_belowEqual:
1658   case lir_cond_aboveEqual:
1659   default:                    ShouldNotReachHere();
1660     acond = Assembler::EQ; ncond = Assembler::NE;  // unreachable
1661   }
1662 
1663   assert(result->is_single_cpu() || result->is_double_cpu(),
1664          "expect single register for result");
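  // Fast path: selecting between the constants 0 and 1 (int or long)
  // collapses to a single cset on the appropriate condition, avoiding two
  // constant loads and a csel.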
1665   if (opr1->is_constant() && opr2->is_constant()
1666       && opr1->type() == T_INT && opr2->type() == T_INT) {
1667     jint val1 = opr1->as_jint();
1668     jint val2 = opr2->as_jint();
1669     if (val1 == 0 && val2 == 1) {
1670       __ cset(result->as_register(), ncond);
1671       return;
1672     } else if (val1 == 1 && val2 == 0) {
1673       __ cset(result->as_register(), acond);
1674       return;
1675     }
1676   }
1677 
1678   if (opr1->is_constant() && opr2->is_constant()
1679       && opr1->type() == T_LONG && opr2->type() == T_LONG) {
1680     jlong val1 = opr1->as_jlong();
1681     jlong val2 = opr2->as_jlong();
1682     if (val1 == 0 && val2 == 1) {
1683       __ cset(result->as_register_lo(), ncond);
1684       return;
1685     } else if (val1 == 1 && val2 == 0) {
1686       __ cset(result->as_register_lo(), acond);
1687       return;
1688     }
1689   }
1690 
1691   if (opr1->is_stack()) {
1692     stack2reg(opr1, FrameMap::rscratch1_opr, result->type());
1693     opr1 = FrameMap::rscratch1_opr;
1694   } else if (opr1->is_constant()) {
1695     LIR_Opr tmp
1696       = opr1->type() == T_LONG ? FrameMap::rscratch1_long_opr : FrameMap::rscratch1_opr;
1697     const2reg(opr1, tmp, lir_patch_none, NULL);
1698     opr1 = tmp;
1699   }
1700 
1701   if (opr2->is_stack()) {
1702     stack2reg(opr2, FrameMap::rscratch2_opr, result->type());
1703     opr2 = FrameMap::rscratch2_opr;
1704   } else if (opr2->is_constant()) {
1705     LIR_Opr tmp
1706       = opr2->type() == T_LONG ? FrameMap::rscratch2_long_opr : FrameMap::rscratch2_opr;
1707     const2reg(opr2, tmp, lir_patch_none, NULL);
1708     opr2 = tmp;
1709   }
1710 
1711   if (result->type() == T_LONG)
1712     __ csel(result->as_register_lo(), opr1->as_register_lo(), opr2->as_register_lo(), acond);
1713   else
1714     __ csel(result->as_register(), opr1->as_register(), opr2->as_register(), acond);
1715 }
1716 
1717 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
1718   assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
1719 
1720   if (left->is_single_cpu()) {
1721     Register lreg = left->as_register();
1722     Register dreg = as_reg(dest);
1723 
1724     if (right->is_single_cpu()) {
1725       // cpu register - cpu register
1726 
1727       assert(left->type() == T_INT && right->type() == T_INT && dest->type() == T_INT,
1728              "should be");
1729       Register rreg = right->as_register();
1730       switch (code) {
1731       case lir_add: __ addw (dest->as_register(), lreg, rreg); break;
1732       case lir_sub: __ subw (dest->as_register(), lreg, rreg); break;
1733       case lir_mul: __ mulw (dest->as_register(), lreg, rreg); break;
1734       default:      ShouldNotReachHere();
1735       }
1736 
1737     } else if (right->is_double_cpu()) {
1738       Register rreg = right->as_register_lo();
1739       // single_cpu + double_cpu: can happen with obj+long
1740       assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
1741       switch (code) {
1742       case lir_add: __ add(dreg, lreg, rreg); break;
1743       case lir_sub: __ sub(dreg, lreg, rreg); break;
1744       default: ShouldNotReachHere();
1745       }
1746     } else if (right->is_constant()) {
1747       // cpu register - constant
1748       jlong c;
1749 
1750       // FIXME.  This is fugly: we really need to factor all this logic.
1751       switch(right->type()) {
1752       case T_LONG:
1753         c = right->as_constant_ptr()->as_jlong();
1754         break;
1755       case T_INT:
1756       case T_ADDRESS:
1757         c = right->as_constant_ptr()->as_jint();
1758         break;
1759       default:
1760         ShouldNotReachHere();
1761         c = 0;  // unreachable
1762         break;
1763       }
1764 
1765       assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
1766       if (c == 0 && dreg == lreg) {
1767         COMMENT("effective nop elided");
1768         return;
1769       }
1770       switch(left->type()) {
1771       case T_INT:
1772         switch (code) {
1773         case lir_add: __ addw(dreg, lreg, c); break;
1774         case lir_sub: __ subw(dreg, lreg, c); break;
1775         default: ShouldNotReachHere();
1776         }
1777         break;
1778       case T_OBJECT:
1779       case T_ADDRESS:
1780         switch (code) {
1781         case lir_add: __ add(dreg, lreg, c); break;
1782         case lir_sub: __ sub(dreg, lreg, c); break;
1783         default: ShouldNotReachHere();
1784         }
        break;
      default:
        ShouldNotReachHere();
      }
1788     } else {
1789       ShouldNotReachHere();
1790     }
1791 
1792   } else if (left->is_double_cpu()) {
1793     Register lreg_lo = left->as_register_lo();
1794 
1795     if (right->is_double_cpu()) {
1796       // cpu register - cpu register
1797       Register rreg_lo = right->as_register_lo();
1798       switch (code) {
1799       case lir_add: __ add (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1800       case lir_sub: __ sub (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1801       case lir_mul: __ mul (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1802       case lir_div: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, false, rscratch1); break;
1803       case lir_rem: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, true, rscratch1); break;
1804       default:
1805         ShouldNotReachHere();
1806       }
1807 
1808     } else if (right->is_constant()) {
1809       jlong c = right->as_constant_ptr()->as_jlong_bits();
1810       Register dreg = as_reg(dest);
1811       assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
1812       if (c == 0 && dreg == lreg_lo) {
1813         COMMENT("effective nop elided");
1814         return;
1815       }
1816       switch (code) {
1817         case lir_add: __ add(dreg, lreg_lo, c); break;
1818         case lir_sub: __ sub(dreg, lreg_lo, c); break;
1819         default:
1820           ShouldNotReachHere();
1821       }
1822     } else {
1823       ShouldNotReachHere();
1824     }
1825   } else if (left->is_single_fpu()) {
1826     assert(right->is_single_fpu(), "right hand side of float arithmetics needs to be float register");
1827     switch (code) {
1828     case lir_add: __ fadds (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1829     case lir_sub: __ fsubs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1830     case lir_mul: __ fmuls (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1831     case lir_div: __ fdivs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1832     default:
1833       ShouldNotReachHere();
1834     }
1835   } else if (left->is_double_fpu()) {
1836     if (right->is_double_fpu()) {
1837       // cpu register - cpu register
1838       switch (code) {
1839       case lir_add: __ faddd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1840       case lir_sub: __ fsubd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1841       case lir_mul: __ fmuld (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1842       case lir_div: __ fdivd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1843       default:
1844         ShouldNotReachHere();
1845       }
1846     } else {
1847       if (right->is_constant()) {
1848         ShouldNotReachHere();
1849       }
1850       ShouldNotReachHere();
1851     }
1852   } else if (left->is_single_stack() || left->is_address()) {
1853     assert(left == dest, "left and dest must be equal");
1854     ShouldNotReachHere();
1855   } else {
1856     ShouldNotReachHere();
1857   }
1858 }
1859 
1860 void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) { Unimplemented(); }
1861 
1862 
1863 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
1864   switch(code) {
1865   case lir_abs : __ fabsd(dest->as_double_reg(), value->as_double_reg()); break;
1866   case lir_sqrt: __ fsqrtd(dest->as_double_reg(), value->as_double_reg()); break;
1867   default      : ShouldNotReachHere();
1868   }
1869 }
1870 
1871 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1872 
1873   assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register");
1874   Register Rleft = left->is_single_cpu() ? left->as_register() :
1875                                            left->as_register_lo();
1876    if (dst->is_single_cpu()) {
1877      Register Rdst = dst->as_register();
1878      if (right->is_constant()) {
1879        switch (code) {
1880          case lir_logic_and: __ andw (Rdst, Rleft, right->as_jint()); break;
1881          case lir_logic_or:  __ orrw (Rdst, Rleft, right->as_jint()); break;
1882          case lir_logic_xor: __ eorw (Rdst, Rleft, right->as_jint()); break;
1883          default: ShouldNotReachHere(); break;
1884        }
1885      } else {
1886        Register Rright = right->is_single_cpu() ? right->as_register() :
1887                                                   right->as_register_lo();
1888        switch (code) {
1889          case lir_logic_and: __ andw (Rdst, Rleft, Rright); break;
1890          case lir_logic_or:  __ orrw (Rdst, Rleft, Rright); break;
1891          case lir_logic_xor: __ eorw (Rdst, Rleft, Rright); break;
1892          default: ShouldNotReachHere(); break;
1893        }
1894      }
1895    } else {
1896      Register Rdst = dst->as_register_lo();
1897      if (right->is_constant()) {
1898        switch (code) {
1899          case lir_logic_and: __ andr (Rdst, Rleft, right->as_jlong()); break;
1900          case lir_logic_or:  __ orr (Rdst, Rleft, right->as_jlong()); break;
1901          case lir_logic_xor: __ eor (Rdst, Rleft, right->as_jlong()); break;
1902          default: ShouldNotReachHere(); break;
1903        }
1904      } else {
1905        Register Rright = right->is_single_cpu() ? right->as_register() :
1906                                                   right->as_register_lo();
1907        switch (code) {
1908          case lir_logic_and: __ andr (Rdst, Rleft, Rright); break;
1909          case lir_logic_or:  __ orr (Rdst, Rleft, Rright); break;
1910          case lir_logic_xor: __ eor (Rdst, Rleft, Rright); break;
1911          default: ShouldNotReachHere(); break;
1912        }
1913      }
1914    }
1915 }
1916 
1917 
1918 
1919 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { Unimplemented(); }
1920 
1921 
1922 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1923   if (opr1->is_constant() && opr2->is_single_cpu()) {
1924     // tableswitch
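    // A constant first operand is a marker: its value indexes the
    // 'switches' table built by the tableswitch peephole (see peephole()
    // below, currently disabled under #if 0), and we emit the whole table
    // switch here.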
1925     Register reg = as_reg(opr2);
1926     struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
1927     __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
1928   } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
1929     Register reg1 = as_reg(opr1);
1930     if (opr2->is_single_cpu()) {
1931       // cpu register - cpu register
1932       Register reg2 = opr2->as_register();
1933       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
1934         __ cmp(reg1, reg2);
1935         if (UseShenandoahGC) {
1936           oopDesc::bs()->asm_acmp_barrier(masm(), reg1, reg2);
1937         }
1938       } else {
1939         assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
1940         __ cmpw(reg1, reg2);
1941       }
1942       return;
1943     }
1944     if (opr2->is_double_cpu()) {
1945       // cpu register - cpu register
1946       Register reg2 = opr2->as_register_lo();
1947       __ cmp(reg1, reg2);
1948       return;
1949     }
1950 
1951     if (opr2->is_constant()) {
1952       jlong imm;
1953       switch(opr2->type()) {
1954       case T_LONG:
1955         imm = opr2->as_constant_ptr()->as_jlong();
1956         break;
1957       case T_INT:
1958       case T_ADDRESS:
1959         imm = opr2->as_constant_ptr()->as_jint();
1960         break;
1961       case T_OBJECT:
1962       case T_ARRAY:
1963         imm = jlong(opr2->as_constant_ptr()->as_jobject());
1964         break;
1965       default:
1966         ShouldNotReachHere();
1967         imm = 0;  // unreachable
1968         break;
1969       }
1970 
1971       if (opr2->type() == T_OBJECT || opr2->type() == T_ARRAY) {
1972         jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
1973         __ cmp(reg1, rscratch1);
1974         if (UseShenandoahGC
1975             && (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY)) {
1976           oopDesc::bs()->asm_acmp_barrier(masm(), reg1, rscratch1);
1977         }
1978         return;
1979       }
1980 
1981       if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
1982         if (type2aelembytes(opr1->type()) <= 4)
1983           __ cmpw(reg1, imm);
1984         else
1985           __ cmp(reg1, imm);
1986         return;
1987       } else {
1988         __ mov(rscratch1, imm);
1989         if (type2aelembytes(opr1->type()) <= 4)
1990           __ cmpw(reg1, rscratch1);
1991         else
1992           __ cmp(reg1, rscratch1);
1993         return;
1994       }
1995     } else
1996       ShouldNotReachHere();
1997   } else if (opr1->is_single_fpu()) {
1998     FloatRegister reg1 = opr1->as_float_reg();
1999     assert(opr2->is_single_fpu(), "expect single float register");
2000     FloatRegister reg2 = opr2->as_float_reg();
2001     __ fcmps(reg1, reg2);
2002   } else if (opr1->is_double_fpu()) {
2003     FloatRegister reg1 = opr1->as_double_reg();
2004     assert(opr2->is_double_fpu(), "expect double float register");
2005     FloatRegister reg2 = opr2->as_double_reg();
2006     __ fcmpd(reg1, reg2);
2007   } else {
2008     ShouldNotReachHere();
2009   }
2010 }
2011 
2012 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
2013   if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
2014     bool is_unordered_less = (code == lir_ucmp_fd2i);
2015     if (left->is_single_fpu()) {
2016       __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
2017     } else if (left->is_double_fpu()) {
2018       __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
2019     } else {
2020       ShouldNotReachHere();
2021     }
2022   } else if (code == lir_cmp_l2i) {
2023     Label done;
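    // Produce the usual -1/0/+1 result: preload -1 and keep it when LT;
    // otherwise csinc yields 0 for EQ and +1 for GT.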
2024     __ cmp(left->as_register_lo(), right->as_register_lo());
2025     __ mov(dst->as_register(), (u_int64_t)-1L);
2026     __ br(Assembler::LT, done);
2027     __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
2028     __ bind(done);
2029   } else {
2030     ShouldNotReachHere();
2031   }
2032 }
2033 
2034 
2035 void LIR_Assembler::align_call(LIR_Code code) {  }
2036 
2037 
2038 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2039   address call = __ trampoline_call(Address(op->addr(), rtype));
2040   if (call == NULL) {
2041     bailout("trampoline stub overflow");
2042     return;
2043   }
2044   add_call_info(code_offset(), op->info());
2045 }
2046 
2047 
2048 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2049   address call = __ ic_call(op->addr());
2050   if (call == NULL) {
2051     bailout("trampoline stub overflow");
2052     return;
2053   }
2054   add_call_info(code_offset(), op->info());
2055 }
2056 
2057 
2058 /* Currently, vtable-dispatch is only enabled for sparc platforms */
2059 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
2060   ShouldNotReachHere();
2061 }
2062 
2063 
2064 void LIR_Assembler::emit_static_call_stub() {
2065   address call_pc = __ pc();
2066   address stub = __ start_a_stub(call_stub_size);
2067   if (stub == NULL) {
2068     bailout("static call stub overflow");
2069     return;
2070   }
2071 
2072   int start = __ offset();
2073 
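  // Standard static call stub: the Method* load and the branch target
  // both start out as zero and are patched in when the call site is
  // resolved; the relocation ties the stub back to call_pc.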
2074   __ relocate(static_stub_Relocation::spec(call_pc));
2075   __ mov_metadata(rmethod, (Metadata*)NULL);
2076   __ movptr(rscratch1, 0);
2077   __ br(rscratch1);
2078 
2079   assert(__ offset() - start <= call_stub_size, "stub too big");
2080   __ end_a_stub();
2081 }
2082 
2083 
2084 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2085   assert(exceptionOop->as_register() == r0, "must match");
2086   assert(exceptionPC->as_register() == r3, "must match");
2087 
2088   // exception object is not added to oop map by LinearScan
2089   // (LinearScan assumes that no oops are in fixed registers)
2090   info->add_register_oop(exceptionOop);
2091   Runtime1::StubID unwind_id;
2092 
2093   // get current pc information
2094   // pc is only needed if the method has an exception handler, the unwind code does not need it.
2095   int pc_for_athrow_offset = __ offset();
2096   InternalAddress pc_for_athrow(__ pc());
2097   __ adr(exceptionPC->as_register(), pc_for_athrow);
2098   add_call_info(pc_for_athrow_offset, info); // for exception handler
2099 
2100   __ verify_not_null_oop(r0);
2101   // search an exception handler (r0: exception oop, r3: throwing pc)
2102   if (compilation()->has_fpu_code()) {
2103     unwind_id = Runtime1::handle_exception_id;
2104   } else {
2105     unwind_id = Runtime1::handle_exception_nofpu_id;
2106   }
2107   __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
2108 
  // FIXME: is there enough room for a two-byte trap?
2110   __ nop();
2111 }
2112 
2113 
2114 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
2115   assert(exceptionOop->as_register() == r0, "must match");
2116 
2117   __ b(_unwind_handler_entry);
2118 }
2119 
2120 
2121 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2122   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2123   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
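  // AArch64 variable shifts use only the low 5 (w-form) or 6 (x-form)
  // bits of the count register, which matches Java shift semantics, so no
  // explicit masking of the count is needed.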
2124 
2125   switch (left->type()) {
    case T_INT:
2127       switch (code) {
2128       case lir_shl:  __ lslvw (dreg, lreg, count->as_register()); break;
2129       case lir_shr:  __ asrvw (dreg, lreg, count->as_register()); break;
2130       case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;
2131       default:
2132         ShouldNotReachHere();
2133         break;
2134       }
2135       break;
2136     case T_LONG:
2137     case T_ADDRESS:
2138     case T_OBJECT:
2139       switch (code) {
2140       case lir_shl:  __ lslv (dreg, lreg, count->as_register()); break;
2141       case lir_shr:  __ asrv (dreg, lreg, count->as_register()); break;
2142       case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;
2143       default:
2144         ShouldNotReachHere();
2145         break;
2146       }
2147       break;
2148     default:
2149       ShouldNotReachHere();
2150       break;
  }
}
2154 
2155 
2156 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2157   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2158   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2159 
2160   switch (left->type()) {
    case T_INT:
2162       switch (code) {
2163       case lir_shl:  __ lslw (dreg, lreg, count); break;
2164       case lir_shr:  __ asrw (dreg, lreg, count); break;
2165       case lir_ushr: __ lsrw (dreg, lreg, count); break;
2166       default:
2167         ShouldNotReachHere();
2168         break;
2169       }
2170       break;
2171     case T_LONG:
2172     case T_ADDRESS:
2173     case T_OBJECT:
2174       switch (code) {
2175       case lir_shl:  __ lsl (dreg, lreg, count); break;
2176       case lir_shr:  __ asr (dreg, lreg, count); break;
2177       case lir_ushr: __ lsr (dreg, lreg, count); break;
2178       default:
2179         ShouldNotReachHere();
2180         break;
2181       }
2182       break;
2183     default:
2184       ShouldNotReachHere();
2185       break;
  }
}
2189 
2190 
2191 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2192   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2193   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2194   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2195   __ str (r, Address(sp, offset_from_rsp_in_bytes));
2196 }
2197 
2198 
2199 void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2200   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2201   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2202   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2203   __ mov (rscratch1, c);
2204   __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2205 }
2206 
2207 
2208 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
2209   ShouldNotReachHere();
2210   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2211   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2212   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2213   __ lea(rscratch1, __ constant_oop_address(o));
2214   __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2215 }
2216 
2217 
// This code replaces a call to arraycopy; no exceptions may be thrown
// in this code: they must be thrown in the System.arraycopy activation
// frame instead.  We could save some checks if this were not the case.
2221 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2222   ciArrayKlass* default_type = op->expected_type();
2223   Register src = op->src()->as_register();
2224   Register dst = op->dst()->as_register();
2225   Register src_pos = op->src_pos()->as_register();
2226   Register dst_pos = op->dst_pos()->as_register();
2227   Register length  = op->length()->as_register();
2228   Register tmp = op->tmp()->as_register();
2229 
2230   CodeStub* stub = op->stub();
2231   int flags = op->flags();
2232   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2233   if (basic_type == T_ARRAY) basic_type = T_OBJECT;
2234 
2235   // if we don't know anything, just go through the generic arraycopy
  if (default_type == NULL /* || basic_type == T_OBJECT */) {
2238     Label done;
2239     assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2240 
2241     // Save the arguments in case the generic arraycopy fails and we
2242     // have to fall back to the JNI stub
2243     __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2244     __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2245     __ str(src,              Address(sp, 4*BytesPerWord));
2246 
2247     address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
2248     address copyfunc_addr = StubRoutines::generic_arraycopy();
2249 
2250     // The arguments are in java calling convention so we shift them
2251     // to C convention
2252     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2253     __ mov(c_rarg0, j_rarg0);
2254     assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
2255     __ mov(c_rarg1, j_rarg1);
2256     assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
2257     __ mov(c_rarg2, j_rarg2);
2258     assert_different_registers(c_rarg3, j_rarg4);
2259     __ mov(c_rarg3, j_rarg3);
2260     __ mov(c_rarg4, j_rarg4);
2261     if (copyfunc_addr == NULL) { // Use C version if stub was not generated
2262       __ mov(rscratch1, RuntimeAddress(C_entry));
2263       __ blrt(rscratch1, 5, 0, 1);
2264     } else {
2265 #ifndef PRODUCT
2266       if (PrintC1Statistics) {
2267         __ incrementw(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
2268       }
2269 #endif
2270       __ far_call(RuntimeAddress(copyfunc_addr));
2271     }
2272 
2273     __ cbz(r0, *stub->continuation());
2274 
2275     // Reload values from the stack so they are where the stub
2276     // expects them.
2277     __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2278     __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2279     __ ldr(src,              Address(sp, 4*BytesPerWord));
2280 
2281     if (copyfunc_addr != NULL) {
      // r0 is -1^K where K == partial copied count
      __ eonw(rscratch1, r0, zr); // rscratch1 = ~r0 = K
      // adjust length down and src/dst pos up by partial copied count
      __ subw(length, length, rscratch1);
      __ addw(src_pos, src_pos, rscratch1);
      __ addw(dst_pos, dst_pos, rscratch1);
2288     }
2289     __ b(*stub->entry());
2290 
2291     __ bind(*stub->continuation());
2292     return;
2293   }
2294 
2295   assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2296 
  int elem_size = type2aelembytes(basic_type);
  int scale = exact_log2(elem_size);
2300 
2301   Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2302   Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2303   Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
2304   Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
2305 
2306   // test for NULL
2307   if (flags & LIR_OpArrayCopy::src_null_check) {
2308     __ cbz(src, *stub->entry());
2309   }
2310   if (flags & LIR_OpArrayCopy::dst_null_check) {
2311     __ cbz(dst, *stub->entry());
2312   }
2313 
2314   // check if negative
2315   if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
2316     __ cmpw(src_pos, 0);
2317     __ br(Assembler::LT, *stub->entry());
2318   }
2319   if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
2320     __ cmpw(dst_pos, 0);
2321     __ br(Assembler::LT, *stub->entry());
2322   }
2323 
2324   if (flags & LIR_OpArrayCopy::length_positive_check) {
2325     __ cmpw(length, 0);
2326     __ br(Assembler::LT, *stub->entry());
2327   }
2328 
2329   if (flags & LIR_OpArrayCopy::src_range_check) {
2330     __ addw(tmp, src_pos, length);
2331     __ ldrw(rscratch1, src_length_addr);
2332     __ cmpw(tmp, rscratch1);
2333     __ br(Assembler::HI, *stub->entry());
2334   }
2335   if (flags & LIR_OpArrayCopy::dst_range_check) {
2336     __ addw(tmp, dst_pos, length);
2337     __ ldrw(rscratch1, dst_length_addr);
2338     __ cmpw(tmp, rscratch1);
2339     __ br(Assembler::HI, *stub->entry());
2340   }
2341 
2342   // FIXME: The logic in LIRGenerator::arraycopy_helper clears
2343   // length_positive_check if the source of our length operand is an
2344   // arraylength.  However, that arraylength might be zero, and the
  // stub that we're about to call contains an assertion that count != 0.
  // So we make this check purely in order not to trigger an assertion
  // failure.
2348   __ cbzw(length, *stub->continuation());
2349 
2350   if (flags & LIR_OpArrayCopy::type_check) {
2351     // We don't know the array types are compatible
2352     if (basic_type != T_OBJECT) {
2353       // Simple test for basic type arrays
2354       if (UseCompressedClassPointers) {
2355         __ ldrw(tmp, src_klass_addr);
2356         __ ldrw(rscratch1, dst_klass_addr);
2357         __ cmpw(tmp, rscratch1);
2358       } else {
2359         __ ldr(tmp, src_klass_addr);
2360         __ ldr(rscratch1, dst_klass_addr);
2361         __ cmp(tmp, rscratch1);
2362       }
2363       __ br(Assembler::NE, *stub->entry());
2364     } else {
2365       // For object arrays, if src is a sub class of dst then we can
2366       // safely do the copy.
2367       Label cont, slow;
2368 
2369 #define PUSH(r1, r2)                                    \
2370       stp(r1, r2, __ pre(sp, -2 * wordSize));
2371 
2372 #define POP(r1, r2)                                     \
2373       ldp(r1, r2, __ post(sp, 2 * wordSize));
2374 
2375       __ PUSH(src, dst);
2376 
2377       __ load_klass(src, src);
2378       __ load_klass(dst, dst);
2379 
2380       __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
2381 
2382       __ PUSH(src, dst);
2383       __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
2384       __ POP(src, dst);
2385 
2386       __ cbnz(src, cont);
2387 
2388       __ bind(slow);
2389       __ POP(src, dst);
2390 
2391       address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2392       if (copyfunc_addr != NULL) { // use stub if available
2393         // src is not a sub class of dst so we have to do a
2394         // per-element check.
2395 
2396         int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
2397         if ((flags & mask) != mask) {
          // Only one of src/dst is known to be an object array; check the
          // layout helper of the other at runtime.
2399           assert(flags & mask, "one of the two should be known to be an object array");
2400 
2401           if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2402             __ load_klass(tmp, src);
2403           } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2404             __ load_klass(tmp, dst);
2405           }
2406           int lh_offset = in_bytes(Klass::layout_helper_offset());
2407           Address klass_lh_addr(tmp, lh_offset);
2408           jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2409           __ ldrw(rscratch1, klass_lh_addr);
2410           __ mov(rscratch2, objArray_lh);
2411           __ eorw(rscratch1, rscratch1, rscratch2);
2412           __ cbnzw(rscratch1, *stub->entry());
2413         }
2414 
        // Spill because stubs can use any register they like and it's
        // easier to restore just those that we care about.
2417         __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2418         __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2419         __ str(src,              Address(sp, 4*BytesPerWord));
2420 
2421         __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
2422         __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
2423         assert_different_registers(c_rarg0, dst, dst_pos, length);
2424         __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
2425         __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
2426         assert_different_registers(c_rarg1, dst, length);
2427         __ uxtw(c_rarg2, length);
2428         assert_different_registers(c_rarg2, dst);
2429 
2430         __ load_klass(c_rarg4, dst);
2431         __ ldr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
2432         __ ldrw(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
2433         __ far_call(RuntimeAddress(copyfunc_addr));
2434 
2435 #ifndef PRODUCT
2436         if (PrintC1Statistics) {
2437           Label failed;
2438           __ cbnz(r0, failed);
2439           __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
2440           __ bind(failed);
2441         }
2442 #endif
2443 
2444         __ cbz(r0, *stub->continuation());
2445 
2446 #ifndef PRODUCT
2447         if (PrintC1Statistics) {
2448           __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
2449         }
2450 #endif
2451         assert_different_registers(dst, dst_pos, length, src_pos, src, r0, rscratch1);
2452 
2453         // Restore previously spilled arguments
2454         __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2455         __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2456         __ ldr(src,              Address(sp, 4*BytesPerWord));
2457 
        // return value is -1^K where K is the partial copied count
        __ eonw(rscratch1, r0, zr); // rscratch1 = ~r0 = K
        // adjust length down and src/dst pos up by partial copied count
2461         __ subw(length, length, rscratch1);
2462         __ addw(src_pos, src_pos, rscratch1);
2463         __ addw(dst_pos, dst_pos, rscratch1);
2464       }
2465 
2466       __ b(*stub->entry());
2467 
2468       __ bind(cont);
2469       __ POP(src, dst);
2470     }
2471   }
2472 
2473 #ifdef ASSERT
2474   if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2475     // Sanity check the known type with the incoming class.  For the
2476     // primitive case the types must match exactly with src.klass and
2477     // dst.klass each exactly matching the default type.  For the
2478     // object array case, if no type check is needed then either the
2479     // dst type is exactly the expected type and the src type is a
2480     // subtype which we can't check or src is the same array as dst
2481     // but not necessarily exactly of type default_type.
2482     Label known_ok, halt;
2483     __ mov_metadata(tmp, default_type->constant_encoding());
2484     if (UseCompressedClassPointers) {
2485       __ encode_klass_not_null(tmp);
2486     }
2487 
2488     if (basic_type != T_OBJECT) {
2489 
2490       if (UseCompressedClassPointers) {
2491         __ ldrw(rscratch1, dst_klass_addr);
2492         __ cmpw(tmp, rscratch1);
2493       } else {
2494         __ ldr(rscratch1, dst_klass_addr);
2495         __ cmp(tmp, rscratch1);
2496       }
2497       __ br(Assembler::NE, halt);
2498       if (UseCompressedClassPointers) {
2499         __ ldrw(rscratch1, src_klass_addr);
2500         __ cmpw(tmp, rscratch1);
2501       } else {
2502         __ ldr(rscratch1, src_klass_addr);
2503         __ cmp(tmp, rscratch1);
2504       }
2505       __ br(Assembler::EQ, known_ok);
2506     } else {
2507       if (UseCompressedClassPointers) {
2508         __ ldrw(rscratch1, dst_klass_addr);
2509         __ cmpw(tmp, rscratch1);
2510       } else {
2511         __ ldr(rscratch1, dst_klass_addr);
2512         __ cmp(tmp, rscratch1);
2513       }
2514       __ br(Assembler::EQ, known_ok);
2515       __ cmp(src, dst);
2516       __ br(Assembler::EQ, known_ok);
2517     }
2518     __ bind(halt);
2519     __ stop("incorrect type information in arraycopy");
2520     __ bind(known_ok);
2521   }
2522 #endif
2523 
2524 #ifndef PRODUCT
2525   if (PrintC1Statistics) {
2526     __ incrementw(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
2527   }
2528 #endif
2529 
2530   __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
2531   __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
2532   assert_different_registers(c_rarg0, dst, dst_pos, length);
2533   __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
2534   __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
2535   assert_different_registers(c_rarg1, dst, length);
2536   __ uxtw(c_rarg2, length);
2537   assert_different_registers(c_rarg2, dst);
2538 
2539   bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
2540   bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
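  // Pick the most specialized stub available: a disjoint variant when the
  // ranges cannot overlap and an aligned variant when the element
  // alignment allows it.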
2541   const char *name;
2542   address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2543 
  CodeBlob *cb = CodeCache::find_blob(entry);
  if (cb) {
    __ far_call(RuntimeAddress(entry));
  } else {
    __ call_VM_leaf(entry, 3);
  }
2550 
2551   __ bind(*stub->continuation());
2552 }
2553 
2554 
2555 
2556 
2557 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2558   Register obj = op->obj_opr()->as_register();  // may not be an oop
2559   Register hdr = op->hdr_opr()->as_register();
2560   Register lock = op->lock_opr()->as_register();
2561   if (!UseFastLocking) {
2562     __ b(*op->stub()->entry());
2563   } else if (op->code() == lir_lock) {
2564     Register scratch = noreg;
2565     if (UseBiasedLocking) {
2566       scratch = op->scratch_opr()->as_register();
2567     }
2568     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2569     // add debug info for NullPointerException only if one is possible
2570     int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
2571     if (op->info() != NULL) {
2572       add_debug_info_for_null_check(null_check_offset, op->info());
2573     }
2574     // done
2575   } else if (op->code() == lir_unlock) {
2576     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2577     __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2578   } else {
2579     Unimplemented();
2580   }
2581   __ bind(*op->stub()->continuation());
2582 }
2583 
2584 
2585 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2586   ciMethod* method = op->profiled_method();
2587   int bci          = op->profiled_bci();
2588   ciMethod* callee = op->profiled_callee();
2589 
2590   // Update counter for all call types
2591   ciMethodData* md = method->method_data_or_null();
2592   assert(md != NULL, "Sanity");
2593   ciProfileData* data = md->bci_to_data(bci);
2594   assert(data->is_CounterData(), "need CounterData for calls");
2595   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
2596   Register mdo  = op->mdo()->as_register();
2597   __ mov_metadata(mdo, md->constant_encoding());
2598   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
2599   Bytecodes::Code bc = method->java_code_at_bci(bci);
2600   const bool callee_is_static = callee->is_loaded() && callee->is_static();
2601   // Perform additional virtual call profiling for invokevirtual and
2602   // invokeinterface bytecodes
2603   if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
2604       !callee_is_static &&  // required for optimized MH invokes
2605       C1ProfileVirtualCalls) {
2606     assert(op->recv()->is_single_cpu(), "recv must be allocated");
2607     Register recv = op->recv()->as_register();
2608     assert_different_registers(mdo, recv);
2609     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
2610     ciKlass* known_klass = op->known_holder();
2611     if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
2612       // We know the type that will be seen at this call site; we can
2613       // statically update the MethodData* rather than needing to do
2614       // dynamic tests on the receiver type
2615 
2616       // NOTE: we should probably put a lock around this search to
2617       // avoid collisions by concurrent compilations
2618       ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
2619       uint i;
2620       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2621         ciKlass* receiver = vc_data->receiver(i);
2622         if (known_klass->equals(receiver)) {
2623           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
2624           __ addptr(data_addr, DataLayout::counter_increment);
2625           return;
2626         }
2627       }
2628 
2629       // Receiver type not found in profile data; select an empty slot
2630 
2631       // Note that this is less efficient than it should be because it
2632       // always does a write to the receiver part of the
2633       // VirtualCallData rather than just the first time
2634       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2635         ciKlass* receiver = vc_data->receiver(i);
2636         if (receiver == NULL) {
2637           Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
2638           __ mov_metadata(rscratch1, known_klass->constant_encoding());
2639           __ lea(rscratch2, recv_addr);
2640           __ str(rscratch1, Address(rscratch2));
2641           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
2642           __ addptr(data_addr, DataLayout::counter_increment);
2643           return;
2644         }
2645       }
2646     } else {
2647       __ load_klass(recv, recv);
2648       Label update_done;
2649       type_profile_helper(mdo, md, data, recv, &update_done);
2650       // Receiver did not match any saved receiver and there is no empty row for it.
2651       // Increment total counter to indicate polymorphic case.
2652       __ addptr(counter_addr, DataLayout::counter_increment);
2653 
2654       __ bind(update_done);
2655     }
2656   } else {
2657     // Static call
2658     __ addptr(counter_addr, DataLayout::counter_increment);
2659   }
2660 }
2661 
2662 
2663 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
2664   Unimplemented();
2665 }
2666 
2667 
2668 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
2669   __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
2670 }
2671 
2672 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
2673   assert(op->crc()->is_single_cpu(),  "crc must be register");
2674   assert(op->val()->is_single_cpu(),  "byte value must be register");
2675   assert(op->result_opr()->is_single_cpu(), "result must be register");
2676   Register crc = op->crc()->as_register();
2677   Register val = op->val()->as_register();
2678   Register res = op->result_opr()->as_register();
2679 
2680   assert_different_registers(val, crc, res);
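  // The CRC-32 convention keeps the register bit-inverted between calls:
  // invert on entry, combine the byte via the lookup table whose address
  // is materialized into res, then invert again on exit.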
2681   unsigned long offset;
2682   __ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset);
2683   if (offset) __ add(res, res, offset);
2684 
2685   __ ornw(crc, zr, crc); // ~crc
2686   __ update_byte_crc32(crc, val, res);
2687   __ ornw(res, zr, crc); // ~crc
2688 }
2689 
2690 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
2691   COMMENT("emit_profile_type {");
2692   Register obj = op->obj()->as_register();
2693   Register tmp = op->tmp()->as_pointer_register();
2694   Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
2695   ciKlass* exact_klass = op->exact_klass();
2696   intptr_t current_klass = op->current_klass();
2697   bool not_null = op->not_null();
2698   bool no_conflict = op->no_conflict();
2699 
2700   Label update, next, none;
2701 
2702   bool do_null = !not_null;
2703   bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
2704   bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
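  // A type-profile cell holds either 0 (nothing seen yet), the null_seen
  // flag, a klass pointer with flag bits in its low bits (hence the
  // type_klass_mask tests below), or type_unknown once the site has gone
  // polymorphic.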
2705 
2706   assert(do_null || do_update, "why are we here?");
2707   assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
2708   assert(mdo_addr.base() != rscratch1, "wrong register");
2709 
2710   __ verify_oop(obj);
2711 
2712   if (tmp != obj) {
2713     __ mov(tmp, obj);
2714   }
2715   if (do_null) {
2716     __ cbnz(tmp, update);
2717     if (!TypeEntries::was_null_seen(current_klass)) {
2718       __ ldr(rscratch2, mdo_addr);
2719       __ orr(rscratch2, rscratch2, TypeEntries::null_seen);
2720       __ str(rscratch2, mdo_addr);
2721     }
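    // The #ifdef weave below emits the same branch to next either way;
    // debug builds additionally gain an else-arm (the !do_null case) that
    // verifies a supposedly non-null obj really is non-null.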
2722     if (do_update) {
2723 #ifndef ASSERT
2724       __ b(next);
2725     }
2726 #else
2727       __ b(next);
2728     }
2729   } else {
2730     __ cbnz(tmp, update);
2731     __ stop("unexpected null obj");
2732 #endif
2733   }
2734 
2735   __ bind(update);
2736 
2737   if (do_update) {
2738 #ifdef ASSERT
2739     if (exact_klass != NULL) {
2740       Label ok;
2741       __ load_klass(tmp, tmp);
2742       __ mov_metadata(rscratch1, exact_klass->constant_encoding());
2743       __ eor(rscratch1, tmp, rscratch1);
2744       __ cbz(rscratch1, ok);
2745       __ stop("exact klass and actual klass differ");
2746       __ bind(ok);
2747     }
2748 #endif
2749     if (!no_conflict) {
2750       if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
2751         if (exact_klass != NULL) {
2752           __ mov_metadata(tmp, exact_klass->constant_encoding());
2753         } else {
2754           __ load_klass(tmp, tmp);
2755         }
2756 
2757         __ ldr(rscratch2, mdo_addr);
2758         __ eor(tmp, tmp, rscratch2);
2759         __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2760         // klass seen before, nothing to do. The unknown bit may have been
2761         // set already but no need to check.
2762         __ cbz(rscratch1, next);
2763 
2764         __ andr(rscratch1, tmp, TypeEntries::type_unknown);
2765         __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.
2766 
2767         if (TypeEntries::is_type_none(current_klass)) {
2768           __ cbz(rscratch2, none);
2769           __ cmp(rscratch2, TypeEntries::null_seen);
2770           __ br(Assembler::EQ, none);
2771           // There is a chance that the checks above (re-reading profiling
2772           // data from memory) fail if another thread has just set the
2773           // profiling to this obj's klass
2774           __ dmb(Assembler::ISHLD);
2775           __ ldr(rscratch2, mdo_addr);
2776           __ eor(tmp, tmp, rscratch2);
2777           __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2778           __ cbz(rscratch1, next);
2779         }
2780       } else {
2781         assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
2782                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
2783 
2784         __ ldr(tmp, mdo_addr);
2785         __ andr(rscratch1, tmp, TypeEntries::type_unknown);
2786         __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.
2787       }
2788 
      // Different from what was recorded before: we cannot keep an
      // accurate profile.
2790       __ ldr(rscratch2, mdo_addr);
2791       __ orr(rscratch2, rscratch2, TypeEntries::type_unknown);
2792       __ str(rscratch2, mdo_addr);
2793 
2794       if (TypeEntries::is_type_none(current_klass)) {
2795         __ b(next);
2796 
2797         __ bind(none);
2798         // first time here. Set profile type.
2799         __ str(tmp, mdo_addr);
2800       }
2801     } else {
2802       // There's a single possible klass at this profile point
2803       assert(exact_klass != NULL, "should be");
2804       if (TypeEntries::is_type_none(current_klass)) {
2805         __ mov_metadata(tmp, exact_klass->constant_encoding());
2806         __ ldr(rscratch2, mdo_addr);
2807         __ eor(tmp, tmp, rscratch2);
2808         __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2809         __ cbz(rscratch1, next);
2810 #ifdef ASSERT
2811         {
2812           Label ok;
2813           __ ldr(rscratch1, mdo_addr);
2814           __ cbz(rscratch1, ok);
2815           __ cmp(rscratch1, TypeEntries::null_seen);
2816           __ br(Assembler::EQ, ok);
2817           // may have been set by another thread
2818           __ dmb(Assembler::ISHLD);
2819           __ mov_metadata(rscratch1, exact_klass->constant_encoding());
2820           __ ldr(rscratch2, mdo_addr);
2821           __ eor(rscratch2, rscratch1, rscratch2);
2822           __ andr(rscratch2, rscratch2, TypeEntries::type_mask);
2823           __ cbz(rscratch2, ok);
2824 
2825           __ stop("unexpected profiling mismatch");
2826           __ bind(ok);
2827         }
2828 #endif
        // first time here. Set profile type.
        __ str(tmp, mdo_addr);
2831       } else {
2832         assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
2833                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2834 
2835         __ ldr(tmp, mdo_addr);
2836         __ andr(rscratch1, tmp, TypeEntries::type_unknown);
2837         __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.
2838 
2839         __ orr(tmp, tmp, TypeEntries::type_unknown);
2840         __ str(tmp, mdo_addr);
2841         // FIXME: Write barrier needed here?
2842       }
2843     }
2844 
2845     __ bind(next);
2846   }
2847   COMMENT("} emit_profile_type");
2848 }
2849 
2850 
2851 void LIR_Assembler::align_backward_branch_target() {
2852 }
2853 
2854 
2855 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
2856   if (left->is_single_cpu()) {
2857     assert(dest->is_single_cpu(), "expect single result reg");
2858     __ negw(dest->as_register(), left->as_register());
2859   } else if (left->is_double_cpu()) {
2860     assert(dest->is_double_cpu(), "expect double result reg");
2861     __ neg(dest->as_register_lo(), left->as_register_lo());
2862   } else if (left->is_single_fpu()) {
2863     assert(dest->is_single_fpu(), "expect single float result reg");
2864     __ fnegs(dest->as_float_reg(), left->as_float_reg());
2865   } else {
2866     assert(left->is_double_fpu(), "expect double float operand reg");
2867     assert(dest->is_double_fpu(), "expect double float result reg");
2868     __ fnegd(dest->as_double_reg(), left->as_double_reg());
2869   }
2870 }
2871 
2872 
2873 void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
2874   __ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr()));
2875 }
2876 
2877 
2878 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
2879   assert(!tmp->is_valid(), "don't need temporary");
2880 
2881   CodeBlob *cb = CodeCache::find_blob(dest);
2882   if (cb) {
2883     __ far_call(RuntimeAddress(dest));
2884   } else {
2885     __ mov(rscratch1, RuntimeAddress(dest));
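    // blrt takes the GP/FP argument counts plus a return-type code
    // (0 = void, 1 = integer/pointer, 2 = float, 3 = double).  On real
    // hardware it degenerates to a plain blr; the extra information is
    // there for the builtin-simulator builds, which must marshal native
    // arguments.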
    int type = 0;
    if (! result->is_illegal()) {
      switch (result->type()) {
      case T_VOID:
        type = 0;
        break;
      case T_INT:
      case T_LONG:
      case T_OBJECT:
        type = 1;
        break;
      case T_FLOAT:
        type = 2;
        break;
      case T_DOUBLE:
        type = 3;
        break;
      default:
        ShouldNotReachHere();
        break;
      }
    }
    int num_gpargs = 0;
    int num_fpargs = 0;
    for (int i = 0; i < len; i++) {
      LIR_Opr arg = args->at(i);
      if (arg->type() == T_FLOAT || arg->type() == T_DOUBLE) {
        num_fpargs++;
      } else {
        num_gpargs++;
      }
    }
    __ blrt(rscratch1, num_gpargs, num_fpargs, type);
  }

  if (info != NULL) {
    add_call_info_here(info);
  }
  __ maybe_isb();
}

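// Volatile long/double accesses need no special instruction sequence
// here: an aligned 64-bit ldr/str is single-copy atomic on AArch64, and
// any required ordering is emitted separately as membar LIR ops.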
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  if (dest->is_address() || src->is_address()) {
    move_op(src, dest, type, lir_patch_none, info,
            /*pop_fpu_stack*/false, /*unaligned*/false, /*wide*/false);
  } else {
    ShouldNotReachHere();
  }
}

#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    Assembler::Condition acond = Assembler::AL;
    switch (op->condition()) {
      case lir_cond_equal:        acond = Assembler::EQ;  break;
      case lir_cond_notEqual:     acond = Assembler::NE;  break;
      case lir_cond_less:         acond = Assembler::LT;  break;
      case lir_cond_lessEqual:    acond = Assembler::LE;  break;
      case lir_cond_greaterEqual: acond = Assembler::GE;  break;
      case lir_cond_greater:      acond = Assembler::GT;  break;
      case lir_cond_belowEqual:   acond = Assembler::LS;  break;  // unsigned <=
      case lir_cond_aboveEqual:   acond = Assembler::HS;  break;  // unsigned >=
      default:                    ShouldNotReachHere();
    }
    __ br(acond, ok);
  }
  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif

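// Memory-barrier LIR ops. MacroAssembler::membar maps these generic
// ordering constraints onto the appropriate AArch64 dmb variants.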
void LIR_Assembler::membar() {
  COMMENT("membar");
  __ membar(MacroAssembler::AnyAny);
}

void LIR_Assembler::membar_acquire() {
  __ membar(Assembler::LoadLoad|Assembler::LoadStore);
}

void LIR_Assembler::membar_release() {
  __ membar(Assembler::LoadStore|Assembler::StoreStore);
}

void LIR_Assembler::membar_loadload() {
  __ membar(Assembler::LoadLoad);
}

void LIR_Assembler::membar_storestore() {
  __ membar(MacroAssembler::StoreStore);
}

void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }

void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }

void LIR_Assembler::on_spin_wait() {
  Unimplemented();
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  __ mov(result_reg->as_register(), rthread);
}


void LIR_Assembler::peephole(LIR_List *lir) {
#if 0
  if (tableswitch_count >= max_tableswitches)
    return;

  /*
    This finite-state automaton recognizes sequences of compare-and-
    branch instructions.  We will turn them into a tableswitch.  You
    could argue that C1 really shouldn't be doing this sort of
    optimization, but without it the code is really horrible.
  */
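
  /*
    Concretely, the automaton below matches a run of LIR of the form

        cmp  reg, #k        ; branch-if-equal L_k
        cmp  reg, #k+1      ; branch-if-equal L_{k+1}
        ...

    over consecutive integer keys on the same register; a sufficiently
    long run (see the first_key/last_key check below) is rewritten as a
    single table dispatch.
  */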

  enum { start_s, cmp1_s, beq_s, cmp_s } state;
  int first_key, last_key = min_jint;
  int next_key = 0;
  int start_insn = -1;
  int last_insn = -1;
  Register reg = noreg;
  LIR_Opr reg_opr;
  state = start_s;

  LIR_OpList* inst = lir->instructions_list();
  for (int i = 0; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);
    switch (state) {
    case start_s:
      first_key = -1;
      start_insn = i;
      switch (op->code()) {
      case lir_cmp: {
        LIR_Opr opr1 = op->as_Op2()->in_opr1();
        LIR_Opr opr2 = op->as_Op2()->in_opr2();
        if (opr1->is_cpu_register() && opr1->is_single_cpu()
            && opr2->is_constant()
            && opr2->type() == T_INT) {
          reg_opr = opr1;
          reg = opr1->as_register();
          first_key = opr2->as_constant_ptr()->as_jint();
          next_key = first_key + 1;
          state = cmp_s;
          goto next_state;
        }
        break;
      }
      }
      break;
    case cmp_s:
      switch (op->code()) {
      case lir_branch:
        if (op->as_OpBranch()->cond() == lir_cond_equal) {
          state = beq_s;
          last_insn = i;
          goto next_state;
        }
      }
      state = start_s;
      break;
    case beq_s:
      switch (op->code()) {
      case lir_cmp: {
        LIR_Opr opr1 = op->as_Op2()->in_opr1();
        LIR_Opr opr2 = op->as_Op2()->in_opr2();
        if (opr1->is_cpu_register() && opr1->is_single_cpu()
            && opr1->as_register() == reg
            && opr2->is_constant()
            && opr2->type() == T_INT
            && opr2->as_constant_ptr()->as_jint() == next_key) {
          last_key = next_key;
          next_key++;
          state = cmp_s;
          goto next_state;
        }
      }
      }
      last_key = next_key;
      state = start_s;
      break;
    default:
      assert(false, "impossible state");
    }
    if (state == start_s) {
      if (first_key < last_key - 5L && reg != noreg) {
        {
          // printf("found run register %d starting at insn %d low value %d high value %d\n",
          //        reg->encoding(),
          //        start_insn, first_key, last_key);
          //   for (int i = 0; i < inst->length(); i++) {
          //     inst->at(i)->print();
          //     tty->print("\n");
          //   }
          //   tty->print("\n");
        }

        struct tableswitch *sw = &switches[tableswitch_count];
        sw->_insn_index = start_insn;
        sw->_first_key = first_key;
        sw->_last_key = last_key;
        sw->_reg = reg;
        inst->insert_before(last_insn + 1, new LIR_OpLabel(&sw->_after));
        {
          // Insert the new table of branches
          int offset = last_insn;
          for (int n = first_key; n < last_key; n++) {
            inst->insert_before
              (last_insn + 1,
               new LIR_OpBranch(lir_cond_always, T_ILLEGAL,
                                inst->at(offset)->as_OpBranch()->label()));
            offset -= 2;
            i++;
          }
        }
        // Delete all the old compare-and-branch instructions
        for (int n = first_key; n < last_key; n++) {
          inst->remove_at(start_insn);
          inst->remove_at(start_insn);
        }
        // Insert the tableswitch instruction
        inst->insert_before(start_insn,
                            new LIR_Op2(lir_cmp, lir_cond_always,
                                        LIR_OprFact::intConst(tableswitch_count),
                                        reg_opr));
        inst->insert_before(start_insn + 1, new LIR_OpLabel(&sw->_branches));
        tableswitch_count++;
      }
      reg = noreg;
      last_key = min_jint;
    }
  next_state:
    ;
  }
#endif
}

void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
  Address addr = as_Address(src->as_address_ptr(), noreg);
  BasicType type = src->type();
  bool is_oop = type == T_OBJECT || type == T_ARRAY;

  void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
  void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);

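  // Select the 32- or 64-bit flavour of the acquire/release atomic
  // helpers: T_INT and compressed oops live in 32-bit slots, T_LONG and
  // uncompressed oops in 64-bit ones.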
  switch (type) {
  case T_INT:
    xchg = &MacroAssembler::atomic_xchgalw;
    add = &MacroAssembler::atomic_addalw;
    break;
  case T_LONG:
    xchg = &MacroAssembler::atomic_xchgal;
    add = &MacroAssembler::atomic_addal;
    break;
  case T_OBJECT:
  case T_ARRAY:
    if (UseCompressedOops) {
      xchg = &MacroAssembler::atomic_xchgalw;
      add = &MacroAssembler::atomic_addalw;
    } else {
      xchg = &MacroAssembler::atomic_xchgal;
      add = &MacroAssembler::atomic_addal;
    }
    break;
  default:
    ShouldNotReachHere();
    xchg = &MacroAssembler::atomic_xchgal;
    add = &MacroAssembler::atomic_addal; // unreachable
  }

  switch (code) {
  case lir_xadd:
    {
      RegisterOrConstant inc;
      Register tmp = as_reg(tmp_op);
      Register dst = as_reg(dest);
      if (data->is_constant()) {
        inc = RegisterOrConstant(as_long(data));
        assert_different_registers(dst, addr.base(), tmp,
                                   rscratch1, rscratch2);
      } else {
        inc = RegisterOrConstant(as_reg(data));
        assert_different_registers(inc.as_register(), dst, addr.base(), tmp,
                                   rscratch1, rscratch2);
      }
      __ lea(tmp, addr);
      (_masm->*add)(dst, inc, tmp);
      break;
    }
  case lir_xchg:
    {
      Register tmp = tmp_op->as_register();
      Register obj = as_reg(data);
      Register dst = as_reg(dest);
      if (is_oop && UseCompressedOops) {
        __ encode_heap_oop(rscratch2, obj);
        obj = rscratch2;
      }
      assert_different_registers(obj, addr.base(), tmp, rscratch1, dst);
      __ lea(tmp, addr);
      (_masm->*xchg)(dst, obj, tmp);
      if (is_oop && UseCompressedOops) {
        __ decode_heap_oop(dst);
      }
    }
    break;
  default:
    ShouldNotReachHere();
  }
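  // Conservatively follow the atomic operation with a full barrier so
  // that it is ordered with all surrounding memory accesses.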
  __ membar(__ AnyAny);
}

#undef __