1 /*
   2  * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/assembler.hpp"
  28 #include "c1/c1_CodeStubs.hpp"
  29 #include "c1/c1_Compilation.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_MacroAssembler.hpp"
  32 #include "c1/c1_Runtime1.hpp"
  33 #include "c1/c1_ValueStack.hpp"
  34 #include "ci/ciArrayKlass.hpp"
  35 #include "ci/ciInstance.hpp"
  36 #include "gc/shared/barrierSet.hpp"
  37 #include "gc/shared/cardTableBarrierSet.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "nativeInst_aarch64.hpp"
  40 #include "oops/objArrayKlass.hpp"
  41 #include "runtime/frame.inline.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "vmreg_aarch64.inline.hpp"
  44 
  45 
  46 
  47 #ifndef PRODUCT
  48 #define COMMENT(x)   do { __ block_comment(x); } while (0)
  49 #else
  50 #define COMMENT(x)
  51 #endif
  52 
  53 NEEDS_CLEANUP // remove this definitions ?
  54 const Register IC_Klass    = rscratch2;   // where the IC klass is cached
  55 const Register SYNC_header = r0;   // synchronization header
  56 const Register SHIFT_count = r0;   // where count for shift operations must be
  57 
  58 #define __ _masm->
  59 
  60 
  61 static void select_different_registers(Register preserve,
  62                                        Register extra,
  63                                        Register &tmp1,
  64                                        Register &tmp2) {
  65   if (tmp1 == preserve) {
  66     assert_different_registers(tmp1, tmp2, extra);
  67     tmp1 = extra;
  68   } else if (tmp2 == preserve) {
  69     assert_different_registers(tmp1, tmp2, extra);
  70     tmp2 = extra;
  71   }
  72   assert_different_registers(preserve, tmp1, tmp2);
  73 }
  74 
  75 
  76 
  77 static void select_different_registers(Register preserve,
  78                                        Register extra,
  79                                        Register &tmp1,
  80                                        Register &tmp2,
  81                                        Register &tmp3) {
  82   if (tmp1 == preserve) {
  83     assert_different_registers(tmp1, tmp2, tmp3, extra);
  84     tmp1 = extra;
  85   } else if (tmp2 == preserve) {
  86     assert_different_registers(tmp1, tmp2, tmp3, extra);
  87     tmp2 = extra;
  88   } else if (tmp3 == preserve) {
  89     assert_different_registers(tmp1, tmp2, tmp3, extra);
  90     tmp3 = extra;
  91   }
  92   assert_different_registers(preserve, tmp1, tmp2, tmp3);
  93 }
  94 
  95 
  96 bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }
  97 
  98 
  99 LIR_Opr LIR_Assembler::receiverOpr() {
 100   return FrameMap::receiver_opr;
 101 }
 102 
 103 LIR_Opr LIR_Assembler::osrBufferPointer() {
 104   return FrameMap::as_pointer_opr(receiverOpr()->as_register());
 105 }
 106 
 107 //--------------fpu register translations-----------------------
 108 
 109 
 110 address LIR_Assembler::float_constant(float f) {
 111   address const_addr = __ float_constant(f);
 112   if (const_addr == NULL) {
 113     bailout("const section overflow");
 114     return __ code()->consts()->start();
 115   } else {
 116     return const_addr;
 117   }
 118 }
 119 
 120 
 121 address LIR_Assembler::double_constant(double d) {
 122   address const_addr = __ double_constant(d);
 123   if (const_addr == NULL) {
 124     bailout("const section overflow");
 125     return __ code()->consts()->start();
 126   } else {
 127     return const_addr;
 128   }
 129 }
 130 
 131 address LIR_Assembler::int_constant(jlong n) {
 132   address const_addr = __ long_constant(n);
 133   if (const_addr == NULL) {
 134     bailout("const section overflow");
 135     return __ code()->consts()->start();
 136   } else {
 137     return const_addr;
 138   }
 139 }
 140 
 141 void LIR_Assembler::set_24bit_FPU() { Unimplemented(); }
 142 
 143 void LIR_Assembler::reset_FPU() { Unimplemented(); }
 144 
 145 void LIR_Assembler::fpop() { Unimplemented(); }
 146 
 147 void LIR_Assembler::fxch(int i) { Unimplemented(); }
 148 
 149 void LIR_Assembler::fld(int i) { Unimplemented(); }
 150 
 151 void LIR_Assembler::ffree(int i) { Unimplemented(); }
 152 
 153 void LIR_Assembler::breakpoint() { Unimplemented(); }
 154 
 155 void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }
 156 
 157 void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }
 158 
 159 bool LIR_Assembler::is_literal_address(LIR_Address* addr) { Unimplemented(); return false; }
 160 //-------------------------------------------
 161 
 162 static Register as_reg(LIR_Opr op) {
 163   return op->is_double_cpu() ? op->as_register_lo() : op->as_register();
 164 }
 165 
 166 static jlong as_long(LIR_Opr data) {
 167   jlong result;
 168   switch (data->type()) {
 169   case T_INT:
 170     result = (data->as_jint());
 171     break;
 172   case T_LONG:
 173     result = (data->as_jlong());
 174     break;
 175   default:
 176     ShouldNotReachHere();
 177     result = 0;  // unreachable
 178   }
 179   return result;
 180 }
 181 
 182 Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
 183   Register base = addr->base()->as_pointer_register();
 184   LIR_Opr opr = addr->index();
 185   if (opr->is_cpu_register()) {
 186     Register index;
 187     if (opr->is_single_cpu())
 188       index = opr->as_register();
 189     else
 190       index = opr->as_register_lo();
 191     assert(addr->disp() == 0, "must be");
 192     switch(opr->type()) {
 193       case T_INT:
 194         return Address(base, index, Address::sxtw(addr->scale()));
 195       case T_LONG:
 196         return Address(base, index, Address::lsl(addr->scale()));
 197       default:
 198         ShouldNotReachHere();
 199       }
 200   } else  {
 201     intptr_t addr_offset = intptr_t(addr->disp());
 202     if (Address::offset_ok_for_immed(addr_offset, addr->scale()))
 203       return Address(base, addr_offset, Address::lsl(addr->scale()));
 204     else {
 205       __ mov(tmp, addr_offset);
 206       return Address(base, tmp, Address::lsl(addr->scale()));
 207     }
 208   }
 209   return Address();
 210 }
 211 
 212 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
 213   ShouldNotReachHere();
 214   return Address();
 215 }
 216 
 217 Address LIR_Assembler::as_Address(LIR_Address* addr) {
 218   return as_Address(addr, rscratch1);
 219 }
 220 
 221 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
 222   return as_Address(addr, rscratch1);  // Ouch
 223   // FIXME: This needs to be much more clever.  See x86.
 224 }
 225 
 226 
 227 void LIR_Assembler::osr_entry() {
 228   offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
 229   BlockBegin* osr_entry = compilation()->hir()->osr_entry();
 230   ValueStack* entry_state = osr_entry->state();
 231   int number_of_locks = entry_state->locks_size();
 232 
 233   // we jump here if osr happens with the interpreter
 234   // state set up to continue at the beginning of the
 235   // loop that triggered osr - in particular, we have
 236   // the following registers setup:
 237   //
 238   // r2: osr buffer
 239   //
 240 
 241   // build frame
 242   ciMethod* m = compilation()->method();
 243   __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
 244 
 245   // OSR buffer is
 246   //
 247   // locals[nlocals-1..0]
 248   // monitors[0..number_of_locks]
 249   //
 250   // locals is a direct copy of the interpreter frame so in the osr buffer
 251   // so first slot in the local array is the last local from the interpreter
 252   // and last slot is local[0] (receiver) from the interpreter
 253   //
 254   // Similarly with locks. The first lock slot in the osr buffer is the nth lock
 255   // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
 256   // in the interpreter frame (the method lock if a sync method)
 257 
 258   // Initialize monitors in the compiled activation.
 259   //   r2: pointer to osr buffer
 260   //
 261   // All other registers are dead at this point and the locals will be
 262   // copied into place by code emitted in the IR.
 263 
 264   Register OSR_buf = osrBufferPointer()->as_pointer_register();
 265   { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
 266     int monitor_offset = BytesPerWord * method()->max_locals() +
 267       (2 * BytesPerWord) * (number_of_locks - 1);
 268     // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
 269     // the OSR buffer using 2 word entries: first the lock and then
 270     // the oop.
 271     for (int i = 0; i < number_of_locks; i++) {
 272       int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
 273 #ifdef ASSERT
 274       // verify the interpreter's monitor has a non-null object
 275       {
 276         Label L;
 277         __ ldr(rscratch1, Address(OSR_buf, slot_offset + 1*BytesPerWord));
 278         __ cbnz(rscratch1, L);
 279         __ stop("locked object is NULL");
 280         __ bind(L);
 281       }
 282 #endif
 283       __ ldr(r19, Address(OSR_buf, slot_offset + 0));
 284       __ str(r19, frame_map()->address_for_monitor_lock(i));
 285       __ ldr(r19, Address(OSR_buf, slot_offset + 1*BytesPerWord));
 286       __ str(r19, frame_map()->address_for_monitor_object(i));
 287     }
 288   }
 289 }
 290 
 291 
 292 // inline cache check; done before the frame is built.
 293 int LIR_Assembler::check_icache() {
 294   Register receiver = FrameMap::receiver_opr->as_register();
 295   Register ic_klass = IC_Klass;
 296   int start_offset = __ offset();
 297   __ inline_cache_check(receiver, ic_klass);
 298 
 299   // if icache check fails, then jump to runtime routine
 300   // Note: RECEIVER must still contain the receiver!
 301   Label dont;
 302   __ br(Assembler::EQ, dont);
 303   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 304 
 305   // We align the verified entry point unless the method body
 306   // (including its inline cache check) will fit in a single 64-byte
 307   // icache line.
 308   if (! method()->is_accessor() || __ offset() - start_offset > 4 * 4) {
 309     // force alignment after the cache check.
 310     __ align(CodeEntryAlignment);
 311   }
 312 
 313   __ bind(dont);
 314   return start_offset;
 315 }
 316 
 317 
 318 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
 319   if (o == NULL) {
 320     __ mov(reg, zr);
 321   } else {
 322     __ movoop(reg, o, /*immediate*/true);
 323   }
 324 }
 325 
 326 void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
 327   address target = NULL;
 328   relocInfo::relocType reloc_type = relocInfo::none;
 329 
 330   switch (patching_id(info)) {
 331   case PatchingStub::access_field_id:
 332     target = Runtime1::entry_for(Runtime1::access_field_patching_id);
 333     reloc_type = relocInfo::section_word_type;
 334     break;
 335   case PatchingStub::load_klass_id:
 336     target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
 337     reloc_type = relocInfo::metadata_type;
 338     break;
 339   case PatchingStub::load_mirror_id:
 340     target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
 341     reloc_type = relocInfo::oop_type;
 342     break;
 343   case PatchingStub::load_appendix_id:
 344     target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
 345     reloc_type = relocInfo::oop_type;
 346     break;
 347   default: ShouldNotReachHere();
 348   }
 349 
 350   __ far_call(RuntimeAddress(target));
 351   add_call_info_here(info);
 352 }
 353 
 354 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
 355   deoptimize_trap(info);
 356 }
 357 
 358 
 359 // This specifies the rsp decrement needed to build the frame
 360 int LIR_Assembler::initial_frame_size_in_bytes() const {
 361   // if rounding, must let FrameMap know!
 362 
 363   // The frame_map records size in slots (32bit word)
 364 
 365   // subtract two words to account for return address and link
 366   return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word))  * VMRegImpl::stack_slot_size;
 367 }
 368 
 369 
 370 int LIR_Assembler::emit_exception_handler() {
 371   // if the last instruction is a call (typically to do a throw which
 372   // is coming at the end after block reordering) the return address
 373   // must still point into the code area in order to avoid assertion
 374   // failures when searching for the corresponding bci => add a nop
 375   // (was bug 5/14/1999 - gri)
 376   __ nop();
 377 
 378   // generate code for exception handler
 379   address handler_base = __ start_a_stub(exception_handler_size());
 380   if (handler_base == NULL) {
 381     // not enough space left for the handler
 382     bailout("exception handler overflow");
 383     return -1;
 384   }
 385 
 386   int offset = code_offset();
 387 
 388   // the exception oop and pc are in r0, and r3
 389   // no other registers need to be preserved, so invalidate them
 390   __ invalidate_registers(false, true, true, false, true, true);
 391 
 392   // check that there is really an exception
 393   __ verify_not_null_oop(r0);
 394 
 395   // search an exception handler (r0: exception oop, r3: throwing pc)
 396   __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));  __ should_not_reach_here();
 397   guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
 398   __ end_a_stub();
 399 
 400   return offset;
 401 }
 402 
 403 
 404 // Emit the code to remove the frame from the stack in the exception
 405 // unwind path.
 406 int LIR_Assembler::emit_unwind_handler() {
 407 #ifndef PRODUCT
 408   if (CommentedAssembly) {
 409     _masm->block_comment("Unwind handler");
 410   }
 411 #endif
 412 
 413   int offset = code_offset();
 414 
 415   // Fetch the exception from TLS and clear out exception related thread state
 416   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
 417   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
 418   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
 419 
 420   __ bind(_unwind_handler_entry);
 421   __ verify_not_null_oop(r0);
 422   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 423     __ mov(r19, r0);  // Preserve the exception
 424   }
 425 
 426   // Preform needed unlocking
 427   MonitorExitStub* stub = NULL;
 428   if (method()->is_synchronized()) {
 429     monitor_address(0, FrameMap::r0_opr);
 430     stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
 431     __ unlock_object(r5, r4, r0, *stub->entry());
 432     __ bind(*stub->continuation());
 433   }
 434 
 435   if (compilation()->env()->dtrace_method_probes()) {
 436     __ call_Unimplemented();
 437 #if 0
 438     __ movptr(Address(rsp, 0), rax);
 439     __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
 440     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
 441 #endif
 442   }
 443 
 444   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 445     __ mov(r0, r19);  // Restore the exception
 446   }
 447 
 448   // remove the activation and dispatch to the unwind handler
 449   __ block_comment("remove_frame and dispatch to the unwind handler");
 450   __ remove_frame(initial_frame_size_in_bytes());
 451   __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 452 
 453   // Emit the slow path assembly
 454   if (stub != NULL) {
 455     stub->emit_code(this);
 456   }
 457 
 458   return offset;
 459 }
 460 
 461 
 462 int LIR_Assembler::emit_deopt_handler() {
 463   // if the last instruction is a call (typically to do a throw which
 464   // is coming at the end after block reordering) the return address
 465   // must still point into the code area in order to avoid assertion
 466   // failures when searching for the corresponding bci => add a nop
 467   // (was bug 5/14/1999 - gri)
 468   __ nop();
 469 
 470   // generate code for exception handler
 471   address handler_base = __ start_a_stub(deopt_handler_size());
 472   if (handler_base == NULL) {
 473     // not enough space left for the handler
 474     bailout("deopt handler overflow");
 475     return -1;
 476   }
 477 
 478   int offset = code_offset();
 479 
 480   __ adr(lr, pc());
 481   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 482   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 483   __ end_a_stub();
 484 
 485   return offset;
 486 }
 487 
 488 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
 489   _masm->code_section()->relocate(adr, relocInfo::poll_type);
 490   int pc_offset = code_offset();
 491   flush_debug_info(pc_offset);
 492   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
 493   if (info->exception_handlers() != NULL) {
 494     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
 495   }
 496 }
 497 
 498 void LIR_Assembler::return_op(LIR_Opr result) {
 499   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");
 500 
 501   // Pop the stack before the safepoint code
 502   __ remove_frame(initial_frame_size_in_bytes());
 503 
 504   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 505     __ reserved_stack_check();
 506   }
 507 
 508   address polling_page(os::get_polling_page());
 509   __ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type);
 510   __ ret(lr);
 511 }
 512 
 513 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 514   address polling_page(os::get_polling_page());
 515   guarantee(info != NULL, "Shouldn't be NULL");
 516   assert(os::is_poll_address(polling_page), "should be");
 517   __ get_polling_page(rscratch1, polling_page, relocInfo::poll_type);
 518   add_debug_info_for_branch(info);  // This isn't just debug info:
 519                                     // it's the oop map
 520   __ read_polling_page(rscratch1, relocInfo::poll_type);
 521   return __ offset();
 522 }
 523 
 524 
 525 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
 526   if (from_reg == r31_sp)
 527     from_reg = sp;
 528   if (to_reg == r31_sp)
 529     to_reg = sp;
 530   __ mov(to_reg, from_reg);
 531 }
 532 
 533 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
 534 
 535 
 536 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
 537   assert(src->is_constant(), "should not call otherwise");
 538   assert(dest->is_register(), "should not call otherwise");
 539   LIR_Const* c = src->as_constant_ptr();
 540 
 541   switch (c->type()) {
 542     case T_INT: {
 543       assert(patch_code == lir_patch_none, "no patching handled here");
 544       __ movw(dest->as_register(), c->as_jint());
 545       break;
 546     }
 547 
 548     case T_ADDRESS: {
 549       assert(patch_code == lir_patch_none, "no patching handled here");
 550       __ mov(dest->as_register(), c->as_jint());
 551       break;
 552     }
 553 
 554     case T_LONG: {
 555       assert(patch_code == lir_patch_none, "no patching handled here");
 556       __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
 557       break;
 558     }
 559 
 560     case T_OBJECT: {
 561         if (patch_code == lir_patch_none) {
 562           jobject2reg(c->as_jobject(), dest->as_register());
 563         } else {
 564           jobject2reg_with_patching(dest->as_register(), info);
 565         }
 566       break;
 567     }
 568 
 569     case T_METADATA: {
 570       if (patch_code != lir_patch_none) {
 571         klass2reg_with_patching(dest->as_register(), info);
 572       } else {
 573         __ mov_metadata(dest->as_register(), c->as_metadata());
 574       }
 575       break;
 576     }
 577 
 578     case T_FLOAT: {
 579       if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
 580         __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
 581       } else {
 582         __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
 583         __ ldrs(dest->as_float_reg(), Address(rscratch1));
 584       }
 585       break;
 586     }
 587 
 588     case T_DOUBLE: {
 589       if (__ operand_valid_for_float_immediate(c->as_jdouble())) {
 590         __ fmovd(dest->as_double_reg(), (c->as_jdouble()));
 591       } else {
 592         __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));
 593         __ ldrd(dest->as_double_reg(), Address(rscratch1));
 594       }
 595       break;
 596     }
 597 
 598     default:
 599       ShouldNotReachHere();
 600   }
 601 }
 602 
 603 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 604   LIR_Const* c = src->as_constant_ptr();
 605   switch (c->type()) {
 606   case T_OBJECT:
 607     {
 608       if (! c->as_jobject())
 609         __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
 610       else {
 611         const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
 612         reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
 613       }
 614     }
 615     break;
 616   case T_ADDRESS:
 617     {
 618       const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
 619       reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
 620     }
 621   case T_INT:
 622   case T_FLOAT:
 623     {
 624       Register reg = zr;
 625       if (c->as_jint_bits() == 0)
 626         __ strw(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
 627       else {
 628         __ movw(rscratch1, c->as_jint_bits());
 629         __ strw(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix()));
 630       }
 631     }
 632     break;
 633   case T_LONG:
 634   case T_DOUBLE:
 635     {
 636       Register reg = zr;
 637       if (c->as_jlong_bits() == 0)
 638         __ str(zr, frame_map()->address_for_slot(dest->double_stack_ix(),
 639                                                  lo_word_offset_in_bytes));
 640       else {
 641         __ mov(rscratch1, (intptr_t)c->as_jlong_bits());
 642         __ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(),
 643                                                         lo_word_offset_in_bytes));
 644       }
 645     }
 646     break;
 647   default:
 648     ShouldNotReachHere();
 649   }
 650 }
 651 
 652 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 653   assert(src->is_constant(), "should not call otherwise");
 654   LIR_Const* c = src->as_constant_ptr();
 655   LIR_Address* to_addr = dest->as_address_ptr();
 656 
 657   void (Assembler::* insn)(Register Rt, const Address &adr);
 658 
 659   switch (type) {
 660   case T_ADDRESS:
 661     assert(c->as_jint() == 0, "should be");
 662     insn = &Assembler::str;
 663     break;
 664   case T_LONG:
 665     assert(c->as_jlong() == 0, "should be");
 666     insn = &Assembler::str;
 667     break;
 668   case T_INT:
 669     assert(c->as_jint() == 0, "should be");
 670     insn = &Assembler::strw;
 671     break;
 672   case T_OBJECT:
 673   case T_ARRAY:
 674     assert(c->as_jobject() == 0, "should be");
 675     if (UseCompressedOops && !wide) {
 676       insn = &Assembler::strw;
 677     } else {
 678       insn = &Assembler::str;
 679     }
 680     break;
 681   case T_CHAR:
 682   case T_SHORT:
 683     assert(c->as_jint() == 0, "should be");
 684     insn = &Assembler::strh;
 685     break;
 686   case T_BOOLEAN:
 687   case T_BYTE:
 688     assert(c->as_jint() == 0, "should be");
 689     insn = &Assembler::strb;
 690     break;
 691   default:
 692     ShouldNotReachHere();
 693     insn = &Assembler::str;  // unreachable
 694   }
 695 
 696   if (info) add_debug_info_for_null_check_here(info);
 697   (_masm->*insn)(zr, as_Address(to_addr, rscratch1));
 698 }
 699 
 700 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 701   assert(src->is_register(), "should not call otherwise");
 702   assert(dest->is_register(), "should not call otherwise");
 703 
 704   // move between cpu-registers
 705   if (dest->is_single_cpu()) {
 706     if (src->type() == T_LONG) {
 707       // Can do LONG -> OBJECT
 708       move_regs(src->as_register_lo(), dest->as_register());
 709       return;
 710     }
 711     assert(src->is_single_cpu(), "must match");
 712     if (src->type() == T_OBJECT) {
 713       __ verify_oop(src->as_register());
 714     }
 715     move_regs(src->as_register(), dest->as_register());
 716 
 717   } else if (dest->is_double_cpu()) {
 718     if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
 719       // Surprising to me but we can see move of a long to t_object
 720       __ verify_oop(src->as_register());
 721       move_regs(src->as_register(), dest->as_register_lo());
 722       return;
 723     }
 724     assert(src->is_double_cpu(), "must match");
 725     Register f_lo = src->as_register_lo();
 726     Register f_hi = src->as_register_hi();
 727     Register t_lo = dest->as_register_lo();
 728     Register t_hi = dest->as_register_hi();
 729     assert(f_hi == f_lo, "must be same");
 730     assert(t_hi == t_lo, "must be same");
 731     move_regs(f_lo, t_lo);
 732 
 733   } else if (dest->is_single_fpu()) {
 734     __ fmovs(dest->as_float_reg(), src->as_float_reg());
 735 
 736   } else if (dest->is_double_fpu()) {
 737     __ fmovd(dest->as_double_reg(), src->as_double_reg());
 738 
 739   } else {
 740     ShouldNotReachHere();
 741   }
 742 }
 743 
 744 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
 745   if (src->is_single_cpu()) {
 746     if (type == T_ARRAY || type == T_OBJECT) {
 747       __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
 748       __ verify_oop(src->as_register());
 749     } else if (type == T_METADATA || type == T_DOUBLE) {
 750       __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
 751     } else {
 752       __ strw(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
 753     }
 754 
 755   } else if (src->is_double_cpu()) {
 756     Address dest_addr_LO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
 757     __ str(src->as_register_lo(), dest_addr_LO);
 758 
 759   } else if (src->is_single_fpu()) {
 760     Address dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
 761     __ strs(src->as_float_reg(), dest_addr);
 762 
 763   } else if (src->is_double_fpu()) {
 764     Address dest_addr = frame_map()->address_for_slot(dest->double_stack_ix());
 765     __ strd(src->as_double_reg(), dest_addr);
 766 
 767   } else {
 768     ShouldNotReachHere();
 769   }
 770 
 771 }
 772 
 773 
 774 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
 775   LIR_Address* to_addr = dest->as_address_ptr();
 776   PatchingStub* patch = NULL;
 777   Register compressed_src = rscratch1;
 778 
 779   if (patch_code != lir_patch_none) {
 780     deoptimize_trap(info);
 781     return;
 782   }
 783 
 784   if (type == T_ARRAY || type == T_OBJECT) {
 785     __ verify_oop(src->as_register());
 786 
 787     if (UseCompressedOops && !wide) {
 788       __ encode_heap_oop(compressed_src, src->as_register());
 789     } else {
 790       compressed_src = src->as_register();
 791     }
 792   }
 793 
 794   int null_check_here = code_offset();
 795   switch (type) {
 796     case T_FLOAT: {
 797       __ strs(src->as_float_reg(), as_Address(to_addr));
 798       break;
 799     }
 800 
 801     case T_DOUBLE: {
 802       __ strd(src->as_double_reg(), as_Address(to_addr));
 803       break;
 804     }
 805 
 806     case T_ARRAY:   // fall through
 807     case T_OBJECT:  // fall through
 808       if (UseCompressedOops && !wide) {
 809         __ strw(compressed_src, as_Address(to_addr, rscratch2));
 810       } else {
 811          __ str(compressed_src, as_Address(to_addr));
 812       }
 813       break;
 814     case T_METADATA:
 815       // We get here to store a method pointer to the stack to pass to
 816       // a dtrace runtime call. This can't work on 64 bit with
 817       // compressed klass ptrs: T_METADATA can be a compressed klass
 818       // ptr or a 64 bit method pointer.
 819       ShouldNotReachHere();
 820       __ str(src->as_register(), as_Address(to_addr));
 821       break;
 822     case T_ADDRESS:
 823       __ str(src->as_register(), as_Address(to_addr));
 824       break;
 825     case T_INT:
 826       __ strw(src->as_register(), as_Address(to_addr));
 827       break;
 828 
 829     case T_LONG: {
 830       __ str(src->as_register_lo(), as_Address_lo(to_addr));
 831       break;
 832     }
 833 
 834     case T_BYTE:    // fall through
 835     case T_BOOLEAN: {
 836       __ strb(src->as_register(), as_Address(to_addr));
 837       break;
 838     }
 839 
 840     case T_CHAR:    // fall through
 841     case T_SHORT:
 842       __ strh(src->as_register(), as_Address(to_addr));
 843       break;
 844 
 845     default:
 846       ShouldNotReachHere();
 847   }
 848   if (info != NULL) {
 849     add_debug_info_for_null_check(null_check_here, info);
 850   }
 851 }
 852 
 853 
 854 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
 855   assert(src->is_stack(), "should not call otherwise");
 856   assert(dest->is_register(), "should not call otherwise");
 857 
 858   if (dest->is_single_cpu()) {
 859     if (type == T_ARRAY || type == T_OBJECT) {
 860       __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 861       __ verify_oop(dest->as_register());
 862     } else if (type == T_METADATA) {
 863       __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 864     } else {
 865       __ ldrw(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 866     }
 867 
 868   } else if (dest->is_double_cpu()) {
 869     Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
 870     __ ldr(dest->as_register_lo(), src_addr_LO);
 871 
 872   } else if (dest->is_single_fpu()) {
 873     Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
 874     __ ldrs(dest->as_float_reg(), src_addr);
 875 
 876   } else if (dest->is_double_fpu()) {
 877     Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
 878     __ ldrd(dest->as_double_reg(), src_addr);
 879 
 880   } else {
 881     ShouldNotReachHere();
 882   }
 883 }
 884 
 885 
 886 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
 887   address target = NULL;
 888   relocInfo::relocType reloc_type = relocInfo::none;
 889 
 890   switch (patching_id(info)) {
 891   case PatchingStub::access_field_id:
 892     target = Runtime1::entry_for(Runtime1::access_field_patching_id);
 893     reloc_type = relocInfo::section_word_type;
 894     break;
 895   case PatchingStub::load_klass_id:
 896     target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
 897     reloc_type = relocInfo::metadata_type;
 898     break;
 899   case PatchingStub::load_mirror_id:
 900     target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
 901     reloc_type = relocInfo::oop_type;
 902     break;
 903   case PatchingStub::load_appendix_id:
 904     target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
 905     reloc_type = relocInfo::oop_type;
 906     break;
 907   default: ShouldNotReachHere();
 908   }
 909 
 910   __ far_call(RuntimeAddress(target));
 911   add_call_info_here(info);
 912 }
 913 
 914 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
 915 
 916   LIR_Opr temp;
 917   if (type == T_LONG || type == T_DOUBLE)
 918     temp = FrameMap::rscratch1_long_opr;
 919   else
 920     temp = FrameMap::rscratch1_opr;
 921 
 922   stack2reg(src, temp, src->type());
 923   reg2stack(temp, dest, dest->type(), false);
 924 }
 925 
 926 
 927 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
 928   LIR_Address* addr = src->as_address_ptr();
 929   LIR_Address* from_addr = src->as_address_ptr();
 930 
 931   if (addr->base()->type() == T_OBJECT) {
 932     __ verify_oop(addr->base()->as_pointer_register());
 933   }
 934 
 935   if (patch_code != lir_patch_none) {
 936     deoptimize_trap(info);
 937     return;
 938   }
 939 
 940   if (info != NULL) {
 941     add_debug_info_for_null_check_here(info);
 942   }
 943   int null_check_here = code_offset();
 944   switch (type) {
 945     case T_FLOAT: {
 946       __ ldrs(dest->as_float_reg(), as_Address(from_addr));
 947       break;
 948     }
 949 
 950     case T_DOUBLE: {
 951       __ ldrd(dest->as_double_reg(), as_Address(from_addr));
 952       break;
 953     }
 954 
 955     case T_ARRAY:   // fall through
 956     case T_OBJECT:  // fall through
 957       if (UseCompressedOops && !wide) {
 958         __ ldrw(dest->as_register(), as_Address(from_addr));
 959       } else {
 960          __ ldr(dest->as_register(), as_Address(from_addr));
 961       }
 962       break;
 963     case T_METADATA:
 964       // We get here to store a method pointer to the stack to pass to
 965       // a dtrace runtime call. This can't work on 64 bit with
 966       // compressed klass ptrs: T_METADATA can be a compressed klass
 967       // ptr or a 64 bit method pointer.
 968       ShouldNotReachHere();
 969       __ ldr(dest->as_register(), as_Address(from_addr));
 970       break;
 971     case T_ADDRESS:
 972       // FIXME: OMG this is a horrible kludge.  Any offset from an
 973       // address that matches klass_offset_in_bytes() will be loaded
 974       // as a word, not a long.
 975       if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
 976         __ ldrw(dest->as_register(), as_Address(from_addr));
 977       } else {
 978         __ ldr(dest->as_register(), as_Address(from_addr));
 979       }
 980       break;
 981     case T_INT:
 982       __ ldrw(dest->as_register(), as_Address(from_addr));
 983       break;
 984 
 985     case T_LONG: {
 986       __ ldr(dest->as_register_lo(), as_Address_lo(from_addr));
 987       break;
 988     }
 989 
 990     case T_BYTE:
 991       __ ldrsb(dest->as_register(), as_Address(from_addr));
 992       break;
 993     case T_BOOLEAN: {
 994       __ ldrb(dest->as_register(), as_Address(from_addr));
 995       break;
 996     }
 997 
 998     case T_CHAR:
 999       __ ldrh(dest->as_register(), as_Address(from_addr));
1000       break;
1001     case T_SHORT:
1002       __ ldrsh(dest->as_register(), as_Address(from_addr));
1003       break;
1004 
1005     default:
1006       ShouldNotReachHere();
1007   }
1008 
1009   if (type == T_ARRAY || type == T_OBJECT) {
1010     if (UseCompressedOops && !wide) {
1011       __ decode_heap_oop(dest->as_register());
1012     }
1013     __ verify_oop(dest->as_register());
1014   } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1015     if (UseCompressedClassPointers) {
1016       __ decode_klass_not_null(dest->as_register());
1017     }
1018   }
1019 }
1020 
1021 
1022 int LIR_Assembler::array_element_size(BasicType type) const {
1023   int elem_size = type2aelembytes(type);
1024   return exact_log2(elem_size);
1025 }
1026 
1027 void LIR_Assembler::arithmetic_idiv(LIR_Op3* op, bool is_irem) {
1028   Register Rdividend = op->in_opr1()->as_register();
1029   Register Rdivisor  = op->in_opr2()->as_register();
1030   Register Rscratch  = op->in_opr3()->as_register();
1031   Register Rresult   = op->result_opr()->as_register();
1032   int divisor = -1;
1033 
1034   /*
1035   TODO: For some reason, using the Rscratch that gets passed in is
1036   not possible because the register allocator does not see the tmp reg
1037   as used, and assignes it the same register as Rdividend. We use rscratch1
1038    instead.
1039 
1040   assert(Rdividend != Rscratch, "");
1041   assert(Rdivisor  != Rscratch, "");
1042   */
1043 
1044   if (Rdivisor == noreg && is_power_of_2(divisor)) {
1045     // convert division by a power of two into some shifts and logical operations
1046   }
1047 
1048   __ corrected_idivl(Rresult, Rdividend, Rdivisor, is_irem, rscratch1);
1049 }
1050 
1051 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1052   switch (op->code()) {
1053   case lir_idiv:
1054     arithmetic_idiv(op, false);
1055     break;
1056   case lir_irem:
1057     arithmetic_idiv(op, true);
1058     break;
1059   case lir_fmad:
1060     __ fmaddd(op->result_opr()->as_double_reg(),
1061               op->in_opr1()->as_double_reg(),
1062               op->in_opr2()->as_double_reg(),
1063               op->in_opr3()->as_double_reg());
1064     break;
1065   case lir_fmaf:
1066     __ fmadds(op->result_opr()->as_float_reg(),
1067               op->in_opr1()->as_float_reg(),
1068               op->in_opr2()->as_float_reg(),
1069               op->in_opr3()->as_float_reg());
1070     break;
1071   default:      ShouldNotReachHere(); break;
1072   }
1073 }
1074 
1075 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
1076 #ifdef ASSERT
1077   assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
1078   if (op->block() != NULL)  _branch_target_blocks.append(op->block());
1079   if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
1080 #endif
1081 
1082   if (op->cond() == lir_cond_always) {
1083     if (op->info() != NULL) add_debug_info_for_branch(op->info());
1084     __ b(*(op->label()));
1085   } else {
1086     Assembler::Condition acond;
1087     if (op->code() == lir_cond_float_branch) {
1088       bool is_unordered = (op->ublock() == op->block());
1089       // Assembler::EQ does not permit unordered branches, so we add
1090       // another branch here.  Likewise, Assembler::NE does not permit
1091       // ordered branches.
1092       if (is_unordered && op->cond() == lir_cond_equal
1093           || !is_unordered && op->cond() == lir_cond_notEqual)
1094         __ br(Assembler::VS, *(op->ublock()->label()));
1095       switch(op->cond()) {
1096       case lir_cond_equal:        acond = Assembler::EQ; break;
1097       case lir_cond_notEqual:     acond = Assembler::NE; break;
1098       case lir_cond_less:         acond = (is_unordered ? Assembler::LT : Assembler::LO); break;
1099       case lir_cond_lessEqual:    acond = (is_unordered ? Assembler::LE : Assembler::LS); break;
1100       case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::HS : Assembler::GE); break;
1101       case lir_cond_greater:      acond = (is_unordered ? Assembler::HI : Assembler::GT); break;
1102       default:                    ShouldNotReachHere();
1103         acond = Assembler::EQ;  // unreachable
1104       }
1105     } else {
1106       switch (op->cond()) {
1107         case lir_cond_equal:        acond = Assembler::EQ; break;
1108         case lir_cond_notEqual:     acond = Assembler::NE; break;
1109         case lir_cond_less:         acond = Assembler::LT; break;
1110         case lir_cond_lessEqual:    acond = Assembler::LE; break;
1111         case lir_cond_greaterEqual: acond = Assembler::GE; break;
1112         case lir_cond_greater:      acond = Assembler::GT; break;
1113         case lir_cond_belowEqual:   acond = Assembler::LS; break;
1114         case lir_cond_aboveEqual:   acond = Assembler::HS; break;
1115         default:                    ShouldNotReachHere();
1116           acond = Assembler::EQ;  // unreachable
1117       }
1118     }
1119     __ br(acond,*(op->label()));
1120   }
1121 }
1122 
1123 
1124 
1125 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
1126   LIR_Opr src  = op->in_opr();
1127   LIR_Opr dest = op->result_opr();
1128 
1129   switch (op->bytecode()) {
1130     case Bytecodes::_i2f:
1131       {
1132         __ scvtfws(dest->as_float_reg(), src->as_register());
1133         break;
1134       }
1135     case Bytecodes::_i2d:
1136       {
1137         __ scvtfwd(dest->as_double_reg(), src->as_register());
1138         break;
1139       }
1140     case Bytecodes::_l2d:
1141       {
1142         __ scvtfd(dest->as_double_reg(), src->as_register_lo());
1143         break;
1144       }
1145     case Bytecodes::_l2f:
1146       {
1147         __ scvtfs(dest->as_float_reg(), src->as_register_lo());
1148         break;
1149       }
1150     case Bytecodes::_f2d:
1151       {
1152         __ fcvts(dest->as_double_reg(), src->as_float_reg());
1153         break;
1154       }
1155     case Bytecodes::_d2f:
1156       {
1157         __ fcvtd(dest->as_float_reg(), src->as_double_reg());
1158         break;
1159       }
1160     case Bytecodes::_i2c:
1161       {
1162         __ ubfx(dest->as_register(), src->as_register(), 0, 16);
1163         break;
1164       }
1165     case Bytecodes::_i2l:
1166       {
1167         __ sxtw(dest->as_register_lo(), src->as_register());
1168         break;
1169       }
1170     case Bytecodes::_i2s:
1171       {
1172         __ sxth(dest->as_register(), src->as_register());
1173         break;
1174       }
1175     case Bytecodes::_i2b:
1176       {
1177         __ sxtb(dest->as_register(), src->as_register());
1178         break;
1179       }
1180     case Bytecodes::_l2i:
1181       {
1182         _masm->block_comment("FIXME: This could be a no-op");
1183         __ uxtw(dest->as_register(), src->as_register_lo());
1184         break;
1185       }
1186     case Bytecodes::_d2l:
1187       {
1188         __ fcvtzd(dest->as_register_lo(), src->as_double_reg());
1189         break;
1190       }
1191     case Bytecodes::_f2i:
1192       {
1193         __ fcvtzsw(dest->as_register(), src->as_float_reg());
1194         break;
1195       }
1196     case Bytecodes::_f2l:
1197       {
1198         __ fcvtzs(dest->as_register_lo(), src->as_float_reg());
1199         break;
1200       }
1201     case Bytecodes::_d2i:
1202       {
1203         __ fcvtzdw(dest->as_register(), src->as_double_reg());
1204         break;
1205       }
1206     default: ShouldNotReachHere();
1207   }
1208 }
1209 
1210 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
1211   if (op->init_check()) {
1212     __ ldrb(rscratch1, Address(op->klass()->as_register(),
1213                                InstanceKlass::init_state_offset()));
1214     __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1215     add_debug_info_for_null_check_here(op->stub()->info());
1216     __ br(Assembler::NE, *op->stub()->entry());
1217   }
1218   __ allocate_object(op->obj()->as_register(),
1219                      op->tmp1()->as_register(),
1220                      op->tmp2()->as_register(),
1221                      op->header_size(),
1222                      op->object_size(),
1223                      op->klass()->as_register(),
1224                      *op->stub()->entry());
1225   __ bind(*op->stub()->continuation());
1226 }
1227 
1228 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1229   Register len =  op->len()->as_register();
1230   __ uxtw(len, len);
1231 
1232   if (UseSlowPath ||
1233       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
1234       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
1235     __ b(*op->stub()->entry());
1236   } else {
1237     Register tmp1 = op->tmp1()->as_register();
1238     Register tmp2 = op->tmp2()->as_register();
1239     Register tmp3 = op->tmp3()->as_register();
1240     if (len == tmp1) {
1241       tmp1 = tmp3;
1242     } else if (len == tmp2) {
1243       tmp2 = tmp3;
1244     } else if (len == tmp3) {
1245       // everything is ok
1246     } else {
1247       __ mov(tmp3, len);
1248     }
1249     __ allocate_array(op->obj()->as_register(),
1250                       len,
1251                       tmp1,
1252                       tmp2,
1253                       arrayOopDesc::header_size(op->type()),
1254                       array_element_size(op->type()),
1255                       op->klass()->as_register(),
1256                       *op->stub()->entry());
1257   }
1258   __ bind(*op->stub()->continuation());
1259 }
1260 
1261 void LIR_Assembler::type_profile_helper(Register mdo,
1262                                         ciMethodData *md, ciProfileData *data,
1263                                         Register recv, Label* update_done) {
1264   for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
1265     Label next_test;
1266     // See if the receiver is receiver[n].
1267     __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
1268     __ ldr(rscratch1, Address(rscratch2));
1269     __ cmp(recv, rscratch1);
1270     __ br(Assembler::NE, next_test);
1271     Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
1272     __ addptr(data_addr, DataLayout::counter_increment);
1273     __ b(*update_done);
1274     __ bind(next_test);
1275   }
1276 
1277   // Didn't find receiver; find next empty slot and fill it in
1278   for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
1279     Label next_test;
1280     __ lea(rscratch2,
1281            Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
1282     Address recv_addr(rscratch2);
1283     __ ldr(rscratch1, recv_addr);
1284     __ cbnz(rscratch1, next_test);
1285     __ str(recv, recv_addr);
1286     __ mov(rscratch1, DataLayout::counter_increment);
1287     __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
1288     __ str(rscratch1, Address(rscratch2));
1289     __ b(*update_done);
1290     __ bind(next_test);
1291   }
1292 }
1293 
1294 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
1295   // we always need a stub for the failure case.
1296   CodeStub* stub = op->stub();
1297   Register obj = op->object()->as_register();
1298   Register k_RInfo = op->tmp1()->as_register();
1299   Register klass_RInfo = op->tmp2()->as_register();
1300   Register dst = op->result_opr()->as_register();
1301   ciKlass* k = op->klass();
1302   Register Rtmp1 = noreg;
1303 
1304   // check if it needs to be profiled
1305   ciMethodData* md;
1306   ciProfileData* data;
1307 
1308   const bool should_profile = op->should_profile();
1309 
1310   if (should_profile) {
1311     ciMethod* method = op->profiled_method();
1312     assert(method != NULL, "Should have method");
1313     int bci = op->profiled_bci();
1314     md = method->method_data_or_null();
1315     assert(md != NULL, "Sanity");
1316     data = md->bci_to_data(bci);
1317     assert(data != NULL,                "need data for type check");
1318     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1319   }
1320   Label profile_cast_success, profile_cast_failure;
1321   Label *success_target = should_profile ? &profile_cast_success : success;
1322   Label *failure_target = should_profile ? &profile_cast_failure : failure;
1323 
1324   if (obj == k_RInfo) {
1325     k_RInfo = dst;
1326   } else if (obj == klass_RInfo) {
1327     klass_RInfo = dst;
1328   }
1329   if (k->is_loaded() && !UseCompressedClassPointers) {
1330     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1331   } else {
1332     Rtmp1 = op->tmp3()->as_register();
1333     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1334   }
1335 
1336   assert_different_registers(obj, k_RInfo, klass_RInfo);
1337 
1338     if (should_profile) {
1339       Label not_null;
1340       __ cbnz(obj, not_null);
1341       // Object is null; update MDO and exit
1342       Register mdo  = klass_RInfo;
1343       __ mov_metadata(mdo, md->constant_encoding());
1344       Address data_addr
1345         = __ form_address(rscratch2, mdo,
1346                           md->byte_offset_of_slot(data, DataLayout::DataLayout::header_offset()),
1347                           LogBytesPerWord);
1348       int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
1349       __ ldr(rscratch1, data_addr);
1350       __ orr(rscratch1, rscratch1, header_bits);
1351       __ str(rscratch1, data_addr);
1352       __ b(*obj_is_null);
1353       __ bind(not_null);
1354     } else {
1355       __ cbz(obj, *obj_is_null);
1356     }
1357 
1358   if (!k->is_loaded()) {
1359     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1360   } else {
1361     __ mov_metadata(k_RInfo, k->constant_encoding());
1362   }
1363   __ verify_oop(obj);
1364 
1365   if (op->fast_check()) {
1366     // get object class
1367     // not a safepoint as obj null check happens earlier
1368     __ load_klass(rscratch1, obj);
1369     __ cmp( rscratch1, k_RInfo);
1370 
1371     __ br(Assembler::NE, *failure_target);
1372     // successful cast, fall through to profile or jump
1373   } else {
1374     // get object class
1375     // not a safepoint as obj null check happens earlier
1376     __ load_klass(klass_RInfo, obj);
1377     if (k->is_loaded()) {
1378       // See if we get an immediate positive hit
1379       __ ldr(rscratch1, Address(klass_RInfo, long(k->super_check_offset())));
1380       __ cmp(k_RInfo, rscratch1);
1381       if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1382         __ br(Assembler::NE, *failure_target);
1383         // successful cast, fall through to profile or jump
1384       } else {
1385         // See if we get an immediate positive hit
1386         __ br(Assembler::EQ, *success_target);
1387         // check for self
1388         __ cmp(klass_RInfo, k_RInfo);
1389         __ br(Assembler::EQ, *success_target);
1390 
1391         __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1392         __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1393         __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1394         // result is a boolean
1395         __ cbzw(klass_RInfo, *failure_target);
1396         // successful cast, fall through to profile or jump
1397       }
1398     } else {
1399       // perform the fast part of the checking logic
1400       __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1401       // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1402       __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1403       __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1404       __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1405       // result is a boolean
1406       __ cbz(k_RInfo, *failure_target);
1407       // successful cast, fall through to profile or jump
1408     }
1409   }
1410   if (should_profile) {
1411     Register mdo  = klass_RInfo, recv = k_RInfo;
1412     __ bind(profile_cast_success);
1413     __ mov_metadata(mdo, md->constant_encoding());
1414     __ load_klass(recv, obj);
1415     Label update_done;
1416     type_profile_helper(mdo, md, data, recv, success);
1417     __ b(*success);
1418 
1419     __ bind(profile_cast_failure);
1420     __ mov_metadata(mdo, md->constant_encoding());
1421     Address counter_addr
1422       = __ form_address(rscratch2, mdo,
1423                         md->byte_offset_of_slot(data, CounterData::count_offset()),
1424                         LogBytesPerWord);
1425     __ ldr(rscratch1, counter_addr);
1426     __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
1427     __ str(rscratch1, counter_addr);
1428     __ b(*failure);
1429   }
1430   __ b(*success);
1431 }
1432 
1433 
1434 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
1435   const bool should_profile = op->should_profile();
1436 
1437   LIR_Code code = op->code();
1438   if (code == lir_store_check) {
1439     Register value = op->object()->as_register();
1440     Register array = op->array()->as_register();
1441     Register k_RInfo = op->tmp1()->as_register();
1442     Register klass_RInfo = op->tmp2()->as_register();
1443     Register Rtmp1 = op->tmp3()->as_register();
1444 
1445     CodeStub* stub = op->stub();
1446 
1447     // check if it needs to be profiled
1448     ciMethodData* md;
1449     ciProfileData* data;
1450 
1451     if (should_profile) {
1452       ciMethod* method = op->profiled_method();
1453       assert(method != NULL, "Should have method");
1454       int bci = op->profiled_bci();
1455       md = method->method_data_or_null();
1456       assert(md != NULL, "Sanity");
1457       data = md->bci_to_data(bci);
1458       assert(data != NULL,                "need data for type check");
1459       assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1460     }
1461     Label profile_cast_success, profile_cast_failure, done;
1462     Label *success_target = should_profile ? &profile_cast_success : &done;
1463     Label *failure_target = should_profile ? &profile_cast_failure : stub->entry();
1464 
1465     if (should_profile) {
1466       Label not_null;
1467       __ cbnz(value, not_null);
1468       // Object is null; update MDO and exit
1469       Register mdo  = klass_RInfo;
1470       __ mov_metadata(mdo, md->constant_encoding());
1471       Address data_addr
1472         = __ form_address(rscratch2, mdo,
1473                           md->byte_offset_of_slot(data, DataLayout::header_offset()),
1474                           LogBytesPerInt);
1475       int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
1476       __ ldrw(rscratch1, data_addr);
1477       __ orrw(rscratch1, rscratch1, header_bits);
1478       __ strw(rscratch1, data_addr);
1479       __ b(done);
1480       __ bind(not_null);
1481     } else {
1482       __ cbz(value, done);
1483     }
1484 
1485     add_debug_info_for_null_check_here(op->info_for_exception());
1486     __ load_klass(k_RInfo, array);
1487     __ load_klass(klass_RInfo, value);
1488 
1489     // get instance klass (it's already uncompressed)
1490     __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
1491     // perform the fast part of the checking logic
1492     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1493     // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1494     __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1495     __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1496     __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1497     // result is a boolean
1498     __ cbzw(k_RInfo, *failure_target);
1499     // fall through to the success case
1500 
1501     if (should_profile) {
1502       Register mdo  = klass_RInfo, recv = k_RInfo;
1503       __ bind(profile_cast_success);
1504       __ mov_metadata(mdo, md->constant_encoding());
1505       __ load_klass(recv, value);
1506       Label update_done;
1507       type_profile_helper(mdo, md, data, recv, &done);
1508       __ b(done);
1509 
1510       __ bind(profile_cast_failure);
1511       __ mov_metadata(mdo, md->constant_encoding());
1512       Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1513       __ lea(rscratch2, counter_addr);
1514       __ ldr(rscratch1, Address(rscratch2));
1515       __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
1516       __ str(rscratch1, Address(rscratch2));
1517       __ b(*stub->entry());
1518     }
1519 
1520     __ bind(done);
1521   } else if (code == lir_checkcast) {
1522     Register obj = op->object()->as_register();
1523     Register dst = op->result_opr()->as_register();
1524     Label success;
1525     emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
1526     __ bind(success);
1527     if (dst != obj) {
1528       __ mov(dst, obj);
1529     }
1530   } else if (code == lir_instanceof) {
1531     Register obj = op->object()->as_register();
1532     Register dst = op->result_opr()->as_register();
1533     Label success, failure, done;
1534     emit_typecheck_helper(op, &success, &failure, &failure);
1535     __ bind(failure);
1536     __ mov(dst, zr);
1537     __ b(done);
1538     __ bind(success);
1539     __ mov(dst, 1);
1540     __ bind(done);
1541   } else {
1542     ShouldNotReachHere();
1543   }
1544 }
1545 
1546 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1547   __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1548   __ cset(rscratch1, Assembler::NE);
1549   __ membar(__ AnyAny);
1550 }
1551 
1552 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1553   __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1554   __ cset(rscratch1, Assembler::NE);
1555   __ membar(__ AnyAny);
1556 }
1557 
1558 
1559 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1560   assert(VM_Version::supports_cx8(), "wrong machine");
1561   Register addr;
1562   if (op->addr()->is_register()) {
1563     addr = as_reg(op->addr());
1564   } else {
1565     assert(op->addr()->is_address(), "what else?");
1566     LIR_Address* addr_ptr = op->addr()->as_address_ptr();
1567     assert(addr_ptr->disp() == 0, "need 0 disp");
1568     assert(addr_ptr->index() == LIR_OprDesc::illegalOpr(), "need 0 index");
1569     addr = as_reg(addr_ptr->base());
1570   }
1571   Register newval = as_reg(op->new_value());
1572   Register cmpval = as_reg(op->cmp_value());
1573   Label succeed, fail, around;
1574 
1575   if (op->code() == lir_cas_obj) {
1576     if (UseCompressedOops) {
1577       Register t1 = op->tmp1()->as_register();
1578       assert(op->tmp1()->is_valid(), "must be");
1579       __ encode_heap_oop(t1, cmpval);
1580       cmpval = t1;
1581       __ encode_heap_oop(rscratch2, newval);
1582       newval = rscratch2;
1583       casw(addr, newval, cmpval);
1584     } else {
1585       casl(addr, newval, cmpval);
1586     }
1587   } else if (op->code() == lir_cas_int) {
1588     casw(addr, newval, cmpval);
1589   } else {
1590     casl(addr, newval, cmpval);
1591   }
1592 }
1593 
1594 
1595 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
1596 
1597   Assembler::Condition acond, ncond;
1598   switch (condition) {
1599   case lir_cond_equal:        acond = Assembler::EQ; ncond = Assembler::NE; break;
1600   case lir_cond_notEqual:     acond = Assembler::NE; ncond = Assembler::EQ; break;
1601   case lir_cond_less:         acond = Assembler::LT; ncond = Assembler::GE; break;
1602   case lir_cond_lessEqual:    acond = Assembler::LE; ncond = Assembler::GT; break;
1603   case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break;
1604   case lir_cond_greater:      acond = Assembler::GT; ncond = Assembler::LE; break;
1605   case lir_cond_belowEqual:
1606   case lir_cond_aboveEqual:
1607   default:                    ShouldNotReachHere();
1608     acond = Assembler::EQ; ncond = Assembler::NE;  // unreachable
1609   }
1610 
1611   assert(result->is_single_cpu() || result->is_double_cpu(),
1612          "expect single register for result");
1613   if (opr1->is_constant() && opr2->is_constant()
1614       && opr1->type() == T_INT && opr2->type() == T_INT) {
1615     jint val1 = opr1->as_jint();
1616     jint val2 = opr2->as_jint();
1617     if (val1 == 0 && val2 == 1) {
1618       __ cset(result->as_register(), ncond);
1619       return;
1620     } else if (val1 == 1 && val2 == 0) {
1621       __ cset(result->as_register(), acond);
1622       return;
1623     }
1624   }
1625 
1626   if (opr1->is_constant() && opr2->is_constant()
1627       && opr1->type() == T_LONG && opr2->type() == T_LONG) {
1628     jlong val1 = opr1->as_jlong();
1629     jlong val2 = opr2->as_jlong();
1630     if (val1 == 0 && val2 == 1) {
1631       __ cset(result->as_register_lo(), ncond);
1632       return;
1633     } else if (val1 == 1 && val2 == 0) {
1634       __ cset(result->as_register_lo(), acond);
1635       return;
1636     }
1637   }
1638 
1639   if (opr1->is_stack()) {
1640     stack2reg(opr1, FrameMap::rscratch1_opr, result->type());
1641     opr1 = FrameMap::rscratch1_opr;
1642   } else if (opr1->is_constant()) {
1643     LIR_Opr tmp
1644       = opr1->type() == T_LONG ? FrameMap::rscratch1_long_opr : FrameMap::rscratch1_opr;
1645     const2reg(opr1, tmp, lir_patch_none, NULL);
1646     opr1 = tmp;
1647   }
1648 
1649   if (opr2->is_stack()) {
1650     stack2reg(opr2, FrameMap::rscratch2_opr, result->type());
1651     opr2 = FrameMap::rscratch2_opr;
1652   } else if (opr2->is_constant()) {
1653     LIR_Opr tmp
1654       = opr2->type() == T_LONG ? FrameMap::rscratch2_long_opr : FrameMap::rscratch2_opr;
1655     const2reg(opr2, tmp, lir_patch_none, NULL);
1656     opr2 = tmp;
1657   }
1658 
1659   if (result->type() == T_LONG)
1660     __ csel(result->as_register_lo(), opr1->as_register_lo(), opr2->as_register_lo(), acond);
1661   else
1662     __ csel(result->as_register(), opr1->as_register(), opr2->as_register(), acond);
1663 }
1664 
1665 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
1666   assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
1667 
1668   if (left->is_single_cpu()) {
1669     Register lreg = left->as_register();
1670     Register dreg = as_reg(dest);
1671 
1672     if (right->is_single_cpu()) {
1673       // cpu register - cpu register
1674 
1675       assert(left->type() == T_INT && right->type() == T_INT && dest->type() == T_INT,
1676              "should be");
1677       Register rreg = right->as_register();
1678       switch (code) {
1679       case lir_add: __ addw (dest->as_register(), lreg, rreg); break;
1680       case lir_sub: __ subw (dest->as_register(), lreg, rreg); break;
1681       case lir_mul: __ mulw (dest->as_register(), lreg, rreg); break;
1682       default:      ShouldNotReachHere();
1683       }
1684 
1685     } else if (right->is_double_cpu()) {
1686       Register rreg = right->as_register_lo();
1687       // single_cpu + double_cpu: can happen with obj+long
1688       assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
1689       switch (code) {
1690       case lir_add: __ add(dreg, lreg, rreg); break;
1691       case lir_sub: __ sub(dreg, lreg, rreg); break;
1692       default: ShouldNotReachHere();
1693       }
1694     } else if (right->is_constant()) {
1695       // cpu register - constant
1696       jlong c;
1697 
1698       // FIXME.  This is fugly: we really need to factor all this logic.
1699       switch(right->type()) {
1700       case T_LONG:
1701         c = right->as_constant_ptr()->as_jlong();
1702         break;
1703       case T_INT:
1704       case T_ADDRESS:
1705         c = right->as_constant_ptr()->as_jint();
1706         break;
1707       default:
1708         ShouldNotReachHere();
1709         c = 0;  // unreachable
1710         break;
1711       }
1712 
1713       assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
1714       if (c == 0 && dreg == lreg) {
1715         COMMENT("effective nop elided");
1716         return;
1717       }
1718       switch(left->type()) {
1719       case T_INT:
1720         switch (code) {
1721         case lir_add: __ addw(dreg, lreg, c); break;
1722         case lir_sub: __ subw(dreg, lreg, c); break;
1723         default: ShouldNotReachHere();
1724         }
1725         break;
1726       case T_OBJECT:
1727       case T_ADDRESS:
1728         switch (code) {
1729         case lir_add: __ add(dreg, lreg, c); break;
1730         case lir_sub: __ sub(dreg, lreg, c); break;
1731         default: ShouldNotReachHere();
1732         }
1733         break;
1734         ShouldNotReachHere();
1735       }
1736     } else {
1737       ShouldNotReachHere();
1738     }
1739 
1740   } else if (left->is_double_cpu()) {
1741     Register lreg_lo = left->as_register_lo();
1742 
1743     if (right->is_double_cpu()) {
1744       // cpu register - cpu register
1745       Register rreg_lo = right->as_register_lo();
1746       switch (code) {
1747       case lir_add: __ add (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1748       case lir_sub: __ sub (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1749       case lir_mul: __ mul (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1750       case lir_div: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, false, rscratch1); break;
1751       case lir_rem: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, true, rscratch1); break;
1752       default:
1753         ShouldNotReachHere();
1754       }
1755 
1756     } else if (right->is_constant()) {
1757       jlong c = right->as_constant_ptr()->as_jlong_bits();
1758       Register dreg = as_reg(dest);
1759       assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
1760       if (c == 0 && dreg == lreg_lo) {
1761         COMMENT("effective nop elided");
1762         return;
1763       }
1764       switch (code) {
1765         case lir_add: __ add(dreg, lreg_lo, c); break;
1766         case lir_sub: __ sub(dreg, lreg_lo, c); break;
1767         default:
1768           ShouldNotReachHere();
1769       }
1770     } else {
1771       ShouldNotReachHere();
1772     }
1773   } else if (left->is_single_fpu()) {
1774     assert(right->is_single_fpu(), "right hand side of float arithmetics needs to be float register");
1775     switch (code) {
1776     case lir_add: __ fadds (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1777     case lir_sub: __ fsubs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1778     case lir_mul: __ fmuls (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1779     case lir_div: __ fdivs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1780     default:
1781       ShouldNotReachHere();
1782     }
1783   } else if (left->is_double_fpu()) {
1784     if (right->is_double_fpu()) {
1785       // cpu register - cpu register
1786       switch (code) {
1787       case lir_add: __ faddd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1788       case lir_sub: __ fsubd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1789       case lir_mul: __ fmuld (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1790       case lir_div: __ fdivd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1791       default:
1792         ShouldNotReachHere();
1793       }
1794     } else {
1795       if (right->is_constant()) {
1796         ShouldNotReachHere();
1797       }
1798       ShouldNotReachHere();
1799     }
1800   } else if (left->is_single_stack() || left->is_address()) {
1801     assert(left == dest, "left and dest must be equal");
1802     ShouldNotReachHere();
1803   } else {
1804     ShouldNotReachHere();
1805   }
1806 }
1807 
1808 void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) { Unimplemented(); }
1809 
1810 
1811 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
1812   switch(code) {
1813   case lir_abs : __ fabsd(dest->as_double_reg(), value->as_double_reg()); break;
1814   case lir_sqrt: __ fsqrtd(dest->as_double_reg(), value->as_double_reg()); break;
1815   default      : ShouldNotReachHere();
1816   }
1817 }
1818 
1819 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1820 
1821   assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register");
1822   Register Rleft = left->is_single_cpu() ? left->as_register() :
1823                                            left->as_register_lo();
1824    if (dst->is_single_cpu()) {
1825      Register Rdst = dst->as_register();
1826      if (right->is_constant()) {
1827        switch (code) {
1828          case lir_logic_and: __ andw (Rdst, Rleft, right->as_jint()); break;
1829          case lir_logic_or:  __ orrw (Rdst, Rleft, right->as_jint()); break;
1830          case lir_logic_xor: __ eorw (Rdst, Rleft, right->as_jint()); break;
1831          default: ShouldNotReachHere(); break;
1832        }
1833      } else {
1834        Register Rright = right->is_single_cpu() ? right->as_register() :
1835                                                   right->as_register_lo();
1836        switch (code) {
1837          case lir_logic_and: __ andw (Rdst, Rleft, Rright); break;
1838          case lir_logic_or:  __ orrw (Rdst, Rleft, Rright); break;
1839          case lir_logic_xor: __ eorw (Rdst, Rleft, Rright); break;
1840          default: ShouldNotReachHere(); break;
1841        }
1842      }
1843    } else {
1844      Register Rdst = dst->as_register_lo();
1845      if (right->is_constant()) {
1846        switch (code) {
1847          case lir_logic_and: __ andr (Rdst, Rleft, right->as_jlong()); break;
1848          case lir_logic_or:  __ orr (Rdst, Rleft, right->as_jlong()); break;
1849          case lir_logic_xor: __ eor (Rdst, Rleft, right->as_jlong()); break;
1850          default: ShouldNotReachHere(); break;
1851        }
1852      } else {
1853        Register Rright = right->is_single_cpu() ? right->as_register() :
1854                                                   right->as_register_lo();
1855        switch (code) {
1856          case lir_logic_and: __ andr (Rdst, Rleft, Rright); break;
1857          case lir_logic_or:  __ orr (Rdst, Rleft, Rright); break;
1858          case lir_logic_xor: __ eor (Rdst, Rleft, Rright); break;
1859          default: ShouldNotReachHere(); break;
1860        }
1861      }
1862    }
1863 }
1864 
1865 
1866 
1867 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { Unimplemented(); }
1868 
1869 
1870 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1871   if (opr1->is_constant() && opr2->is_single_cpu()) {
1872     // tableswitch
1873     Register reg = as_reg(opr2);
1874     struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
1875     __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
1876   } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
1877     Register reg1 = as_reg(opr1);
1878     if (opr2->is_single_cpu()) {
1879       // cpu register - cpu register
1880       Register reg2 = opr2->as_register();
1881       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
1882         __ cmp(reg1, reg2);
1883       } else {
1884         assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
1885         __ cmpw(reg1, reg2);
1886       }
1887       return;
1888     }
1889     if (opr2->is_double_cpu()) {
1890       // cpu register - cpu register
1891       Register reg2 = opr2->as_register_lo();
1892       __ cmp(reg1, reg2);
1893       return;
1894     }
1895 
1896     if (opr2->is_constant()) {
1897       bool is_32bit = false; // width of register operand
1898       jlong imm;
1899 
1900       switch(opr2->type()) {
1901       case T_INT:
1902         imm = opr2->as_constant_ptr()->as_jint();
1903         is_32bit = true;
1904         break;
1905       case T_LONG:
1906         imm = opr2->as_constant_ptr()->as_jlong();
1907         break;
1908       case T_ADDRESS:
1909         imm = opr2->as_constant_ptr()->as_jint();
1910         break;
1911       case T_OBJECT:
1912       case T_ARRAY:
1913         imm = jlong(opr2->as_constant_ptr()->as_jobject());
1914         break;
1915       default:
1916         ShouldNotReachHere();
1917         imm = 0;  // unreachable
1918         break;
1919       }
1920 
1921       if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
1922         if (is_32bit)
1923           __ cmpw(reg1, imm);
1924         else
1925           __ cmp(reg1, imm);
1926         return;
1927       } else {
1928         __ mov(rscratch1, imm);
1929         if (is_32bit)
1930           __ cmpw(reg1, rscratch1);
1931         else
1932           __ cmp(reg1, rscratch1);
1933         return;
1934       }
1935     } else
1936       ShouldNotReachHere();
1937   } else if (opr1->is_single_fpu()) {
1938     FloatRegister reg1 = opr1->as_float_reg();
1939     assert(opr2->is_single_fpu(), "expect single float register");
1940     FloatRegister reg2 = opr2->as_float_reg();
1941     __ fcmps(reg1, reg2);
1942   } else if (opr1->is_double_fpu()) {
1943     FloatRegister reg1 = opr1->as_double_reg();
1944     assert(opr2->is_double_fpu(), "expect double float register");
1945     FloatRegister reg2 = opr2->as_double_reg();
1946     __ fcmpd(reg1, reg2);
1947   } else {
1948     ShouldNotReachHere();
1949   }
1950 }
1951 
1952 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
1953   if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
1954     bool is_unordered_less = (code == lir_ucmp_fd2i);
1955     if (left->is_single_fpu()) {
1956       __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
1957     } else if (left->is_double_fpu()) {
1958       __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
1959     } else {
1960       ShouldNotReachHere();
1961     }
1962   } else if (code == lir_cmp_l2i) {
1963     Label done;
1964     __ cmp(left->as_register_lo(), right->as_register_lo());
1965     __ mov(dst->as_register(), (u_int64_t)-1L);
1966     __ br(Assembler::LT, done);
1967     __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
1968     __ bind(done);
1969   } else {
1970     ShouldNotReachHere();
1971   }
1972 }
1973 
1974 
1975 void LIR_Assembler::align_call(LIR_Code code) {  }
1976 
1977 
1978 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
1979   address call = __ trampoline_call(Address(op->addr(), rtype));
1980   if (call == NULL) {
1981     bailout("trampoline stub overflow");
1982     return;
1983   }
1984   add_call_info(code_offset(), op->info());
1985 }
1986 
1987 
1988 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
1989   address call = __ ic_call(op->addr());
1990   if (call == NULL) {
1991     bailout("trampoline stub overflow");
1992     return;
1993   }
1994   add_call_info(code_offset(), op->info());
1995 }
1996 
1997 
1998 /* Currently, vtable-dispatch is only enabled for sparc platforms */
1999 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
2000   ShouldNotReachHere();
2001 }
2002 
2003 
2004 void LIR_Assembler::emit_static_call_stub() {
2005   address call_pc = __ pc();
2006   address stub = __ start_a_stub(call_stub_size());
2007   if (stub == NULL) {
2008     bailout("static call stub overflow");
2009     return;
2010   }
2011 
2012   int start = __ offset();
2013 
2014   __ relocate(static_stub_Relocation::spec(call_pc));
2015   __ mov_metadata(rmethod, (Metadata*)NULL);
2016   __ movptr(rscratch1, 0);
2017   __ br(rscratch1);
2018 
2019   assert(__ offset() - start <= call_stub_size(), "stub too big");
2020   __ end_a_stub();
2021 }
2022 
2023 
2024 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2025   assert(exceptionOop->as_register() == r0, "must match");
2026   assert(exceptionPC->as_register() == r3, "must match");
2027 
2028   // exception object is not added to oop map by LinearScan
2029   // (LinearScan assumes that no oops are in fixed registers)
2030   info->add_register_oop(exceptionOop);
2031   Runtime1::StubID unwind_id;
2032 
2033   // get current pc information
2034   // pc is only needed if the method has an exception handler, the unwind code does not need it.
2035   int pc_for_athrow_offset = __ offset();
2036   InternalAddress pc_for_athrow(__ pc());
2037   __ adr(exceptionPC->as_register(), pc_for_athrow);
2038   add_call_info(pc_for_athrow_offset, info); // for exception handler
2039 
2040   __ verify_not_null_oop(r0);
2041   // search an exception handler (r0: exception oop, r3: throwing pc)
2042   if (compilation()->has_fpu_code()) {
2043     unwind_id = Runtime1::handle_exception_id;
2044   } else {
2045     unwind_id = Runtime1::handle_exception_nofpu_id;
2046   }
2047   __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
2048 
2049   // FIXME: enough room for two byte trap   ????
2050   __ nop();
2051 }
2052 
2053 
2054 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
2055   assert(exceptionOop->as_register() == r0, "must match");
2056 
2057   __ b(_unwind_handler_entry);
2058 }
2059 
2060 
2061 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2062   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2063   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2064 
2065   switch (left->type()) {
2066     case T_INT: {
2067       switch (code) {
2068       case lir_shl:  __ lslvw (dreg, lreg, count->as_register()); break;
2069       case lir_shr:  __ asrvw (dreg, lreg, count->as_register()); break;
2070       case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;
2071       default:
2072         ShouldNotReachHere();
2073         break;
2074       }
2075       break;
2076     case T_LONG:
2077     case T_ADDRESS:
2078     case T_OBJECT:
2079       switch (code) {
2080       case lir_shl:  __ lslv (dreg, lreg, count->as_register()); break;
2081       case lir_shr:  __ asrv (dreg, lreg, count->as_register()); break;
2082       case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;
2083       default:
2084         ShouldNotReachHere();
2085         break;
2086       }
2087       break;
2088     default:
2089       ShouldNotReachHere();
2090       break;
2091     }
2092   }
2093 }
2094 
2095 
2096 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2097   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2098   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2099 
2100   switch (left->type()) {
2101     case T_INT: {
2102       switch (code) {
2103       case lir_shl:  __ lslw (dreg, lreg, count); break;
2104       case lir_shr:  __ asrw (dreg, lreg, count); break;
2105       case lir_ushr: __ lsrw (dreg, lreg, count); break;
2106       default:
2107         ShouldNotReachHere();
2108         break;
2109       }
2110       break;
2111     case T_LONG:
2112     case T_ADDRESS:
2113     case T_OBJECT:
2114       switch (code) {
2115       case lir_shl:  __ lsl (dreg, lreg, count); break;
2116       case lir_shr:  __ asr (dreg, lreg, count); break;
2117       case lir_ushr: __ lsr (dreg, lreg, count); break;
2118       default:
2119         ShouldNotReachHere();
2120         break;
2121       }
2122       break;
2123     default:
2124       ShouldNotReachHere();
2125       break;
2126     }
2127   }
2128 }
2129 
2130 
2131 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2132   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2133   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2134   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2135   __ str (r, Address(sp, offset_from_rsp_in_bytes));
2136 }
2137 
2138 
2139 void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2140   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2141   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2142   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2143   __ mov (rscratch1, c);
2144   __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2145 }
2146 
2147 
2148 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
2149   ShouldNotReachHere();
2150   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2151   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2152   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2153   __ lea(rscratch1, __ constant_oop_address(o));
2154   __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2155 }
2156 
2157 
2158 // This code replaces a call to arraycopy; no exception may
2159 // be thrown in this code, they must be thrown in the System.arraycopy
2160 // activation frame; we could save some checks if this would not be the case
2161 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2162   ciArrayKlass* default_type = op->expected_type();
2163   Register src = op->src()->as_register();
2164   Register dst = op->dst()->as_register();
2165   Register src_pos = op->src_pos()->as_register();
2166   Register dst_pos = op->dst_pos()->as_register();
2167   Register length  = op->length()->as_register();
2168   Register tmp = op->tmp()->as_register();
2169 
2170   CodeStub* stub = op->stub();
2171   int flags = op->flags();
2172   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2173   if (basic_type == T_ARRAY) basic_type = T_OBJECT;
2174 
2175   // if we don't know anything, just go through the generic arraycopy
2176   if (default_type == NULL // || basic_type == T_OBJECT
2177       ) {
2178     Label done;
2179     assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2180 
2181     // Save the arguments in case the generic arraycopy fails and we
2182     // have to fall back to the JNI stub
2183     __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2184     __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2185     __ str(src,              Address(sp, 4*BytesPerWord));
2186 
2187     address copyfunc_addr = StubRoutines::generic_arraycopy();
2188     assert(copyfunc_addr != NULL, "generic arraycopy stub required");
2189 
2190     // The arguments are in java calling convention so we shift them
2191     // to C convention
2192     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2193     __ mov(c_rarg0, j_rarg0);
2194     assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
2195     __ mov(c_rarg1, j_rarg1);
2196     assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
2197     __ mov(c_rarg2, j_rarg2);
2198     assert_different_registers(c_rarg3, j_rarg4);
2199     __ mov(c_rarg3, j_rarg3);
2200     __ mov(c_rarg4, j_rarg4);
2201 #ifndef PRODUCT
2202     if (PrintC1Statistics) {
2203       __ incrementw(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
2204     }
2205 #endif
2206     __ far_call(RuntimeAddress(copyfunc_addr));
2207 
2208     __ cbz(r0, *stub->continuation());
2209 
2210     // Reload values from the stack so they are where the stub
2211     // expects them.
2212     __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2213     __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2214     __ ldr(src,              Address(sp, 4*BytesPerWord));
2215 
2216     // r0 is -1^K where K == partial copied count
2217     __ eonw(rscratch1, r0, 0);
2218     // adjust length down and src/end pos up by partial copied count
2219     __ subw(length, length, rscratch1);
2220     __ addw(src_pos, src_pos, rscratch1);
2221     __ addw(dst_pos, dst_pos, rscratch1);
2222     __ b(*stub->entry());
2223 
2224     __ bind(*stub->continuation());
2225     return;
2226   }
2227 
2228   assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2229 
2230   int elem_size = type2aelembytes(basic_type);
2231   int shift_amount;
2232   int scale = exact_log2(elem_size);
2233 
2234   Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2235   Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2236   Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
2237   Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
2238 
2239   // test for NULL
2240   if (flags & LIR_OpArrayCopy::src_null_check) {
2241     __ cbz(src, *stub->entry());
2242   }
2243   if (flags & LIR_OpArrayCopy::dst_null_check) {
2244     __ cbz(dst, *stub->entry());
2245   }
2246 
2247   // If the compiler was not able to prove that exact type of the source or the destination
2248   // of the arraycopy is an array type, check at runtime if the source or the destination is
2249   // an instance type.
2250   if (flags & LIR_OpArrayCopy::type_check) {
2251     if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::dst_objarray)) {
2252       __ load_klass(tmp, dst);
2253       __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2254       __ cmpw(rscratch1, Klass::_lh_neutral_value);
2255       __ br(Assembler::GE, *stub->entry());
2256     }
2257 
2258     if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::src_objarray)) {
2259       __ load_klass(tmp, src);
2260       __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2261       __ cmpw(rscratch1, Klass::_lh_neutral_value);
2262       __ br(Assembler::GE, *stub->entry());
2263     }
2264   }
2265 
2266   // check if negative
2267   if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
2268     __ cmpw(src_pos, 0);
2269     __ br(Assembler::LT, *stub->entry());
2270   }
2271   if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
2272     __ cmpw(dst_pos, 0);
2273     __ br(Assembler::LT, *stub->entry());
2274   }
2275 
2276   if (flags & LIR_OpArrayCopy::length_positive_check) {
2277     __ cmpw(length, 0);
2278     __ br(Assembler::LT, *stub->entry());
2279   }
2280 
2281   if (flags & LIR_OpArrayCopy::src_range_check) {
2282     __ addw(tmp, src_pos, length);
2283     __ ldrw(rscratch1, src_length_addr);
2284     __ cmpw(tmp, rscratch1);
2285     __ br(Assembler::HI, *stub->entry());
2286   }
2287   if (flags & LIR_OpArrayCopy::dst_range_check) {
2288     __ addw(tmp, dst_pos, length);
2289     __ ldrw(rscratch1, dst_length_addr);
2290     __ cmpw(tmp, rscratch1);
2291     __ br(Assembler::HI, *stub->entry());
2292   }
2293 
2294   if (flags & LIR_OpArrayCopy::type_check) {
2295     // We don't know the array types are compatible
2296     if (basic_type != T_OBJECT) {
2297       // Simple test for basic type arrays
2298       if (UseCompressedClassPointers) {
2299         __ ldrw(tmp, src_klass_addr);
2300         __ ldrw(rscratch1, dst_klass_addr);
2301         __ cmpw(tmp, rscratch1);
2302       } else {
2303         __ ldr(tmp, src_klass_addr);
2304         __ ldr(rscratch1, dst_klass_addr);
2305         __ cmp(tmp, rscratch1);
2306       }
2307       __ br(Assembler::NE, *stub->entry());
2308     } else {
2309       // For object arrays, if src is a sub class of dst then we can
2310       // safely do the copy.
2311       Label cont, slow;
2312 
2313 #define PUSH(r1, r2)                                    \
2314       stp(r1, r2, __ pre(sp, -2 * wordSize));
2315 
2316 #define POP(r1, r2)                                     \
2317       ldp(r1, r2, __ post(sp, 2 * wordSize));
2318 
2319       __ PUSH(src, dst);
2320 
2321       __ load_klass(src, src);
2322       __ load_klass(dst, dst);
2323 
2324       __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
2325 
2326       __ PUSH(src, dst);
2327       __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
2328       __ POP(src, dst);
2329 
2330       __ cbnz(src, cont);
2331 
2332       __ bind(slow);
2333       __ POP(src, dst);
2334 
2335       address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2336       if (copyfunc_addr != NULL) { // use stub if available
2337         // src is not a sub class of dst so we have to do a
2338         // per-element check.
2339 
2340         int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
2341         if ((flags & mask) != mask) {
2342           // Check that at least both of them object arrays.
2343           assert(flags & mask, "one of the two should be known to be an object array");
2344 
2345           if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2346             __ load_klass(tmp, src);
2347           } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2348             __ load_klass(tmp, dst);
2349           }
2350           int lh_offset = in_bytes(Klass::layout_helper_offset());
2351           Address klass_lh_addr(tmp, lh_offset);
2352           jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2353           __ ldrw(rscratch1, klass_lh_addr);
2354           __ mov(rscratch2, objArray_lh);
2355           __ eorw(rscratch1, rscratch1, rscratch2);
2356           __ cbnzw(rscratch1, *stub->entry());
2357         }
2358 
2359        // Spill because stubs can use any register they like and it's
2360        // easier to restore just those that we care about.
2361         __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2362         __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2363         __ str(src,              Address(sp, 4*BytesPerWord));
2364 
2365         __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
2366         __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
2367         assert_different_registers(c_rarg0, dst, dst_pos, length);
2368         __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
2369         __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
2370         assert_different_registers(c_rarg1, dst, length);
2371         __ uxtw(c_rarg2, length);
2372         assert_different_registers(c_rarg2, dst);
2373 
2374         __ load_klass(c_rarg4, dst);
2375         __ ldr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
2376         __ ldrw(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
2377         __ far_call(RuntimeAddress(copyfunc_addr));
2378 
2379 #ifndef PRODUCT
2380         if (PrintC1Statistics) {
2381           Label failed;
2382           __ cbnz(r0, failed);
2383           __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
2384           __ bind(failed);
2385         }
2386 #endif
2387 
2388         __ cbz(r0, *stub->continuation());
2389 
2390 #ifndef PRODUCT
2391         if (PrintC1Statistics) {
2392           __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
2393         }
2394 #endif
2395         assert_different_registers(dst, dst_pos, length, src_pos, src, r0, rscratch1);
2396 
2397         // Restore previously spilled arguments
2398         __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2399         __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2400         __ ldr(src,              Address(sp, 4*BytesPerWord));
2401 
2402         // return value is -1^K where K is partial copied count
2403         __ eonw(rscratch1, r0, zr);
2404         // adjust length down and src/end pos up by partial copied count
2405         __ subw(length, length, rscratch1);
2406         __ addw(src_pos, src_pos, rscratch1);
2407         __ addw(dst_pos, dst_pos, rscratch1);
2408       }
2409 
2410       __ b(*stub->entry());
2411 
2412       __ bind(cont);
2413       __ POP(src, dst);
2414     }
2415   }
2416 
2417 #ifdef ASSERT
2418   if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2419     // Sanity check the known type with the incoming class.  For the
2420     // primitive case the types must match exactly with src.klass and
2421     // dst.klass each exactly matching the default type.  For the
2422     // object array case, if no type check is needed then either the
2423     // dst type is exactly the expected type and the src type is a
2424     // subtype which we can't check or src is the same array as dst
2425     // but not necessarily exactly of type default_type.
2426     Label known_ok, halt;
2427     __ mov_metadata(tmp, default_type->constant_encoding());
2428     if (UseCompressedClassPointers) {
2429       __ encode_klass_not_null(tmp);
2430     }
2431 
2432     if (basic_type != T_OBJECT) {
2433 
2434       if (UseCompressedClassPointers) {
2435         __ ldrw(rscratch1, dst_klass_addr);
2436         __ cmpw(tmp, rscratch1);
2437       } else {
2438         __ ldr(rscratch1, dst_klass_addr);
2439         __ cmp(tmp, rscratch1);
2440       }
2441       __ br(Assembler::NE, halt);
2442       if (UseCompressedClassPointers) {
2443         __ ldrw(rscratch1, src_klass_addr);
2444         __ cmpw(tmp, rscratch1);
2445       } else {
2446         __ ldr(rscratch1, src_klass_addr);
2447         __ cmp(tmp, rscratch1);
2448       }
2449       __ br(Assembler::EQ, known_ok);
2450     } else {
2451       if (UseCompressedClassPointers) {
2452         __ ldrw(rscratch1, dst_klass_addr);
2453         __ cmpw(tmp, rscratch1);
2454       } else {
2455         __ ldr(rscratch1, dst_klass_addr);
2456         __ cmp(tmp, rscratch1);
2457       }
2458       __ br(Assembler::EQ, known_ok);
2459       __ cmp(src, dst);
2460       __ br(Assembler::EQ, known_ok);
2461     }
2462     __ bind(halt);
2463     __ stop("incorrect type information in arraycopy");
2464     __ bind(known_ok);
2465   }
2466 #endif
2467 
2468 #ifndef PRODUCT
2469   if (PrintC1Statistics) {
2470     __ incrementw(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
2471   }
2472 #endif
2473 
2474   __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
2475   __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
2476   assert_different_registers(c_rarg0, dst, dst_pos, length);
2477   __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
2478   __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
2479   assert_different_registers(c_rarg1, dst, length);
2480   __ uxtw(c_rarg2, length);
2481   assert_different_registers(c_rarg2, dst);
2482 
2483   bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
2484   bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
2485   const char *name;
2486   address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2487 
2488  CodeBlob *cb = CodeCache::find_blob(entry);
2489  if (cb) {
2490    __ far_call(RuntimeAddress(entry));
2491  } else {
2492    __ call_VM_leaf(entry, 3);
2493  }
2494 
2495   __ bind(*stub->continuation());
2496 }
2497 
2498 
2499 
2500 
2501 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2502   Register obj = op->obj_opr()->as_register();  // may not be an oop
2503   Register hdr = op->hdr_opr()->as_register();
2504   Register lock = op->lock_opr()->as_register();
2505   if (!UseFastLocking) {
2506     __ b(*op->stub()->entry());
2507   } else if (op->code() == lir_lock) {
2508     Register scratch = noreg;
2509     if (UseBiasedLocking) {
2510       scratch = op->scratch_opr()->as_register();
2511     }
2512     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2513     // add debug info for NullPointerException only if one is possible
2514     int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
2515     if (op->info() != NULL) {
2516       add_debug_info_for_null_check(null_check_offset, op->info());
2517     }
2518     // done
2519   } else if (op->code() == lir_unlock) {
2520     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2521     __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2522   } else {
2523     Unimplemented();
2524   }
2525   __ bind(*op->stub()->continuation());
2526 }
2527 
2528 
2529 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2530   ciMethod* method = op->profiled_method();
2531   int bci          = op->profiled_bci();
2532   ciMethod* callee = op->profiled_callee();
2533 
2534   // Update counter for all call types
2535   ciMethodData* md = method->method_data_or_null();
2536   assert(md != NULL, "Sanity");
2537   ciProfileData* data = md->bci_to_data(bci);
2538   assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
2539   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
2540   Register mdo  = op->mdo()->as_register();
2541   __ mov_metadata(mdo, md->constant_encoding());
2542   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
2543   // Perform additional virtual call profiling for invokevirtual and
2544   // invokeinterface bytecodes
2545   if (op->should_profile_receiver_type()) {
2546     assert(op->recv()->is_single_cpu(), "recv must be allocated");
2547     Register recv = op->recv()->as_register();
2548     assert_different_registers(mdo, recv);
2549     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
2550     ciKlass* known_klass = op->known_holder();
2551     if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
2552       // We know the type that will be seen at this call site; we can
2553       // statically update the MethodData* rather than needing to do
2554       // dynamic tests on the receiver type
2555 
2556       // NOTE: we should probably put a lock around this search to
2557       // avoid collisions by concurrent compilations
2558       ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
2559       uint i;
2560       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2561         ciKlass* receiver = vc_data->receiver(i);
2562         if (known_klass->equals(receiver)) {
2563           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
2564           __ addptr(data_addr, DataLayout::counter_increment);
2565           return;
2566         }
2567       }
2568 
2569       // Receiver type not found in profile data; select an empty slot
2570 
2571       // Note that this is less efficient than it should be because it
2572       // always does a write to the receiver part of the
2573       // VirtualCallData rather than just the first time
2574       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2575         ciKlass* receiver = vc_data->receiver(i);
2576         if (receiver == NULL) {
2577           Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
2578           __ mov_metadata(rscratch1, known_klass->constant_encoding());
2579           __ lea(rscratch2, recv_addr);
2580           __ str(rscratch1, Address(rscratch2));
2581           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
2582           __ addptr(data_addr, DataLayout::counter_increment);
2583           return;
2584         }
2585       }
2586     } else {
2587       __ load_klass(recv, recv);
2588       Label update_done;
2589       type_profile_helper(mdo, md, data, recv, &update_done);
2590       // Receiver did not match any saved receiver and there is no empty row for it.
2591       // Increment total counter to indicate polymorphic case.
2592       __ addptr(counter_addr, DataLayout::counter_increment);
2593 
2594       __ bind(update_done);
2595     }
2596   } else {
2597     // Static call
2598     __ addptr(counter_addr, DataLayout::counter_increment);
2599   }
2600 }
2601 
2602 
2603 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
2604   Unimplemented();
2605 }
2606 
2607 
2608 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
2609   __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
2610 }
2611 
2612 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
2613   assert(op->crc()->is_single_cpu(),  "crc must be register");
2614   assert(op->val()->is_single_cpu(),  "byte value must be register");
2615   assert(op->result_opr()->is_single_cpu(), "result must be register");
2616   Register crc = op->crc()->as_register();
2617   Register val = op->val()->as_register();
2618   Register res = op->result_opr()->as_register();
2619 
2620   assert_different_registers(val, crc, res);
2621   unsigned long offset;
2622   __ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset);
2623   if (offset) __ add(res, res, offset);
2624 
2625   __ mvnw(crc, crc); // ~crc
2626   __ update_byte_crc32(crc, val, res);
2627   __ mvnw(res, crc); // ~crc
2628 }
2629 
2630 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
2631   COMMENT("emit_profile_type {");
2632   Register obj = op->obj()->as_register();
2633   Register tmp = op->tmp()->as_pointer_register();
2634   Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
2635   ciKlass* exact_klass = op->exact_klass();
2636   intptr_t current_klass = op->current_klass();
2637   bool not_null = op->not_null();
2638   bool no_conflict = op->no_conflict();
2639 
2640   Label update, next, none;
2641 
2642   bool do_null = !not_null;
2643   bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
2644   bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
2645 
2646   assert(do_null || do_update, "why are we here?");
2647   assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
2648   assert(mdo_addr.base() != rscratch1, "wrong register");
2649 
2650   __ verify_oop(obj);
2651 
2652   if (tmp != obj) {
2653     __ mov(tmp, obj);
2654   }
2655   if (do_null) {
2656     __ cbnz(tmp, update);
2657     if (!TypeEntries::was_null_seen(current_klass)) {
2658       __ ldr(rscratch2, mdo_addr);
2659       __ orr(rscratch2, rscratch2, TypeEntries::null_seen);
2660       __ str(rscratch2, mdo_addr);
2661     }
2662     if (do_update) {
2663 #ifndef ASSERT
2664       __ b(next);
2665     }
2666 #else
2667       __ b(next);
2668     }
2669   } else {
2670     __ cbnz(tmp, update);
2671     __ stop("unexpected null obj");
2672 #endif
2673   }
2674 
2675   __ bind(update);
2676 
2677   if (do_update) {
2678 #ifdef ASSERT
2679     if (exact_klass != NULL) {
2680       Label ok;
2681       __ load_klass(tmp, tmp);
2682       __ mov_metadata(rscratch1, exact_klass->constant_encoding());
2683       __ eor(rscratch1, tmp, rscratch1);
2684       __ cbz(rscratch1, ok);
2685       __ stop("exact klass and actual klass differ");
2686       __ bind(ok);
2687     }
2688 #endif
2689     if (!no_conflict) {
2690       if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
2691         if (exact_klass != NULL) {
2692           __ mov_metadata(tmp, exact_klass->constant_encoding());
2693         } else {
2694           __ load_klass(tmp, tmp);
2695         }
2696 
2697         __ ldr(rscratch2, mdo_addr);
2698         __ eor(tmp, tmp, rscratch2);
2699         __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2700         // klass seen before, nothing to do. The unknown bit may have been
2701         // set already but no need to check.
2702         __ cbz(rscratch1, next);
2703 
2704         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2705 
2706         if (TypeEntries::is_type_none(current_klass)) {
2707           __ cbz(rscratch2, none);
2708           __ cmp(rscratch2, TypeEntries::null_seen);
2709           __ br(Assembler::EQ, none);
2710           // There is a chance that the checks above (re-reading profiling
2711           // data from memory) fail if another thread has just set the
2712           // profiling to this obj's klass
2713           __ dmb(Assembler::ISHLD);
2714           __ ldr(rscratch2, mdo_addr);
2715           __ eor(tmp, tmp, rscratch2);
2716           __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2717           __ cbz(rscratch1, next);
2718         }
2719       } else {
2720         assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
2721                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
2722 
2723         __ ldr(tmp, mdo_addr);
2724         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2725       }
2726 
2727       // different than before. Cannot keep accurate profile.
2728       __ ldr(rscratch2, mdo_addr);
2729       __ orr(rscratch2, rscratch2, TypeEntries::type_unknown);
2730       __ str(rscratch2, mdo_addr);
2731 
2732       if (TypeEntries::is_type_none(current_klass)) {
2733         __ b(next);
2734 
2735         __ bind(none);
2736         // first time here. Set profile type.
2737         __ str(tmp, mdo_addr);
2738       }
2739     } else {
2740       // There's a single possible klass at this profile point
2741       assert(exact_klass != NULL, "should be");
2742       if (TypeEntries::is_type_none(current_klass)) {
2743         __ mov_metadata(tmp, exact_klass->constant_encoding());
2744         __ ldr(rscratch2, mdo_addr);
2745         __ eor(tmp, tmp, rscratch2);
2746         __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2747         __ cbz(rscratch1, next);
2748 #ifdef ASSERT
2749         {
2750           Label ok;
2751           __ ldr(rscratch1, mdo_addr);
2752           __ cbz(rscratch1, ok);
2753           __ cmp(rscratch1, TypeEntries::null_seen);
2754           __ br(Assembler::EQ, ok);
2755           // may have been set by another thread
2756           __ dmb(Assembler::ISHLD);
2757           __ mov_metadata(rscratch1, exact_klass->constant_encoding());
2758           __ ldr(rscratch2, mdo_addr);
2759           __ eor(rscratch2, rscratch1, rscratch2);
2760           __ andr(rscratch2, rscratch2, TypeEntries::type_mask);
2761           __ cbz(rscratch2, ok);
2762 
2763           __ stop("unexpected profiling mismatch");
2764           __ bind(ok);
2765         }
2766 #endif
2767         // first time here. Set profile type.
2768         __ ldr(tmp, mdo_addr);
2769       } else {
2770         assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
2771                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2772 
2773         __ ldr(tmp, mdo_addr);
2774         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2775 
2776         __ orr(tmp, tmp, TypeEntries::type_unknown);
2777         __ str(tmp, mdo_addr);
2778         // FIXME: Write barrier needed here?
2779       }
2780     }
2781 
2782     __ bind(next);
2783   }
2784   COMMENT("} emit_profile_type");
2785 }
2786 
2787 
2788 void LIR_Assembler::align_backward_branch_target() {
2789 }
2790 
2791 
2792 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
2793   if (left->is_single_cpu()) {
2794     assert(dest->is_single_cpu(), "expect single result reg");
2795     __ negw(dest->as_register(), left->as_register());
2796   } else if (left->is_double_cpu()) {
2797     assert(dest->is_double_cpu(), "expect double result reg");
2798     __ neg(dest->as_register_lo(), left->as_register_lo());
2799   } else if (left->is_single_fpu()) {
2800     assert(dest->is_single_fpu(), "expect single float result reg");
2801     __ fnegs(dest->as_float_reg(), left->as_float_reg());
2802   } else {
2803     assert(left->is_double_fpu(), "expect double float operand reg");
2804     assert(dest->is_double_fpu(), "expect double float result reg");
2805     __ fnegd(dest->as_double_reg(), left->as_double_reg());
2806   }
2807 }
2808 
2809 
2810 void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
2811   __ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr()));
2812 }
2813 
2814 
2815 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
2816   assert(!tmp->is_valid(), "don't need temporary");
2817 
2818   CodeBlob *cb = CodeCache::find_blob(dest);
2819   if (cb) {
2820     __ far_call(RuntimeAddress(dest));
2821   } else {
2822     __ mov(rscratch1, RuntimeAddress(dest));
2823     int len = args->length();
2824     int type = 0;
2825     if (! result->is_illegal()) {
2826       switch (result->type()) {
2827       case T_VOID:
2828         type = 0;
2829         break;
2830       case T_INT:
2831       case T_LONG:
2832       case T_OBJECT:
2833         type = 1;
2834         break;
2835       case T_FLOAT:
2836         type = 2;
2837         break;
2838       case T_DOUBLE:
2839         type = 3;
2840         break;
2841       default:
2842         ShouldNotReachHere();
2843         break;
2844       }
2845     }
2846     int num_gpargs = 0;
2847     int num_fpargs = 0;
2848     for (int i = 0; i < args->length(); i++) {
2849       LIR_Opr arg = args->at(i);
2850       if (arg->type() == T_FLOAT || arg->type() == T_DOUBLE) {
2851         num_fpargs++;
2852       } else {
2853         num_gpargs++;
2854       }
2855     }
2856     __ blrt(rscratch1, num_gpargs, num_fpargs, type);
2857   }
2858 
2859   if (info != NULL) {
2860     add_call_info_here(info);
2861   }
2862   __ maybe_isb();
2863 }
2864 
2865 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
2866   if (dest->is_address() || src->is_address()) {
2867     move_op(src, dest, type, lir_patch_none, info,
2868             /*pop_fpu_stack*/false, /*unaligned*/false, /*wide*/false);
2869   } else {
2870     ShouldNotReachHere();
2871   }
2872 }
2873 
2874 #ifdef ASSERT
2875 // emit run-time assertion
2876 void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
2877   assert(op->code() == lir_assert, "must be");
2878 
2879   if (op->in_opr1()->is_valid()) {
2880     assert(op->in_opr2()->is_valid(), "both operands must be valid");
2881     comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
2882   } else {
2883     assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
2884     assert(op->condition() == lir_cond_always, "no other conditions allowed");
2885   }
2886 
2887   Label ok;
2888   if (op->condition() != lir_cond_always) {
2889     Assembler::Condition acond = Assembler::AL;
2890     switch (op->condition()) {
2891       case lir_cond_equal:        acond = Assembler::EQ;  break;
2892       case lir_cond_notEqual:     acond = Assembler::NE;  break;
2893       case lir_cond_less:         acond = Assembler::LT;  break;
2894       case lir_cond_lessEqual:    acond = Assembler::LE;  break;
2895       case lir_cond_greaterEqual: acond = Assembler::GE;  break;
2896       case lir_cond_greater:      acond = Assembler::GT;  break;
2897       case lir_cond_belowEqual:   acond = Assembler::LS;  break;
2898       case lir_cond_aboveEqual:   acond = Assembler::HS;  break;
2899       default:                    ShouldNotReachHere();
2900     }
2901     __ br(acond, ok);
2902   }
2903   if (op->halt()) {
2904     const char* str = __ code_string(op->msg());
2905     __ stop(str);
2906   } else {
2907     breakpoint();
2908   }
2909   __ bind(ok);
2910 }
2911 #endif
2912 
2913 #ifndef PRODUCT
2914 #define COMMENT(x)   do { __ block_comment(x); } while (0)
2915 #else
2916 #define COMMENT(x)
2917 #endif
2918 
2919 void LIR_Assembler::membar() {
2920   COMMENT("membar");
2921   __ membar(MacroAssembler::AnyAny);
2922 }
2923 
2924 void LIR_Assembler::membar_acquire() {
2925   __ membar(Assembler::LoadLoad|Assembler::LoadStore);
2926 }
2927 
2928 void LIR_Assembler::membar_release() {
2929   __ membar(Assembler::LoadStore|Assembler::StoreStore);
2930 }
2931 
2932 void LIR_Assembler::membar_loadload() {
2933   __ membar(Assembler::LoadLoad);
2934 }
2935 
2936 void LIR_Assembler::membar_storestore() {
2937   __ membar(MacroAssembler::StoreStore);
2938 }
2939 
2940 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
2941 
2942 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
2943 
2944 void LIR_Assembler::on_spin_wait() {
2945   Unimplemented();
2946 }
2947 
2948 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
2949   __ mov(result_reg->as_register(), rthread);
2950 }
2951 
2952 
2953 void LIR_Assembler::peephole(LIR_List *lir) {
2954 #if 0
2955   if (tableswitch_count >= max_tableswitches)
2956     return;
2957 
2958   /*
2959     This finite-state automaton recognizes sequences of compare-and-
2960     branch instructions.  We will turn them into a tableswitch.  You
2961     could argue that C1 really shouldn't be doing this sort of
2962     optimization, but without it the code is really horrible.
2963   */
2964 
2965   enum { start_s, cmp1_s, beq_s, cmp_s } state;
2966   int first_key, last_key = -2147483648;
2967   int next_key = 0;
2968   int start_insn = -1;
2969   int last_insn = -1;
2970   Register reg = noreg;
2971   LIR_Opr reg_opr;
2972   state = start_s;
2973 
2974   LIR_OpList* inst = lir->instructions_list();
2975   for (int i = 0; i < inst->length(); i++) {
2976     LIR_Op* op = inst->at(i);
2977     switch (state) {
2978     case start_s:
2979       first_key = -1;
2980       start_insn = i;
2981       switch (op->code()) {
2982       case lir_cmp:
2983         LIR_Opr opr1 = op->as_Op2()->in_opr1();
2984         LIR_Opr opr2 = op->as_Op2()->in_opr2();
2985         if (opr1->is_cpu_register() && opr1->is_single_cpu()
2986             && opr2->is_constant()
2987             && opr2->type() == T_INT) {
2988           reg_opr = opr1;
2989           reg = opr1->as_register();
2990           first_key = opr2->as_constant_ptr()->as_jint();
2991           next_key = first_key + 1;
2992           state = cmp_s;
2993           goto next_state;
2994         }
2995         break;
2996       }
2997       break;
2998     case cmp_s:
2999       switch (op->code()) {
3000       case lir_branch:
3001         if (op->as_OpBranch()->cond() == lir_cond_equal) {
3002           state = beq_s;
3003           last_insn = i;
3004           goto next_state;
3005         }
3006       }
3007       state = start_s;
3008       break;
3009     case beq_s:
3010       switch (op->code()) {
3011       case lir_cmp: {
3012         LIR_Opr opr1 = op->as_Op2()->in_opr1();
3013         LIR_Opr opr2 = op->as_Op2()->in_opr2();
3014         if (opr1->is_cpu_register() && opr1->is_single_cpu()
3015             && opr1->as_register() == reg
3016             && opr2->is_constant()
3017             && opr2->type() == T_INT
3018             && opr2->as_constant_ptr()->as_jint() == next_key) {
3019           last_key = next_key;
3020           next_key++;
3021           state = cmp_s;
3022           goto next_state;
3023         }
3024       }
3025       }
3026       last_key = next_key;
3027       state = start_s;
3028       break;
3029     default:
3030       assert(false, "impossible state");
3031     }
3032     if (state == start_s) {
3033       if (first_key < last_key - 5L && reg != noreg) {
3034         {
3035           // printf("found run register %d starting at insn %d low value %d high value %d\n",
3036           //        reg->encoding(),
3037           //        start_insn, first_key, last_key);
3038           //   for (int i = 0; i < inst->length(); i++) {
3039           //     inst->at(i)->print();
3040           //     tty->print("\n");
3041           //   }
3042           //   tty->print("\n");
3043         }
3044 
3045         struct tableswitch *sw = &switches[tableswitch_count];
3046         sw->_insn_index = start_insn, sw->_first_key = first_key,
3047           sw->_last_key = last_key, sw->_reg = reg;
3048         inst->insert_before(last_insn + 1, new LIR_OpLabel(&sw->_after));
3049         {
3050           // Insert the new table of branches
3051           int offset = last_insn;
3052           for (int n = first_key; n < last_key; n++) {
3053             inst->insert_before
3054               (last_insn + 1,
3055                new LIR_OpBranch(lir_cond_always, T_ILLEGAL,
3056                                 inst->at(offset)->as_OpBranch()->label()));
3057             offset -= 2, i++;
3058           }
3059         }
3060         // Delete all the old compare-and-branch instructions
3061         for (int n = first_key; n < last_key; n++) {
3062           inst->remove_at(start_insn);
3063           inst->remove_at(start_insn);
3064         }
3065         // Insert the tableswitch instruction
3066         inst->insert_before(start_insn,
3067                             new LIR_Op2(lir_cmp, lir_cond_always,
3068                                         LIR_OprFact::intConst(tableswitch_count),
3069                                         reg_opr));
3070         inst->insert_before(start_insn + 1, new LIR_OpLabel(&sw->_branches));
3071         tableswitch_count++;
3072       }
3073       reg = noreg;
3074       last_key = -2147483648;
3075     }
3076   next_state:
3077     ;
3078   }
3079 #endif
3080 }
3081 
3082 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
3083   Address addr = as_Address(src->as_address_ptr());
3084   BasicType type = src->type();
3085   bool is_oop = type == T_OBJECT || type == T_ARRAY;
3086 
3087   void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
3088   void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);
3089 
3090   switch(type) {
3091   case T_INT:
3092     xchg = &MacroAssembler::atomic_xchgalw;
3093     add = &MacroAssembler::atomic_addalw;
3094     break;
3095   case T_LONG:
3096     xchg = &MacroAssembler::atomic_xchgal;
3097     add = &MacroAssembler::atomic_addal;
3098     break;
3099   case T_OBJECT:
3100   case T_ARRAY:
3101     if (UseCompressedOops) {
3102       xchg = &MacroAssembler::atomic_xchgalw;
3103       add = &MacroAssembler::atomic_addalw;
3104     } else {
3105       xchg = &MacroAssembler::atomic_xchgal;
3106       add = &MacroAssembler::atomic_addal;
3107     }
3108     break;
3109   default:
3110     ShouldNotReachHere();
3111     xchg = &MacroAssembler::atomic_xchgal;
3112     add = &MacroAssembler::atomic_addal; // unreachable
3113   }
3114 
3115   switch (code) {
3116   case lir_xadd:
3117     {
3118       RegisterOrConstant inc;
3119       Register tmp = as_reg(tmp_op);
3120       Register dst = as_reg(dest);
3121       if (data->is_constant()) {
3122         inc = RegisterOrConstant(as_long(data));
3123         assert_different_registers(dst, addr.base(), tmp,
3124                                    rscratch1, rscratch2);
3125       } else {
3126         inc = RegisterOrConstant(as_reg(data));
3127         assert_different_registers(inc.as_register(), dst, addr.base(), tmp,
3128                                    rscratch1, rscratch2);
3129       }
3130       __ lea(tmp, addr);
3131       (_masm->*add)(dst, inc, tmp);
3132       break;
3133     }
3134   case lir_xchg:
3135     {
3136       Register tmp = tmp_op->as_register();
3137       Register obj = as_reg(data);
3138       Register dst = as_reg(dest);
3139       if (is_oop && UseCompressedOops) {
3140         __ encode_heap_oop(rscratch2, obj);
3141         obj = rscratch2;
3142       }
3143       assert_different_registers(obj, addr.base(), tmp, rscratch1, dst);
3144       __ lea(tmp, addr);
3145       (_masm->*xchg)(dst, obj, tmp);
3146       if (is_oop && UseCompressedOops) {
3147         __ decode_heap_oop(dst);
3148       }
3149     }
3150     break;
3151   default:
3152     ShouldNotReachHere();
3153   }
3154   __ membar(__ AnyAny);
3155 }
3156 
3157 #undef __