1 /*
   2  * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Compilation.hpp"
  27 #include "c1/c1_FrameMap.hpp"
  28 #include "c1/c1_Instruction.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_LIRGenerator.hpp"
  31 #include "c1/c1_Runtime1.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArray.hpp"
  34 #include "ci/ciObjArrayKlass.hpp"
  35 #include "ci/ciTypeArrayKlass.hpp"
  36 #include "ci/ciUtilities.hpp"
  37 #include "gc/shared/c1/barrierSetC1.hpp"
  38 #include "gc/shared/cardTable.hpp"
  39 #include "gc/shared/cardTableBarrierSet.hpp"
  40 #include "runtime/sharedRuntime.hpp"
  41 #include "runtime/stubRoutines.hpp"
  42 #include "vmreg_arm.inline.hpp"
  43 
  44 #ifdef ASSERT
  45 #define __ gen()->lir(__FILE__, __LINE__)->
  46 #else
  47 #define __ gen()->lir()->
  48 #endif
  49 
// On ARM any general-purpose register can hold a byte value, so a byte
// item needs no special treatment: load it like any other item.
void LIRItem::load_byte_item() {
  load_item();
}
  53 
  54 void LIRItem::load_nonconstant() {
  55   LIR_Opr r = value()->operand();
  56   if (_gen->can_inline_as_constant(value())) {
  57     if (!r->is_constant()) {
  58       r = LIR_OprFact::value_type(value()->type());
  59     }
  60     _result = r;
  61   } else {
  62     load_item();
  63   }
  64 }
  65 
  66 //--------------------------------------------------------------
  67 //               LIRGenerator
  68 //--------------------------------------------------------------
  69 
  70 
// Fixed register operand holding the exception oop on entry to an
// exception handler.
LIR_Opr LIRGenerator::exceptionOopOpr() {
  return FrameMap::Exception_oop_opr;
}
  74 
// Fixed register operand holding the exception pc on entry to an
// exception handler.
LIR_Opr LIRGenerator::exceptionPcOpr()  {
  return FrameMap::Exception_pc_opr;
}
  78 
// Fresh int register used to hold the lock during synchronization.
LIR_Opr LIRGenerator::syncLockOpr()     {
  return new_register(T_INT);
}
  82 
// Fresh object register used as a temporary during synchronization.
LIR_Opr LIRGenerator::syncTempOpr()     {
  return new_register(T_OBJECT);
}
  86 
// No scratch register is needed to reach the current thread on ARM,
// so no operand is handed out.
LIR_Opr LIRGenerator::getThreadTemp()   {
  return LIR_OprFact::illegalOpr;
}
  90 
// No dedicated register is reserved for atomic locking on this platform.
LIR_Opr LIRGenerator::atomicLockOpr() {
  return LIR_OprFact::illegalOpr;
}
  94 
  95 LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  96   LIR_Opr opr;
  97   switch (type->tag()) {
  98     case intTag:     opr = FrameMap::Int_result_opr;    break;
  99     case objectTag:  opr = FrameMap::Object_result_opr; break;
 100     case longTag:    opr = FrameMap::Long_result_opr;   break;
 101     case floatTag:   opr = FrameMap::Float_result_opr;  break;
 102     case doubleTag:  opr = FrameMap::Double_result_opr; break;
 103     case addressTag:
 104     default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
 105   }
 106   assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
 107   return opr;
 108 }
 109 
 110 
// On ARM a byte can live in any general register, so a plain int
// register suffices.
LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  return new_register(T_INT);
}
 114 
 115 
 116 //--------- loading items into registers --------------------------------
 117 
 118 
// Returns whether value v may be stored directly to memory as a constant
// without first materializing it in a register.
// On AArch64 only constants whose bit pattern is zero qualify (the zero
// register can serve as the store source); on 32-bit ARM no constant can
// be stored directly.
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
#ifdef AARCH64
  if (v->type()->as_IntConstant() != NULL) {
    return v->type()->as_IntConstant()->value() == 0;
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else if (v->type()->as_FloatConstant() != NULL) {
    // Bit-pattern comparison: only +0.0f qualifies, not -0.0f.
    return jint_cast(v->type()->as_FloatConstant()->value()) == 0;
  } else if (v->type()->as_DoubleConstant() != NULL) {
    return jlong_cast(v->type()->as_DoubleConstant()->value()) == 0;
  }
#endif // AARCH64
  return false;
}
 135 
 136 
// Returns whether value v can be encoded directly as an instruction
// immediate instead of being loaded into a register:
//  - int constants (and, on AArch64, long constants) that fit the
//    arithmetic immediate encoding,
//  - the null object constant,
//  - on 32-bit ARM, float/double constants comparing equal to 0.0.
bool LIRGenerator::can_inline_as_constant(Value v) const {
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::is_arith_imm_in_range(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
#ifdef AARCH64
  } else if (v->type()->as_LongConstant() != NULL) {
    return Assembler::is_arith_imm_in_range(v->type()->as_LongConstant()->value());
#else
  } else if (v->type()->as_FloatConstant() != NULL) {
    return v->type()->as_FloatConstant()->value() == 0.0f;
  } else if (v->type()->as_DoubleConstant() != NULL) {
    return v->type()->as_DoubleConstant()->value() == 0.0;
#endif // AARCH64
  }
  return false;
}
 154 
 155 
// Unused variant on ARM: inlining decisions are made on Values here,
// not on raw LIR constants.
bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  ShouldNotCallThis(); // Not used on ARM
  return false;
}
 160 
 161 
 162 #ifdef AARCH64
 163 
// AArch64 only: returns whether v can be used as the immediate operand
// of a compare. Integral constants qualify when the value or its
// negation fits the arithmetic immediate encoding; the null object and
// floating point zeros can also be compared against directly.
static bool can_inline_as_constant_in_cmp(Value v) {
  jlong constant;
  if (v->type()->as_IntConstant() != NULL) {
    constant = v->type()->as_IntConstant()->value();
  } else if (v->type()->as_LongConstant() != NULL) {
    constant = v->type()->as_LongConstant()->value();
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else if (v->type()->as_FloatConstant() != NULL) {
    return v->type()->as_FloatConstant()->value() == 0.0f;
  } else if (v->type()->as_DoubleConstant() != NULL) {
    return v->type()->as_DoubleConstant()->value() == 0.0;
  } else {
    return false;
  }

  // Either sign works since the comparison can be emitted both ways.
  return Assembler::is_arith_imm_in_range(constant) || Assembler::is_arith_imm_in_range(-constant);
}
 182 
 183 
// AArch64 only: returns whether v is an integral constant encodable as
// a logical (bitmask) immediate. The boolean passed to LogicalImmediate
// appears to select the 32-bit form for ints — TODO confirm against the
// Assembler declaration.
static bool can_inline_as_constant_in_logic(Value v) {
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::LogicalImmediate(v->type()->as_IntConstant()->value(), true).is_encoded();
  } else if (v->type()->as_LongConstant() != NULL) {
    return Assembler::LogicalImmediate(v->type()->as_LongConstant()->value(), false).is_encoded();
  }
  return false;
}
 192 
 193 
 194 #endif // AARCH64
 195 
 196 
// No register is reserved for safepoint polling on ARM.
LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}
 200 
 201 
 202 static LIR_Opr make_constant(BasicType type, jlong c) {
 203   switch (type) {
 204     case T_ADDRESS:
 205     case T_OBJECT:  return LIR_OprFact::intptrConst(c);
 206     case T_LONG:    return LIR_OprFact::longConst(c);
 207     case T_INT:     return LIR_OprFact::intConst(c);
 208     default: ShouldNotReachHere();
 209     return LIR_OprFact::intConst(-1);
 210   }
 211 }
 212 
 213 #ifdef AARCH64
 214 
// AArch64 only: emit code computing dest = src + c for an arbitrary
// constant c, using a short add/sub sequence where possible.
void LIRGenerator::add_constant(LIR_Opr src, jlong c, LIR_Opr dest) {
  if (c == 0) {
    __ move(src, dest);
    return;
  }

  BasicType type = src->type();
  // Work with the magnitude and remember the sign; immediates below are
  // split into 12-bit chunks (plain and shifted-by-12 forms).
  bool is_neg = (c < 0);
  c = ABS(c);

  if ((c >> 24) == 0) {
    // Constant fits in 24 bits: add/sub each non-zero 12-bit chunk with
    // an immediate instruction, chaining via dest.
    for (int shift = 0; shift <= 12; shift += 12) {
      int part = ((int)c) & (right_n_bits(12) << shift);
      if (part != 0) {
        if (is_neg) {
          __ sub(src, make_constant(type, part), dest);
        } else {
          __ add(src, make_constant(type, part), dest);
        }
        // The second chunk (if any) is applied on top of the partial result.
        src = dest;
      }
    }
  } else {
    // Large constant: materialize it in dest, then add/sub as a register.
    __ move(make_constant(type, c), dest);
    if (is_neg) {
      __ sub(src, dest, dest);
    } else {
      __ add(src, dest, dest);
    }
  }
}
 246 
 247 #endif // AARCH64
 248 
 249 
// Emit code computing dest = src + c where c may not fit a single add
// immediate. AArch64 delegates to add_constant. On 32-bit ARM an
// immediate is an 8-bit value rotated by an even amount, so c is split
// into up to four byte-sized chunks — each a valid rotated immediate —
// added one at a time.
void LIRGenerator::add_large_constant(LIR_Opr src, int c, LIR_Opr dest) {
  assert(c != 0, "must be");
#ifdef AARCH64
  add_constant(src, c, dest);
#else
  // Find first non-zero bit
  int shift = 0;
  while ((c & (3 << shift)) == 0) {
    shift += 2;
  }
  // Add the least significant part of the constant
  int mask = 0xff << shift;
  __ add(src, LIR_OprFact::intConst(c & mask), dest);
  // Add up to 3 other parts of the constant;
  // each of them can be represented as rotated_imm
  if (c & (mask << 8)) {
    __ add(dest, LIR_OprFact::intConst(c & (mask << 8)), dest);
  }
  if (c & (mask << 16)) {
    __ add(dest, LIR_OprFact::intConst(c & (mask << 16)), dest);
  }
  if (c & (mask << 24)) {
    __ add(dest, LIR_OprFact::intConst(c & (mask << 24)), dest);
  }
#endif // AARCH64
}
 276 
// Build a base + (index << scale) address with zero displacement.
static LIR_Address* make_address(LIR_Opr base, LIR_Opr index, LIR_Address::Scale scale, BasicType type) {
  return new LIR_Address(base, index, scale, 0, type);
}
 280 
// Build an addressing mode for base + (index << shift) + disp that the
// target load/store instruction for 'type' can actually encode. When
// the raw combination is not encodable, extra ADD instructions are
// emitted and a simpler address based on a temp register is returned.
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");

  // Fold a constant index into the displacement.
  if (index->is_constant()) {
    disp += index->as_constant_ptr()->as_jint() << shift;
    index = LIR_OprFact::illegalOpr;
  }

#ifndef AARCH64
  // Addresses are 32-bit on aarch32: narrow any long base/index first.
  if (base->type() == T_LONG) {
    LIR_Opr tmp = new_register(T_INT);
    __ convert(Bytecodes::_l2i, base, tmp);
    base = tmp;
  }
  if (index != LIR_OprFact::illegalOpr && index->type() == T_LONG) {
    LIR_Opr tmp = new_register(T_INT);
    __ convert(Bytecodes::_l2i, index, tmp);
    index = tmp;
  }
  // At this point base and index should be all ints and not constants
  assert(base->is_single_cpu() && !base->is_constant(), "base should be an non-constant int");
  assert(index->is_illegal() || (index->type() == T_INT && !index->is_constant()), "index should be an non-constant int");
#endif

  // Encoding limits of the instruction that will be used:
  //   max_disp       - bound on a displacement that can be inlined
  //   embedded_shift - whether a shifted register index can be embedded
  int max_disp;
  bool disp_is_in_range;
  bool embedded_shift;

#ifdef AARCH64
  int align = exact_log2(type2aelembytes(type, true));
  assert((disp & right_n_bits(align)) == 0, "displacement is not aligned");
  assert(shift == 0 || shift == align, "shift should be zero or equal to embedded align");
  max_disp = (1 << 12) << align;

  if (disp >= 0) {
    // Scaled unsigned 12-bit offset form.
    disp_is_in_range = Assembler::is_unsigned_imm_in_range(disp, 12, align);
  } else {
    // Unscaled signed 9-bit offset form.
    disp_is_in_range = Assembler::is_imm_in_range(disp, 9, 0);
  }

  embedded_shift = true;
#else
  switch (type) {
    case T_BYTE:
    case T_SHORT:
    case T_CHAR:
      max_disp = 256;          // ldrh, ldrsb encoding has 8-bit offset
      embedded_shift = false;
      break;
    case T_FLOAT:
    case T_DOUBLE:
      max_disp = 1024;         // flds, fldd have 8-bit offset multiplied by 4
      embedded_shift = false;
      break;
    case T_LONG:
      max_disp = 4096;
      embedded_shift = false;
      break;
    default:
      max_disp = 4096;         // ldr, ldrb allow 12-bit offset
      embedded_shift = true;
  }

  disp_is_in_range = (-max_disp < disp && disp < max_disp);
#endif // !AARCH64

  if (index->is_register()) {
    LIR_Opr tmp = new_pointer_register();
    if (!disp_is_in_range) {
      // Displacement cannot be combined with a register index:
      // fold it into the base first.
      add_large_constant(base, disp, tmp);
      base = tmp;
      disp = 0;
    }
    LIR_Address* addr = make_address(base, index, (LIR_Address::Scale)shift, type);
    if (disp == 0 && embedded_shift) {
      // can use ldr/str instruction with register index
      return addr;
    } else {
      LIR_Opr tmp = new_pointer_register();
      __ add(base, LIR_OprFact::address(addr), tmp); // add with shifted/extended register
      return new LIR_Address(tmp, disp, type);
    }
  }

  // If the displacement is too large to be inlined into LDR instruction,
  // generate large constant with additional sequence of ADD instructions
  int excess_disp = disp & ~(max_disp - 1);
  if (excess_disp != 0) {
    LIR_Opr tmp = new_pointer_register();
    add_large_constant(base, excess_disp, tmp);
    base = tmp;
  }
  return new LIR_Address(base, disp & (max_disp - 1), type);
}
 376 
 377 
// Build the address of an array element. When the access also needs a
// card mark, the complete element address is precomputed into its own
// register so the barrier code can reuse it directly.
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type, bool needs_card_mark) {
  int base_offset = arrayOopDesc::base_offset_in_bytes(type);
  int elem_size = type2aelembytes(type);

  if (index_opr->is_constant()) {
    // Constant index: fold header offset and scaled index into one
    // displacement.
    int offset = base_offset + index_opr->as_constant_ptr()->as_jint() * elem_size;
    if (needs_card_mark) {
      LIR_Opr base_opr = new_pointer_register();
      add_large_constant(array_opr, offset, base_opr);
      return new LIR_Address(base_opr, (intx)0, type);
    } else {
      return generate_address(array_opr, offset, type);
    }
  } else {
    assert(index_opr->is_register(), "must be");
    int scale = exact_log2(elem_size);
    if (needs_card_mark) {
      // Compute array + base_offset + (index << scale) into base_opr.
      LIR_Opr base_opr = new_pointer_register();
      LIR_Address* addr = make_address(base_opr, index_opr, (LIR_Address::Scale)scale, type);
      __ add(array_opr, LIR_OprFact::intptrConst(base_offset), base_opr);
      __ add(base_opr, LIR_OprFact::address(addr), base_opr); // add with shifted/extended register
      return new LIR_Address(base_opr, type);
    } else {
      return generate_address(array_opr, index_opr, scale, base_offset, type);
    }
  }
}
 406 
 407 
// Produce an operand holding integral constant x: the constant operand
// itself when x is encodable as an instruction immediate, otherwise a
// fresh register with the materialized value.
LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  assert(type == T_LONG || type == T_INT, "should be");
  LIR_Opr r = make_constant(type, x);
#ifdef AARCH64
  bool imm_in_range = Assembler::LogicalImmediate(x, type == T_INT).is_encoded();
#else
  // aarch32: immediate must be an 8-bit value rotated by an even amount.
  bool imm_in_range = AsmOperand::is_rotated_imm(x);
#endif // AARCH64
  if (!imm_in_range) {
    LIR_Opr tmp = new_register(type);
    __ move(r, tmp);
    return tmp;
  }
  return r;
}
 423 
 424 
 425 void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
 426   LIR_Opr pointer = new_pointer_register();
 427   __ move(LIR_OprFact::intptrConst(counter), pointer);
 428   LIR_Address* addr = new LIR_Address(pointer, type);
 429   increment_counter(addr, step);
 430 }
 431 
 432 
 433 void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
 434   LIR_Opr temp = new_register(addr->type());
 435   __ move(addr, temp);
 436   __ add(temp, make_constant(addr->type(), step), temp);
 437   __ move(temp, addr);
 438 }
 439 
 440 
// Compare the 32-bit word at [base + disp] against constant c.
// LR serves as scratch for the loaded value.
void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  __ load(new LIR_Address(base, disp, T_INT), FrameMap::LR_opr, info);
  __ cmp(condition, FrameMap::LR_opr, c);
}
 445 
 446 
// Compare register 'reg' against the value at [base + disp].
// LR serves as scratch for the loaded value.
void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  __ load(new LIR_Address(base, disp, type), FrameMap::LR_opr, info);
  __ cmp(condition, reg, FrameMap::LR_opr);
}
 451 
 452 
// Try to replace 'left * c' with a shift and add/subtract when c is
// adjacent to a power of two:
//   c == 2^n - 1:  result = (left << n) - left
//   c == 2^n + 1:  result = left + (left << n)
// Returns false when no reduction applies and a real multiply is needed.
// 'left' and 'result' must be different registers.
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
  assert(left != result, "should be different registers");
  if (is_power_of_2(c + 1)) {
#ifdef AARCH64
    __ shift_left(left, log2_intptr(c + 1), result);
    __ sub(result, left, result);
#else
    // The address operand encodes the shifted register for a single rsb.
    LIR_Address::Scale scale = (LIR_Address::Scale) log2_intptr(c + 1);
    LIR_Address* addr = new LIR_Address(left, left, scale, 0, T_INT);
    __ sub(LIR_OprFact::address(addr), left, result); // rsb with shifted register
#endif // AARCH64
    return true;
  } else if (is_power_of_2(c - 1)) {
    LIR_Address::Scale scale = (LIR_Address::Scale) log2_intptr(c - 1);
    LIR_Address* addr = new LIR_Address(left, left, scale, 0, T_INT);
    __ add(left, LIR_OprFact::address(addr), result); // add with shifted register
    return true;
  }
  return false;
}
 473 
 474 
// Store an outgoing stack argument at the given offset from SP.
// Only T_INT items are expected on this path.
void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  assert(item->type() == T_INT, "other types are not expected");
  __ store(item, new LIR_Address(FrameMap::SP_opr, in_bytes(offset_from_sp), item->type()));
}
 479 
// Store the dirty card value (which must be 0) into card_addr. On
// aarch32, 'value' holds the card table base address: when that address
// is 256-byte aligned its low byte is zero, so the same register can
// double as the zero source for the byte store.
void LIRGenerator::set_card(LIR_Opr value, LIR_Address* card_addr) {
  assert(CardTable::dirty_card_val() == 0,
    "Cannot use ZR register (aarch64) or the register containing the card table base address directly (aarch32) otherwise");
#ifdef AARCH64
  // AARCH64 has a register that is constant zero. We can use that one to set the
  // value in the card table to dirty.
  __ move(FrameMap::ZR_opr, card_addr);
#else // AARCH64
  if((ci_card_table_address_as<intx>() & 0xff) == 0) {
    // If the card table base address is aligned to 256 bytes, we can use the register
    // that contains the card_table_base_address.
    __ move(value, card_addr);
  } else {
    // Otherwise we need to create a register containing that value.
    LIR_Opr tmp_zero = new_register(T_INT);
    __ move(LIR_OprFact::intConst(CardTable::dirty_card_val()), tmp_zero);
    __ move(tmp_zero, card_addr);
  }
#endif // AARCH64
}
 500 
// Emit the card table post-barrier for a store at 'addr': compute the
// card entry covering addr (card_table_base + (addr >> card_shift)) and
// set it to the dirty value, optionally skipping the store when the
// card is already dirty (UseCondCardMark).
void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base) {
  assert(addr->is_register(), "must be a register at this point");

  // LR is used as scratch for the card table base / card address.
  LIR_Opr tmp = FrameMap::LR_ptr_opr;

  // TODO-AARCH64: check performance
  bool load_card_table_base_const = AARCH64_ONLY(false) NOT_AARCH64(VM_Version::supports_movw());
  if (load_card_table_base_const) {
    __ move((LIR_Opr)card_table_base, tmp);
  } else {
    // Without movw, load the base from the thread-local copy rather
    // than materializing the 32-bit constant.
    __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
  }

#ifdef AARCH64
  LIR_Address* shifted_reg_operand = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BYTE);
  LIR_Opr tmp2 = tmp;
  __ add(tmp, LIR_OprFact::address(shifted_reg_operand), tmp2); // tmp2 = tmp + (addr >> CardTable::card_shift)
  LIR_Address* card_addr = new LIR_Address(tmp2, T_BYTE);
#else
  // Use unsigned type T_BOOLEAN here rather than (signed) T_BYTE since signed load
  // byte instruction does not support the addressing mode we need.
  LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BOOLEAN);
#endif
  if (UseCondCardMark) {
    if (UseConcMarkSweepGC) {
      __ membar_storeload();
    }
    LIR_Opr cur_value = new_register(T_INT);
    __ move(card_addr, cur_value);

    // Skip the card store when the card is already dirty.
    LabelObj* L_already_dirty = new LabelObj();
    __ cmp(lir_cond_equal, cur_value, LIR_OprFact::intConst(CardTable::dirty_card_val()));
    __ branch(lir_cond_equal, T_BYTE, L_already_dirty->label());
    set_card(tmp, card_addr);
    __ branch_destination(L_already_dirty->label());
  } else {
#if INCLUDE_ALL_GCS
    if (UseConcMarkSweepGC && CMSPrecleaningEnabled) {
      __ membar_storestore();
    }
#endif
    set_card(tmp, card_addr);
  }
}
 545 
// Emit the dynamic type check for an aastore: verify that 'value' is
// assignable to the element type of 'array'. R0/R1 are used as oop
// temporaries; no third temp is needed on ARM.
void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  LIR_Opr tmp1 = FrameMap::R0_oop_opr;
  LIR_Opr tmp2 = FrameMap::R1_oop_opr;
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}
 552 
 553 //----------------------------------------------------------------------
 554 //             visitor functions
 555 //----------------------------------------------------------------------
 556 
 557 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 558   assert(x->is_pinned(),"");
 559   LIRItem obj(x->obj(), this);
 560   obj.load_item();
 561   set_no_result(x);
 562 
 563   LIR_Opr lock = new_pointer_register();
 564   LIR_Opr hdr  = new_pointer_register();
 565 
 566   // Need a scratch register for biased locking on arm
 567   LIR_Opr scratch = LIR_OprFact::illegalOpr;
 568   if(UseBiasedLocking) {
 569     scratch = new_pointer_register();
 570   } else {
 571     scratch = atomicLockOpr();
 572   }
 573 
 574   CodeEmitInfo* info_for_exception = NULL;
 575   if (x->needs_null_check()) {
 576     info_for_exception = state_for(x);
 577   }
 578 
 579   CodeEmitInfo* info = state_for(x, x->state(), true);
 580   monitor_enter(obj.result(), lock, hdr, scratch,
 581                 x->monitor_no(), info_for_exception, info);
 582 }
 583 
 584 
 585 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
 586   assert(x->is_pinned(),"");
 587   LIRItem obj(x->obj(), this);
 588   obj.dont_load_item();
 589   set_no_result(x);
 590 
 591   LIR_Opr obj_temp = new_pointer_register();
 592   LIR_Opr lock     = new_pointer_register();
 593   LIR_Opr hdr      = new_pointer_register();
 594 
 595   monitor_exit(obj_temp, lock, hdr, atomicLockOpr(), x->monitor_no());
 596 }
 597 
 598 
// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
#ifdef __SOFTFP__
  // Soft-float build: route float/double negation through the
  // SharedRuntime helpers.
  address runtime_func = NULL;
  ValueTag tag = x->type()->tag();
  if (tag == floatTag) {
    runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::fneg);
  } else if (tag == doubleTag) {
    runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dneg);
  }
  if (runtime_func != NULL) {
    set_result(x, call_runtime(x->x(), runtime_func, x->type(), NULL));
    return;
  }
#endif // __SOFTFP__
  // All remaining cases are a single LIR negate.
  LIRItem value(x->x(), this);
  value.load_item();
  LIR_Opr reg = rlock_result(x);
  __ negate(value.result(), reg);
}
 619 
 620 
// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
// frem/drem always go through the SharedRuntime; on soft-float builds
// every FP operation is routed to an __aeabi_* helper, otherwise the
// remaining operations are emitted as FPU instructions.
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  address runtime_func;
  switch (x->op()) {
    case Bytecodes::_frem:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
      break;
    case Bytecodes::_drem:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
      break;
#ifdef __SOFTFP__
    // Call function compiled with -msoft-float.

      // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269.

    case Bytecodes::_fadd:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_fadd_glibc);
      break;
    case Bytecodes::_fmul:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_fmul);
      break;
    case Bytecodes::_fsub:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_fsub_glibc);
      break;
    case Bytecodes::_fdiv:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_fdiv);
      break;
    case Bytecodes::_dadd:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_dadd_glibc);
      break;
    case Bytecodes::_dmul:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_dmul);
      break;
    case Bytecodes::_dsub:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_dsub_glibc);
      break;
    case Bytecodes::_ddiv:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_ddiv);
      break;
    default:
      ShouldNotReachHere();
#else // __SOFTFP__
    default: {
      // Hard-float: load both operands and emit the FPU instruction.
      LIRItem left(x->x(), this);
      LIRItem right(x->y(), this);
      left.load_item();
      right.load_item();
      rlock_result(x);
      arithmetic_op_fpu(x->op(), x->operand(), left.result(), right.result(), x->is_strictfp());
      return;
    }
#endif // __SOFTFP__
  }

  LIR_Opr result = call_runtime(x->x(), x->y(), runtime_func, x->type(), NULL);
  set_result(x, result);
}
 679 
 680 
// Emit an explicit divisor-is-zero check: compare right_arg against 0
// and branch to a DivByZeroStub when equal.
void LIRGenerator::make_div_by_zero_check(LIR_Opr right_arg, BasicType type, CodeEmitInfo* info) {
  assert(right_arg->is_register(), "must be");
  __ cmp(lir_cond_equal, right_arg, make_constant(type, 0));
  __ branch(lir_cond_equal, type, new DivByZeroStub(info));
}
 686 
 687 
// for  _ladd, _lmul, _lsub, _ldiv, _lrem
// AArch64 emits everything inline (with explicit zero checks for
// div/rem); 32-bit ARM inlines only add/sub and calls SharedRuntime
// helpers for mul/div/rem.
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  CodeEmitInfo* info = NULL;
  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
    info = state_for(x);
  }

#ifdef AARCH64
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;

  // Test if instr is commutative and if we should swap
  if (x->is_commutative() && left.is_constant()) {
    left_arg = &right;
    right_arg = &left;
  }

  left_arg->load_item();
  switch (x->op()) {
    case Bytecodes::_ldiv:
      right_arg->load_item();
      make_div_by_zero_check(right_arg->result(), T_LONG, info);
      __ idiv(left_arg->result(), right_arg->result(), rlock_result(x), LIR_OprFact::illegalOpr, NULL);
      break;

    case Bytecodes::_lrem: {
      right_arg->load_item();
      make_div_by_zero_check(right_arg->result(), T_LONG, info);
      // a % b is implemented with 2 instructions:
      // tmp = a/b       (sdiv)
      // res = a - b*tmp (msub)
      LIR_Opr tmp = FrameMap::as_long_opr(Rtemp);
      __ irem(left_arg->result(), right_arg->result(), rlock_result(x), tmp, NULL);
      break;
    }

    case Bytecodes::_lmul:
      // Multiplication by a power of two becomes a shift.
      if (right_arg->is_constant() && is_power_of_2_long(right_arg->get_jlong_constant())) {
        right_arg->dont_load_item();
        __ shift_left(left_arg->result(), exact_log2_long(right_arg->get_jlong_constant()), rlock_result(x));
      } else {
        right_arg->load_item();
        __ mul(left_arg->result(), right_arg->result(), rlock_result(x));
      }
      break;

    case Bytecodes::_ladd:
    case Bytecodes::_lsub:
      // Constant operand: add_constant can emit arbitrary constants
      // with a short instruction sequence.
      if (right_arg->is_constant()) {
        jlong c = right_arg->get_jlong_constant();
        add_constant(left_arg->result(), (x->op() == Bytecodes::_ladd) ? c : -c, rlock_result(x));
      } else {
        right_arg->load_item();
        arithmetic_op_long(x->op(), rlock_result(x), left_arg->result(), right_arg->result(), NULL);
      }
      break;

    default:
      ShouldNotReachHere();
  }
#else
  switch (x->op()) {
    case Bytecodes::_ldiv:
    case Bytecodes::_lrem: {
      // Explicit zero check before calling the runtime helper.
      LIRItem right(x->y(), this);
      right.load_item();
      make_div_by_zero_check(right.result(), T_LONG, info);
    }
    // Fall through
    case Bytecodes::_lmul: {
      address entry;
      switch (x->op()) {
      case Bytecodes::_lrem:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
        break;
      case Bytecodes::_ldiv:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
        break;
      case Bytecodes::_lmul:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::lmul);
        break;
      default:
        ShouldNotReachHere();
      }
      // NOTE(review): arguments are deliberately passed as (y, x) —
      // presumably matching the SharedRuntime::l* parameter order
      // (divisor first); confirm against sharedRuntime.hpp if changing.
      LIR_Opr result = call_runtime(x->y(), x->x(), entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    case Bytecodes::_ladd:
    case Bytecodes::_lsub: {
      LIRItem left(x->x(), this);
      LIRItem right(x->y(), this);
      left.load_item();
      right.load_item();
      rlock_result(x);
      arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
      break;
    }
    default:
      ShouldNotReachHere();
  }
#endif // AARCH64
}
 793 
 794 
// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  bool is_div_rem = x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem;
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;

  // Test if instr is commutative and if we should swap
  if (x->is_commutative() && left.is_constant()) {
    left_arg = &right;
    right_arg = &left;
  }

  if (is_div_rem) {
    CodeEmitInfo* info = state_for(x);
    // Division by a power-of-two constant is emitted inline on both
    // platforms, with 'info' carrying the deopt/exception state.
    if (x->op() == Bytecodes::_idiv && right_arg->is_constant() && is_power_of_2(right_arg->get_jint_constant())) {
      left_arg->load_item();
      right_arg->dont_load_item();
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      LIR_Opr result = rlock_result(x);
      __ idiv(left_arg->result(), right_arg->result(), result, tmp, info);
    } else {
#ifdef AARCH64
      left_arg->load_item();
      right_arg->load_item();
      make_div_by_zero_check(right_arg->result(), T_INT, info);
      if (x->op() == Bytecodes::_idiv) {
        __ idiv(left_arg->result(), right_arg->result(), rlock_result(x), LIR_OprFact::illegalOpr, NULL);
      } else {
        // a % b is implemented with 2 instructions:
        // tmp = a/b       (sdiv)
        // res = a - b*tmp (msub)
        LIR_Opr tmp = FrameMap::as_opr(Rtemp);
        __ irem(left_arg->result(), right_arg->result(), rlock_result(x), tmp, NULL);
      }
#else
      // aarch32: operands are forced into fixed registers (dividend in
      // R0, divisor in R2); the remainder is produced in R0 and the
      // quotient in R1 — presumably the convention of the div/rem
      // helper behind idiv/irem; confirm in the LIR assembler.
      left_arg->load_item_force(FrameMap::R0_opr);
      right_arg->load_item_force(FrameMap::R2_opr);
      LIR_Opr tmp = FrameMap::R1_opr;
      LIR_Opr result = rlock_result(x);
      LIR_Opr out_reg;
      if (x->op() == Bytecodes::_irem) {
        out_reg = FrameMap::R0_opr;
        __ irem(left_arg->result(), right_arg->result(), out_reg, tmp, info);
      } else if (x->op() == Bytecodes::_idiv) {
        out_reg = FrameMap::R1_opr;
        __ idiv(left_arg->result(), right_arg->result(), out_reg, tmp, info);
      }
      __ move(out_reg, result);
#endif // AARCH64
    }

#ifdef AARCH64
  } else if (((x->op() == Bytecodes::_iadd) || (x->op() == Bytecodes::_isub)) && right_arg->is_constant()) {
    // add/sub with a constant: add_constant can emit arbitrary
    // constants with a short instruction sequence.
    left_arg->load_item();
    jint c = right_arg->get_jint_constant();
    right_arg->dont_load_item();
    add_constant(left_arg->result(), (x->op() == Bytecodes::_iadd) ? c : -c, rlock_result(x));
#endif // AARCH64

  } else {
    left_arg->load_item();
    if (x->op() == Bytecodes::_imul && right_arg->is_constant()) {
      jint c = right_arg->get_jint_constant();
      // Keep the constant unloaded only when the multiply can be
      // strength-reduced to shift/add (see strength_reduce_multiply).
      if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
    } else {
      AARCH64_ONLY(assert(!right_arg->is_constant(), "constant right_arg is already handled by this moment");)
      right_arg->load_nonconstant();
    }
    rlock_result(x);
    assert(right_arg->is_constant() || right_arg->is_register(), "wrong state of right");
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), NULL);
  }
}
 874 
 875 
 876 void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
 877   ValueTag tag = x->type()->tag();
 878   assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
 879   switch (tag) {
 880     case floatTag:
 881     case doubleTag:  do_ArithmeticOp_FPU(x);  return;
 882     case longTag:    do_ArithmeticOp_Long(x); return;
 883     case intTag:     do_ArithmeticOp_Int(x);  return;
 884   }
 885   ShouldNotReachHere();
 886 }
 887 
 888 
 889 // _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  LIRItem value(x->x(), this);
  LIRItem count(x->y(), this);

#ifndef AARCH64
  if (value.type()->is_long()) {
    // On 32-bit ARM, mark the shift count as destroyed — presumably the
    // multi-instruction 64-bit shift sequence clobbers it (TODO confirm
    // against the LIR assembler).
    count.set_destroys_register();
  }
#endif // !AARCH64

  if (count.is_constant()) {
    // A constant count can be encoded directly into the shift; no register needed.
    assert(count.type()->as_IntConstant() != NULL, "should be");
    count.dont_load_item();
  } else {
    count.load_item();
  }
  value.load_item();

  LIR_Opr res = rlock_result(x);
  // No temporary register is required on this platform.
  shift_op(x->op(), res, value.result(), count.result(), LIR_OprFact::illegalOpr);
}
 911 
 912 
 913 // _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();

#ifdef AARCH64
  // AArch64: keep the right operand as an immediate only when the helper
  // reports it encodable in a logical instruction; otherwise force it
  // into a register.
  if (right.is_constant() && can_inline_as_constant_in_logic(right.value())) {
    right.dont_load_item();
  } else {
    right.load_item();
  }
#else
  // 32-bit ARM: constants stay unloaded; anything else gets a register.
  right.load_nonconstant();
#endif // AARCH64

  logic_op(x->op(), rlock_result(x), left.result(), right.result());
}
 932 
 933 
 934 // _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
#ifdef __SOFTFP__
  // Soft-float build: float/double comparisons become runtime calls; only
  // _lcmp is still emitted inline.
  address runtime_func;
  switch (x->op()) {
    case Bytecodes::_fcmpl:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::fcmpl);
      break;
    case Bytecodes::_fcmpg:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::fcmpg);
      break;
    case Bytecodes::_dcmpl:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dcmpl);
      break;
    case Bytecodes::_dcmpg:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dcmpg);
      break;
    case Bytecodes::_lcmp: {
        // Long compare does not need FP support: emit it inline and return.
        LIRItem left(x->x(), this);
        LIRItem right(x->y(), this);
        left.load_item();
        right.load_nonconstant();
        LIR_Opr reg = rlock_result(x);
         __ lcmp2int(left.result(), right.result(), reg);
        return;
      }
    default:
      ShouldNotReachHere();
  }
  // FP comparison via the runtime: result is the canonical -1/0/1 int.
  LIR_Opr result = call_runtime(x->x(), x->y(), runtime_func, x->type(), NULL);
  set_result(x, result);
#else // __SOFTFP__
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();

#ifdef AARCH64
  // Inline the right operand only if it is encodable in a compare instruction.
  if (right.is_constant() && can_inline_as_constant_in_cmp(right.value())) {
    right.dont_load_item();
  } else {
    right.load_item();
  }
#else
  right.load_nonconstant();
#endif // AARCH64

  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    // The flag selects the "less" flavor (_fcmpl/_dcmpl), which treats
    // unordered results as -1.
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    ShouldNotReachHere();
  }
#endif // __SOFTFP__
}
 992 
 993 LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
 994   LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
 995   LIR_Opr tmp1 = LIR_OprFact::illegalOpr;
 996   LIR_Opr tmp2 = LIR_OprFact::illegalOpr;
 997   new_value.load_item();
 998   cmp_value.load_item();
 999   LIR_Opr result = new_register(T_INT);
1000   if (type == T_OBJECT || type == T_ARRAY) {
1001 #ifdef AARCH64
1002     if (UseCompressedOops) {
1003       tmp1 = new_pointer_register();
1004       tmp2 = new_pointer_register();
1005     }
1006 #endif
1007     __ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
1008   } else if (type == T_INT) {
1009     __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), tmp1, tmp1, result);
1010   } else if (type == T_LONG) {
1011 #ifndef AARCH64
1012     tmp1 = new_register(T_LONG);
1013 #endif // !AARCH64
1014     __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), tmp1, tmp2, result);
1015   } else {
1016     ShouldNotReachHere();
1017   }
1018   return result;
1019 }
1020 
1021 LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
1022   bool is_oop = type == T_OBJECT || type == T_ARRAY;
1023   LIR_Opr result = new_register(type);
1024   value.load_item();
1025   assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
1026   LIR_Opr tmp = (UseCompressedOops && is_oop) ? new_pointer_register() : LIR_OprFact::illegalOpr;
1027   __ xchg(addr_ptr, data, dst, tmp);
1028   return result;
1029 }
1030 
// Emit an atomic fetch-and-add on *addr; returns the previous value in a
// fresh register. A same-type scratch register is supplied for the xadd
// LIR operation.
LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  value.load_item();
  assert(type == T_INT LP64_ONLY( || type == T_LONG), "unexpected type");
  LIR_Opr tmp = new_register(type);
  __ xadd(addr, value.result(), result, tmp);
  return result;
}
1039 
// Math intrinsics: _dabs/_dsqrt are emitted inline when hardware FP is
// available; everything else (and all of them under __SOFTFP__) is routed
// through a SharedRuntime call selected below.
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  address runtime_func;
  switch (x->id()) {
    case vmIntrinsics::_dabs: {
#ifdef __SOFTFP__
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dabs);
      break;
#else
      // Hard float: a single LIR abs instruction, no runtime call.
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      __ abs(value.result(), rlock_result(x), LIR_OprFact::illegalOpr);
      return;
#endif // __SOFTFP__
    }
    case vmIntrinsics::_dsqrt: {
#ifdef __SOFTFP__
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dsqrt);
      break;
#else
      // Hard float: a single LIR sqrt instruction, no runtime call.
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      __ sqrt(value.result(), rlock_result(x), LIR_OprFact::illegalOpr);
      return;
#endif // __SOFTFP__
    }
    case vmIntrinsics::_dsin:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
      break;
    case vmIntrinsics::_dcos:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
      break;
    case vmIntrinsics::_dtan:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
      break;
    case vmIntrinsics::_dlog:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
      break;
    case vmIntrinsics::_dlog10:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
      break;
    case vmIntrinsics::_dexp:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
      break;
    case vmIntrinsics::_dpow:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      break;
    default:
      ShouldNotReachHere();
      return;
  }

  // Runtime-call path: _dpow is the only two-argument intrinsic here.
  LIR_Opr result;
  if (x->number_of_arguments() == 1) {
    result = call_runtime(x->argument_at(0), runtime_func, x->type(), NULL);
  } else {
    assert(x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow, "unexpected intrinsic");
    result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_func, x->type(), NULL);
  }
  set_result(x, result);
}
1102 
// Math.fma is not intrinsified on ARM; C1 must never be asked to emit it.
void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  fatal("FMA intrinsic is not implemented on this platform");
}
1106 
// ArraysSupport.vectorizedMismatch is not intrinsified on ARM.
void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}
1110 
// System.arraycopy intrinsic: pin all five arguments into the Java calling
// convention registers and emit a single LIR arraycopy, with R5 as scratch.
void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  CodeEmitInfo* info = state_for(x, x->state());
  assert(x->number_of_arguments() == 5, "wrong type");
  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // We put arguments into the same registers which are used for a Java call.
  // Note: we used fixed registers for all arguments because all registers
  // are caller-saved, so register allocator treats them all as used.
  src.load_item_force    (FrameMap::R0_oop_opr);
  src_pos.load_item_force(FrameMap::R1_opr);
  dst.load_item_force    (FrameMap::R2_oop_opr);
  dst_pos.load_item_force(FrameMap::R3_opr);
  length.load_item_force (FrameMap::R4_opr);
  LIR_Opr tmp =          (FrameMap::R5_opr);
  set_no_result(x);

  // Let the shared helper compute flags and (if statically known) the
  // expected array type for the copy stub.
  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);
  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(),
               tmp, expected_type, flags, info);
}
1137 
// CRC32.update* is not intrinsified in C1 on ARM.
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  fatal("CRC32 intrinsic is not implemented on this platform");
}
1141 
// CRC32C.updateBytes/updateDirectByteBuffer is not intrinsified in C1 on ARM.
void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  Unimplemented();
}
1145 
// Primitive conversions. On 32-bit ARM, long<->float/double conversions
// (and, under __SOFTFP__, most float conversions) are runtime calls;
// everything else — and all conversions on AArch64 — is a single inline
// LIR convert instruction (the default case).
void LIRGenerator::do_Convert(Convert* x) {
  address runtime_func;
  switch (x->op()) {
#ifndef AARCH64
    case Bytecodes::_l2f:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::l2f);
      break;
    case Bytecodes::_l2d:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::l2d);
      break;
    case Bytecodes::_f2l:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::f2l);
      break;
    case Bytecodes::_d2l:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::d2l);
      break;
#ifdef __SOFTFP__
    // Soft-float: use the AEABI helper routines from the compiler runtime.
    case Bytecodes::_f2d:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_f2d);
      break;
    case Bytecodes::_d2f:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_d2f);
      break;
    case Bytecodes::_i2f:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_i2f);
      break;
    case Bytecodes::_i2d:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_i2d);
      break;
    case Bytecodes::_f2i:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_f2iz);
      break;
    case Bytecodes::_d2i:
      // This is implemented in hard float in assembler on arm but a call
      // on other platforms.
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::d2i);
      break;
#endif // __SOFTFP__
#endif // !AARCH64
    default: {
      // Inline path: one LIR convert instruction, no CodeEmitInfo needed.
      LIRItem value(x->value(), this);
      value.load_item();
      LIR_Opr reg = rlock_result(x);
      __ convert(x->op(), value.result(), reg, NULL);
      return;
    }
  }

  LIR_Opr result = call_runtime(x->value(), runtime_func, x->type(), NULL);
  set_result(x, result);
}
1197 
1198 
// Allocate a new instance. The result and klass registers are fixed (R0/R1)
// because the NewInstanceStub slow path expects them there; LR doubles as a
// third temp since it is free inside compiled code.
void LIRGenerator::do_NewInstance(NewInstance* x) {
  print_if_not_loaded(x);

  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());  // R0 is required by runtime call in NewInstanceStub::emit_code
  LIR_Opr klass_reg = FrameMap::R1_metadata_opr; // R1 is required by runtime call in NewInstanceStub::emit_code
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = FrameMap::LR_oop_opr;

  new_instance(reg, x->klass(), x->is_unresolved(), tmp1, tmp2, tmp3,
               LIR_OprFact::illegalOpr, klass_reg, info);

  // Move the fixed result register into a freely-allocated one.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
1215 
1216 
// Allocate a primitive array. R0 (result), R1 (klass) and R2 (length) are
// fixed because NewTypeArrayStub's runtime call expects them there.
void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  // Evaluate state_for() first, because it can emit code
  // with the same fixed registers that are used here (R1, R2)
  CodeEmitInfo* info = state_for(x, x->state());
  LIRItem length(x->length(), this);

  length.load_item_force(FrameMap::R2_opr);      // R2 is required by runtime call in NewTypeArrayStub::emit_code
  LIR_Opr len = length.result();

  LIR_Opr reg = result_register_for(x->type());  // R0 is required by runtime call in NewTypeArrayStub::emit_code
  LIR_Opr klass_reg = FrameMap::R1_metadata_opr; // R1 is required by runtime call in NewTypeArrayStub::emit_code

  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = FrameMap::LR_oop_opr;           // LR is free inside compiled code; reuse as temp
  LIR_Opr tmp4 = LIR_OprFact::illegalOpr;

  // The element klass is always known for primitive arrays — no patching.
  BasicType elem_type = x->elt_type();
  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  // Move the fixed result register into a freely-allocated one.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
1243 
1244 
// Allocate an object array. Mirrors do_NewTypeArray, except the klass may be
// unloaded and therefore need patching, and the element type is T_OBJECT.
void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  // Evaluate state_for() first, because it can emit code
  // with the same fixed registers that are used here (R1, R2)
  CodeEmitInfo* info = state_for(x, x->state());
  LIRItem length(x->length(), this);

  length.load_item_force(FrameMap::R2_opr);           // R2 is required by runtime call in NewObjectArrayStub::emit_code
  LIR_Opr len = length.result();

  // Patching info is needed when the klass is unresolved (or PatchALot forces it).
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  LIR_Opr reg = result_register_for(x->type());       // R0 is required by runtime call in NewObjectArrayStub::emit_code
  LIR_Opr klass_reg = FrameMap::R1_metadata_opr;      // R1 is required by runtime call in NewObjectArrayStub::emit_code

  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = FrameMap::LR_oop_opr;                // LR is free inside compiled code; reuse as temp
  LIR_Opr tmp4 = LIR_OprFact::illegalOpr;

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciMetadata* obj = ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    // ciObjArrayKlass::make can fail under memory pressure; give up on this compile.
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  // Move the fixed result register into a freely-allocated one.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
1278 
1279 
// Allocate a multi-dimensional array via Runtime1::new_multi_array_id.
// Dimension sizes are passed as a varargs block on the stack (SP), with the
// klass in R0 and the rank in R2.
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Need to get the info before, as the items may become invalid through item_free
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }

  // Spill each dimension size into the outgoing stack area (the varargs block).
  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();
    LIR_Opr sz = size->result();
    assert(sz->type() == T_INT, "should be");
    store_stack_parameter(sz, in_ByteSize(i * BytesPerInt));
  }

  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr klass_reg = FrameMap::R0_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::R2_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::SP_opr;   // dimension block lives at the stack pointer
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr, reg, args, info);

  // Move the fixed result register into a freely-allocated one.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
1328 
1329 
// Block entry requires no platform-specific LIR on ARM.
void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}
1333 
1334 
// checkcast: emit the type check with a slow-path stub chosen by the kind of
// failure — ICCE for incompatible-class-change checks, a deopt for
// invokespecial receiver checks, ClassCastException otherwise.
void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);
  // Patching is needed when the target klass is unresolved (or PatchALot
  // forces it), except for incompatible-class-change checks.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
    patching_info = state_for(x, x->state_before());
  }

  obj.load_item();

  CodeEmitInfo* info_for_exception =
    (x->needs_exception_state() ? state_for(x) :
                                  state_for(x, x->state_before(), true /*ignore_xhandler*/));

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id,
                                   LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id,
                                   LIR_OprFact::illegalOpr, info_for_exception);
  }

  // R0/R1 are used as fixed temps for the type-check sequence.
  LIR_Opr out_reg = rlock_result(x);
  LIR_Opr tmp1 = FrameMap::R0_oop_opr;
  LIR_Opr tmp2 = FrameMap::R1_oop_opr;
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;

  __ checkcast(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3, x->direct_compare(),
               info_for_exception, patching_info, stub, x->profiled_method(), x->profiled_bci());
}
1371 
1372 
// instanceof: like checkcast but produces an int result instead of throwing,
// so no exception stub is needed.
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  obj.load_item();
  // R0/R1 are used as fixed temps for the type-check sequence.
  LIR_Opr out_reg = rlock_result(x);
  LIR_Opr tmp1 = FrameMap::R0_oop_opr;
  LIR_Opr tmp2 = FrameMap::R1_oop_opr;
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;

  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}
1389 
1390 
1391 #ifdef __SOFTFP__
1392 // Turn operator if (f <op> g) into runtime call:
1393 //     call _aeabi_fcmp<op>(f, g)
1394 //     cmp(eq, 1)
1395 //     branch(eq, true path).
void LIRGenerator::do_soft_float_compare(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  If::Condition cond = x->cond();
  address runtime_func;
  // unordered comparison gets the wrong answer because aeabi functions
  //  return false.
  bool unordered_is_true = x->unordered_is_true();
  // reverse of condition for ne
  bool compare_to_zero = false;
  // Select the comparison helper. For each ordering condition, the
  // "unordered is true" variant must use the SharedRuntime wrapper, since
  // the plain AEABI helpers return false for unordered operands.
  switch (lir_cond(cond)) {
    case lir_cond_notEqual:
      compare_to_zero = true;  // fall through
    case lir_cond_equal:
      // ne is implemented as eq with the result compared against zero
      // (compare_to_zero set above).
      runtime_func = tag == floatTag ?
          CAST_FROM_FN_PTR(address, __aeabi_fcmpeq):
          CAST_FROM_FN_PTR(address, __aeabi_dcmpeq);
      break;
    case lir_cond_less:
      if (unordered_is_true) {
        runtime_func = tag == floatTag ?
          CAST_FROM_FN_PTR(address, SharedRuntime::unordered_fcmplt):
          CAST_FROM_FN_PTR(address, SharedRuntime::unordered_dcmplt);
      } else {
        runtime_func = tag == floatTag ?
          CAST_FROM_FN_PTR(address, __aeabi_fcmplt):
          CAST_FROM_FN_PTR(address, __aeabi_dcmplt);
      }
      break;
    case lir_cond_lessEqual:
      if (unordered_is_true) {
        runtime_func = tag == floatTag ?
          CAST_FROM_FN_PTR(address, SharedRuntime::unordered_fcmple):
          CAST_FROM_FN_PTR(address, SharedRuntime::unordered_dcmple);
      } else {
        runtime_func = tag == floatTag ?
          CAST_FROM_FN_PTR(address, __aeabi_fcmple):
          CAST_FROM_FN_PTR(address, __aeabi_dcmple);
      }
      break;
    case lir_cond_greaterEqual:
      if (unordered_is_true) {
        runtime_func = tag == floatTag ?
          CAST_FROM_FN_PTR(address, SharedRuntime::unordered_fcmpge):
          CAST_FROM_FN_PTR(address, SharedRuntime::unordered_dcmpge);
      } else {
        runtime_func = tag == floatTag ?
          CAST_FROM_FN_PTR(address, __aeabi_fcmpge):
          CAST_FROM_FN_PTR(address, __aeabi_dcmpge);
      }
      break;
    case lir_cond_greater:
      if (unordered_is_true) {
        runtime_func = tag == floatTag ?
          CAST_FROM_FN_PTR(address, SharedRuntime::unordered_fcmpgt):
          CAST_FROM_FN_PTR(address, SharedRuntime::unordered_dcmpgt);
      } else {
        runtime_func = tag == floatTag ?
          CAST_FROM_FN_PTR(address, __aeabi_fcmpgt):
          CAST_FROM_FN_PTR(address, __aeabi_dcmpgt);
      }
      break;
    case lir_cond_aboveEqual:
    case lir_cond_belowEqual:
      ShouldNotReachHere();  // We're not going to get these.
    default:
      assert(lir_cond(cond) == lir_cond_always, "must be");
      ShouldNotReachHere();
  }
  set_no_result(x);

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }
  // Call float compare function, returns (1,0) if true or false.
  LIR_Opr result = call_runtime(x->x(), x->y(), runtime_func, intType, NULL);
  // For ne (compare_to_zero) we test eq-result against 0; otherwise against 1.
  __ cmp(lir_cond_equal, result,
         compare_to_zero ?
           LIR_OprFact::intConst(0) : LIR_OprFact::intConst(1));
  profile_branch(x, cond);
  move_to_phi(x->state());
  // Caller (do_If) emits the fall-through jump to the false successor.
  __ branch(lir_cond_equal, T_INT, x->tsux());
}
1481 #endif // __SOFTFP__
1482 
// Generate a two-way branch. Float/double compares on soft-float builds are
// delegated to do_soft_float_compare; everything else is cmp + branch.
void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();

#ifdef __SOFTFP__
  if (tag == floatTag || tag == doubleTag) {
    do_soft_float_compare(x);
    assert(x->default_sux() == x->fsux(), "wrong destination above");
    __ jump(x->default_sux());
    return;
  }
#endif // __SOFTFP__

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;
  If::Condition cond = x->cond();

#ifndef AARCH64
  if (tag == longTag) {
    // 32-bit long compare: normalize gtr/leq by swapping the operands and
    // mirroring the condition, so only one operand order has to be handled.
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
#endif // !AARCH64

  xin->load_item();
  LIR_Opr left = xin->result();
  LIR_Opr right;

#ifdef AARCH64
  // Keep the right operand as an immediate only if it is encodable in a
  // compare instruction.
  if (yin->is_constant() && can_inline_as_constant_in_cmp(yin->value())) {
    yin->dont_load_item();
  } else {
    yin->load_item();
  }
  right = yin->result();
#else
  if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 &&
      (cond == If::eql || cond == If::neq)) {
    // inline long zero
    right = LIR_OprFact::value_type(yin->value()->type());
  } else {
    yin->load_nonconstant();
    right = yin->result();
  }
#endif // AARCH64

  set_no_result(x);

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    // Float branch carries an extra unordered successor.
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}
1554 
1555 
// The current JavaThread lives in a dedicated register (Rthread) on ARM.
LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::Rthread_opr;
}
1559 
1560 void LIRGenerator::trace_block_entry(BlockBegin* block) {
1561   __ move(LIR_OprFact::intConst(block->block_id()), FrameMap::R0_opr);
1562   LIR_OprList* args = new LIR_OprList(1);
1563   args->append(FrameMap::R0_opr);
1564   address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
1565   __ call_runtime_leaf(func, getThreadTemp(), LIR_OprFact::illegalOpr, args);
1566 }
1567 
1568 
// Store to a volatile field. On 32-bit ARM a 64-bit (double-cpu) value needs
// an atomic doubleword store, which requires a base-register-only address, so
// the displacement is folded into a temp register first.
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
#ifndef AARCH64
  if (value->is_double_cpu()) {
    assert(address->index()->is_illegal(), "should have a constant displacement");
    LIR_Opr tmp = new_pointer_register();
    add_large_constant(address->base(), address->disp(), tmp);
    __ volatile_store_mem_reg(value, new LIR_Address(tmp, (intx)0, address->type()), info);
    return;
  }
#endif // !AARCH64
  // TODO-AARCH64 implement with stlr instruction
  __ store(value, address, info, lir_patch_none);
}
1583 
// Load from a volatile field. Mirror of volatile_field_store: 64-bit loads on
// 32-bit ARM go through a base-register-only address for atomicity.
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
#ifndef AARCH64
  if (result->is_double_cpu()) {
    assert(address->index()->is_illegal(), "should have a constant displacement");
    LIR_Opr tmp = new_pointer_register();
    add_large_constant(address->base(), address->disp(), tmp);
    __ volatile_load_mem_reg(new LIR_Address(tmp, (intx)0, address->type()), result, info);
    return;
  }
#endif // !AARCH64
  // TODO-AARCH64 implement with ldar instruction
  __ load(address, result, info, lir_patch_none);
}