/*
 * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "ci/ciValueKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_aarch64.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif
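
// All LIR emission below goes through the `__` shorthand, which appends
// instructions to this generator's LIR list (annotated with __FILE__/__LINE__
// in debug builds so emitted LIR can be traced back to the generator line).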

// Item will be loaded into a byte register; Intel only
void LIRItem::load_byte_item() {
  load_item();
}


void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (r->is_constant()) {
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------


LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::r0_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::r3_opr; }
LIR_Opr LIRGenerator::divInOpr()        { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::divOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::remOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::syncLockOpr()     { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::r0_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }


LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:     opr = FrameMap::r0_opr;          break;
    case objectTag:  opr = FrameMap::r0_oop_opr;      break;
    case longTag:    opr = FrameMap::long0_opr;       break;
    case floatTag:   opr = FrameMap::fpu0_float_opr;  break;
    case doubleTag:  opr = FrameMap::fpu0_double_opr; break;

    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}


//--------- loading items into registers --------------------------------


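// Note: only 0 and null are treated as storable constants below; on AArch64
// those can be written straight from the zero register (zr), so nothing has
// to be materialized first.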
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (v->type()->as_IntConstant() != NULL) {
    return v->type()->as_IntConstant()->value() == 0L;
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}

bool LIRGenerator::can_inline_as_constant(Value v) const {
  // FIXME: Just a guess
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { return false; }


LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}


LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  intx large_disp = disp;

  // accumulate fixed displacements
  if (index->is_constant()) {
    LIR_Const *constant = index->as_constant_ptr();
    if (constant->type() == T_INT) {
      large_disp += ((intx)index->as_jint()) << shift;  // widen before shifting to avoid 32-bit overflow
    } else {
      assert(constant->type() == T_LONG, "should be");
      jlong c = index->as_jlong() << shift;
      if ((jlong)((jint)c) == c) {
        large_disp += c;
        index = LIR_OprFact::illegalOpr;
      } else {
        LIR_Opr tmp = new_register(T_LONG);
        __ move(index, tmp);
        index = tmp;
        // apply shift and displacement below
      }
    }
  }

  if (index->is_register()) {
    // apply the shift and accumulate the displacement
    if (shift > 0) {
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index, shift, tmp);
      index = tmp;
    }
    if (large_disp != 0) {
      LIR_Opr tmp = new_pointer_register();
      if (Assembler::operand_valid_for_add_sub_immediate(large_disp)) {
        __ add(index, LIR_OprFact::intptrConst(large_disp), tmp);
        index = tmp;
      } else {
        __ move(tmp, LIR_OprFact::intptrConst(large_disp));
        __ add(tmp, index, tmp);
        index = tmp;
      }
      large_disp = 0;
    }
  } else if (large_disp != 0 && !Address::offset_ok_for_immed(large_disp, shift)) {
    // index is illegal so replace it with the displacement loaded into a register
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(large_disp), index);
    large_disp = 0;
  }

  // at this point we either have base + index or base + displacement
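  // For example, base = R, constant index = 3, shift = 2, disp = 16 folds to
  // the single displacement 16 + (3 << 2) = 28, i.e. [R, #28]. The folding is
  // required because an AArch64 address can encode base + index or
  // base + displacement, but not all three at once.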
  if (large_disp == 0) {
    return new LIR_Address(base, index, type);
  } else {
    assert(Address::offset_ok_for_immed(large_disp, 0), "must be");
    return new LIR_Address(base, large_disp, type);
  }
}

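// For a register index the array-header offset is pre-added into a fresh
// base register below, for the same reason as in generate_address: the
// scaled-register form of an AArch64 address cannot also carry an
// immediate displacement.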
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
  } else {
    if (offset_in_bytes) {
      LIR_Opr tmp = new_pointer_register();
      __ add(array_opr, LIR_OprFact::intConst(offset_in_bytes), tmp);
      array_opr = tmp;
      offset_in_bytes = 0;
    }
    addr = new LIR_Address(array_opr,
                           index_opr,
                           LIR_Address::scale(type),
                           offset_in_bytes, type);
  }
  return addr;
}

LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(false, x)) {
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(true, x)) {
      // This is all rather nasty.  We don't know whether our constant
      // is required for a logical or an arithmetic operation, so we
      // don't know what the range of valid values is!!
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else {
    ShouldNotReachHere();
    r = NULL;  // unreachable
  }
  return r;
}


void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr imm = NULL;
  switch(addr->type()) {
  case T_INT:
    imm = LIR_OprFact::intConst(step);
    break;
  case T_LONG:
    imm = LIR_OprFact::longConst(step);
    break;
  default:
    ShouldNotReachHere();
  }
  LIR_Opr reg = new_register(addr->type());
  __ load(addr, reg);
  __ add(reg, imm, reg);
  __ store(reg, addr);
}

void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr reg = new_register(T_INT);
  __ load(generate_address(base, disp, T_INT), reg, info);
  __ cmp(condition, reg, LIR_OprFact::intConst(c));
}

void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr reg1 = new_register(T_INT);
  __ load(generate_address(base, disp, type), reg1, info);
  __ cmp(condition, reg, reg1);
}


bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {

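  // Replace a multiply by c with a shift-and-add (or shift-and-sub) when c
  // is adjacent to a power of two: e.g. left * 9 = (left << 3) + left and
  // left * 7 = (left << 3) - left.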
  if (is_power_of_2(c - 1)) {
    __ shift_left(left, exact_log2(c - 1), tmp);
    __ add(tmp, left, result);
    return true;
  } else if (is_power_of_2(c + 1)) {
    __ shift_left(left, exact_log2(c + 1), tmp);
    __ sub(tmp, left, result);
    return true;
  } else {
    return false;
  }
}

void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
}

void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = new_register(objectType);
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}

void LIRGenerator::flattened_array_store_check(LIR_Opr value, ciKlass* element_klass, CodeEmitInfo* store_check_info) {
  LIR_Opr tmp1 = new_register(T_METADATA);
  LIR_Opr tmp2 = new_register(T_METADATA);

  __ metadata2reg(element_klass->constant_encoding(), tmp2);
  __ flattened_store_check(value, element_klass, tmp1, tmp2, store_check_info);
}


//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for biased locking
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking || x->maybe_valuetype()) {
    scratch = new_register(T_INT);
  }

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }

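  // Value types cannot be synchronized on; if the receiver might be one,
  // route locking through a stub that throws IllegalMonitorStateException.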
  CodeStub* throw_imse_stub =
      x->maybe_valuetype() ?
      new SimpleExceptionStub(Runtime1::throw_illegal_monitor_state_exception_id, LIR_OprFact::illegalOpr, state_for(x)) :
      NULL;

  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info, throw_imse_stub);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}


void LIRGenerator::do_NegateOp(NegateOp* x) {

  LIRItem from(x->x(), this);
  from.load_item();
  LIR_Opr result = rlock_result(x);
  __ negate (from.result(), result);

}

// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {

  if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {
    // float remainder is implemented as a direct call into the runtime
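    // Note the deliberately swapped names: "right" wraps x->x() (the
    // dividend) and "left" wraps x->y() (the divisor), so the dividend
    // lands in the first C argument, cc->at(0), below.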
    LIRItem right(x->x(), this);
    LIRItem left(x->y(), this);

    BasicTypeList signature(2);
    if (x->op() == Bytecodes::_frem) {
      signature.append(T_FLOAT);
      signature.append(T_FLOAT);
    } else {
      signature.append(T_DOUBLE);
      signature.append(T_DOUBLE);
    }
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(1));
    right.load_item();

    __ move(right.result(), cc->at(0));

    address entry;
    if (x->op() == Bytecodes::_frem) {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
    } else {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);

    return;
  }

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg  = &left;
  LIRItem* right_arg = &right;

  // Always load right hand side.
  right.load_item();

  if (!left.is_register())
    left.load_item();

  LIR_Opr reg = rlock(x);
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  if (x->is_strictfp() && (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv)) {
    tmp = new_register(T_DOUBLE);
  }

  arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), NULL);

  set_result(x, round_item(reg));
}

// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {

  // missing test if instr is commutative and if we should swap
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {

    left.load_item();
    bool need_zero_check = true;
    if (right.is_constant()) {
      jlong c = right.get_jlong_constant();
      // no need to do div-by-zero check if the divisor is a non-zero constant
      if (c != 0) need_zero_check = false;
      // do not load right if the divisor is a power-of-2 constant
      if (c > 0 && is_power_of_2_long(c)) {
        right.dont_load_item();
      } else {
        right.load_item();
      }
    } else {
      right.load_item();
    }
    if (need_zero_check) {
      CodeEmitInfo* info = state_for(x);
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));
    }

    rlock_result(x);
    switch (x->op()) {
    case Bytecodes::_lrem:
      __ rem (left.result(), right.result(), x->operand());
      break;
    case Bytecodes::_ldiv:
      __ div (left.result(), right.result(), x->operand());
      break;
    default:
      ShouldNotReachHere();
      break;
    }

  } else {
    assert (x->op() == Bytecodes::_lmul || x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub,
            "expect lmul, ladd or lsub");
    // add, sub, mul
    left.load_item();
    if (! right.is_register()) {
      if (x->op() == Bytecodes::_lmul
          || ! right.is_constant()
          || ! Assembler::operand_valid_for_add_sub_immediate(right.get_jlong_constant())) {
        right.load_item();
      } else { // add, sub
        assert (x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expect ladd or lsub");
        // don't load constants to save register
        right.load_nonconstant();
      }
    }
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}

// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {

  // Test if instr is commutative and if we should swap
  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;
  if (x->is_commutative() && left.is_stack() && right.is_register()) {
    // swap them if left is real stack (or cached) and right is real register(not cached)
    left_arg = &right;
    right_arg = &left;
  }

  left_arg->load_item();

  // do not need to load right, as we can handle stack and constants
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {

    rlock_result(x);
    bool need_zero_check = true;
    if (right.is_constant()) {
      jint c = right.get_jint_constant();
      // no need to do div-by-zero check if the divisor is a non-zero constant
      if (c != 0) need_zero_check = false;
      // do not load right if the divisor is a power-of-2 constant
      if (c > 0 && is_power_of_2(c)) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
    } else {
      right_arg->load_item();
    }
    if (need_zero_check) {
      CodeEmitInfo* info = state_for(x);
      __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
    }

    LIR_Opr ill = LIR_OprFact::illegalOpr;
    if (x->op() == Bytecodes::_irem) {
      __ irem(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
    }

  } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) {
    if (right.is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(right.get_jint_constant())) {
      right.load_nonconstant();
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), LIR_OprFact::illegalOpr);
  } else {
    assert (x->op() == Bytecodes::_imul, "expect imul");
    if (right.is_constant()) {
      jint c = right.get_jint_constant();
      if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) {
        right_arg->dont_load_item();
      } else {
        // Cannot use constant op.
        right_arg->load_item();
      }
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), new_register(T_INT));
  }
}

void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag:  do_ArithmeticOp_FPU(x);  return;
    case longTag:    do_ArithmeticOp_Long(x); return;
    case intTag:     do_ArithmeticOp_Int(x);  return;
    default:         ShouldNotReachHere();    return;
  }
}

// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
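  // Per the JLS, only the low 5 bits (int) or 6 bits (long) of the shift
  // count are significant: constant counts are masked here, and variable
  // counts are masked explicitly via logical_and below.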
  if (right.is_constant()) {
    right.dont_load_item();

    switch (x->op()) {
    case Bytecodes::_ishl: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_ishr: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_iushr: {
      int c = right.get_jint_constant() & 0x1f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshl: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshr: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lushr: {
      int c = right.get_jint_constant() & 0x3f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    default:
      ShouldNotReachHere();
    }
  } else {
    right.load_item();
    LIR_Opr tmp = new_register(T_INT);
    switch (x->op()) {
    case Bytecodes::_ishl: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ shift_left(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_ishr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_iushr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshl: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ shift_left(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lushr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    default:
      ShouldNotReachHere();
    }
  }
}

// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
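  // AArch64 logical instructions only accept "bitmask immediate" encodings
  // (replicated runs of ones, e.g. 0xff or 0x0f0f0f0f); any other constant
  // has to be loaded into a register first.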
  if (right.is_constant()
      && ((right.type()->tag() == intTag
           && Assembler::operand_valid_for_logical_immediate(true, right.get_jint_constant()))
          || (right.type()->tag() == longTag
              && Assembler::operand_valid_for_logical_immediate(false, right.get_jlong_constant())))) {
    right.dont_load_item();
  } else {
    right.load_item();
  }
  switch (x->op()) {
  case Bytecodes::_iand:
  case Bytecodes::_land:
    __ logical_and(left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ior:
  case Bytecodes::_lor:
    __ logical_or (left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ixor:
  case Bytecodes::_lxor:
    __ logical_xor(left.result(), right.result(), x->operand()); break;
  default: Unimplemented();
  }
}

// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}

LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  new_value.load_item();
  cmp_value.load_item();
  LIR_Opr result = new_register(T_INT);
  if (type == T_OBJECT || type == T_ARRAY) {
    __ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
  } else if (type == T_INT) {
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_LONG) {
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else {
    ShouldNotReachHere();
    Unimplemented();
  }
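  // By platform convention the CAS status ends up in r8; the xor below
  // normalizes it into the 0/1 boolean result the caller expects.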
  __ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result);
  return result;
}

LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  bool is_oop = type == T_OBJECT || type == T_ARRAY;
  LIR_Opr result = new_register(type);
  value.load_item();
  assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
  LIR_Opr tmp = new_register(T_INT);
  __ xchg(addr, value.result(), result, tmp);
  return result;
}

LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  value.load_item();
  assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
  LIR_Opr tmp = new_register(T_INT);
  __ xadd(addr, value.result(), result, tmp);
  return result;
}

void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 1 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow), "wrong type");
  if (x->id() == vmIntrinsics::_dexp || x->id() == vmIntrinsics::_dlog ||
      x->id() == vmIntrinsics::_dpow || x->id() == vmIntrinsics::_dcos ||
      x->id() == vmIntrinsics::_dsin || x->id() == vmIntrinsics::_dtan ||
      x->id() == vmIntrinsics::_dlog10) {
    do_LibmIntrinsic(x);
    return;
  }
  switch (x->id()) {
    case vmIntrinsics::_dabs:
    case vmIntrinsics::_dsqrt: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);

      switch (x->id()) {
        case vmIntrinsics::_dsqrt: {
          __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
          break;
        }
        case vmIntrinsics::_dabs: {
          __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
          break;
        }
        default:
          ShouldNotReachHere();
      }
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
  LIRItem value(x->argument_at(0), this);
  value.set_destroys_register();

  LIR_Opr calc_result = rlock_result(x);
  LIR_Opr result_reg = result_register_for(x->type());

  CallingConvention* cc = NULL;

  if (x->id() == vmIntrinsics::_dpow) {
    LIRItem value1(x->argument_at(1), this);

    value1.set_destroys_register();

    BasicTypeList signature(2);
    signature.append(T_DOUBLE);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
    value1.load_item_force(cc->at(1));
  } else {
    BasicTypeList signature(1);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
  }

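  // Each intrinsic prefers the hand-tuned generated stub when one exists
  // (StubRoutines::dexp() etc.) and falls back to the shared C runtime
  // implementation otherwise.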
  switch (x->id()) {
    case vmIntrinsics::_dexp:
      if (StubRoutines::dexp() != NULL) {
        __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog:
      if (StubRoutines::dlog() != NULL) {
        __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog10:
      if (StubRoutines::dlog10() != NULL) {
        __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dpow:
      if (StubRoutines::dpow() != NULL) {
        __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dsin:
      if (StubRoutines::dsin() != NULL) {
        __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dcos:
      if (StubRoutines::dcos() != NULL) {
        __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dtan:
      if (StubRoutines::dtan() != NULL) {
        __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
      }
      break;
    default:  ShouldNotReachHere();
  }
  __ move(result_reg, calc_result);
}


void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  LIR_Opr tmp =           FrameMap::as_opr(j_rarg5);

  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}

void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "why are we here?");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // val is destroyed by update_crc32
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();

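      // The 32-bit offset must be sign-extended (i2l) before it can be used
      // as the 64-bit index component of an address.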
      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
      }

      if (is_updateBytes) {
        base_op = access_resolve(ACCESS_READ, base_op);
      }

      if (offset) {
        LIR_Opr tmp = new_pointer_register();
        __ add(base_op, LIR_OprFact::intConst(offset), tmp);
        base_op = tmp;
        offset = 0;
      }

      LIR_Address* a = new LIR_Address(base_op,
                                       index,
                                       offset,
                                       T_BYTE);
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr addr = new_pointer_register();
      __ leal(LIR_OprFact::address(a), addr);

      crc.load_item_force(cc->at(0));
      __ move(addr, cc->at(1));
      len.load_item_force(cc->at(2));

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
      __ move(result_reg, result);

      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  assert(UseCRC32CIntrinsics, "why are we here?");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
    case vmIntrinsics::_updateBytesCRC32C:
    case vmIntrinsics::_updateDirectByteBufferCRC32C: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem end(x->argument_at(3), this);

      buf.load_item();
      off.load_nonconstant();
      end.load_nonconstant();

      // len = end - off
      LIR_Opr len  = end.result();
      LIR_Opr tmpA = new_register(T_INT);
      LIR_Opr tmpB = new_register(T_INT);
      __ move(end.result(), tmpA);
      __ move(off.result(), tmpB);
      __ sub(tmpA, tmpB, tmpA);
      len = tmpA;

      LIR_Opr index = off.result();
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();

      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
      }

      if (is_updateBytes) {
        base_op = access_resolve(ACCESS_READ, base_op);
      }

      if (offset) {
        LIR_Opr tmp = new_pointer_register();
        __ add(base_op, LIR_OprFact::intConst(offset), tmp);
        base_op = tmp;
        offset = 0;
      }

      LIR_Address* a = new LIR_Address(base_op,
                                       index,
                                       offset,
                                       T_BYTE);
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr addr = new_pointer_register();
      __ leal(LIR_OprFact::address(a), addr);

      crc.load_item_force(cc->at(0));
      __ move(addr, cc->at(1));
      __ move(len, cc->at(2));

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), getThreadTemp(), result_reg, cc->args());
      __ move(result_reg, result);

      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  assert(UseFMA, "Needs FMA instructions support.");
  LIRItem value(x->argument_at(0), this);
  LIRItem value1(x->argument_at(1), this);
  LIRItem value2(x->argument_at(2), this);

  value.load_item();
  value1.load_item();
  value2.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_input1 = value1.result();
  LIR_Opr calc_input2 = value2.result();
  LIR_Opr calc_result = rlock_result(x);

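  // Math.fma(a, b, c) computes a * b + c with a single rounding; the three
  // inputs map to a, b and c in that order.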
  switch (x->id()) {
  case vmIntrinsics::_fmaD:   __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
  case vmIntrinsics::_fmaF:   __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
  default:                    ShouldNotReachHere();
  }
}

void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}

// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
void LIRGenerator::do_Convert(Convert* x) {
  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);

  // arguments of lir_convert
  LIR_Opr conv_input = input;
  LIR_Opr conv_result = result;
  ConversionStub* stub = NULL;
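  // stub stays NULL: AArch64 performs all of these conversions inline, so
  // no ConversionStub slow path is needed.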

  __ convert(x->op(), conv_input, conv_result);

  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
}

void LIRGenerator::do_NewInstance(NewInstance* x) {
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               FrameMap::r2_oop_opr,
               FrameMap::r5_oop_opr,
               FrameMap::r4_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::r3_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewValueTypeInstance(NewValueTypeInstance* x) {
  // Mapping to do_NewInstance (same code)
  CodeEmitInfo* info = state_for(x, x->state());
  x->set_to_object_type();
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               FrameMap::r2_oop_opr,
               FrameMap::r5_oop_opr,
               FrameMap::r4_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::r3_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item_force(FrameMap::r19_opr);

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r2_oop_opr;
  LIR_Opr tmp2 = FrameMap::r4_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r2_oop_opr;
  LIR_Opr tmp2 = FrameMap::r4_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;

  length.load_item_force(FrameMap::r19_opr);
  LIR_Opr len = length.result();

  // DMS CHECK: Should we allocate slow path after BAILOUT?
  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, false);

  ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);

  if (obj->is_value_array_klass()) {
    __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_VALUETYPE, klass_reg, slow_path);
  } else {
    __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
  }

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

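  // Pass the dimension sizes in the outgoing stack area, packed as 32-bit
  // ints at sp + i*4; r2 is pointed at this array below for the
  // new_multi_array_id runtime call.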
  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();

    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }

  LIR_Opr klass_reg = FrameMap::r0_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::r19_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::r2_opr;
  __ move(FrameMap::sp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}

void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }

  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci(), x->is_never_null());
}

void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}

void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();

  if (tag == longTag) {
    if (yin->is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jlong_constant())) {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else if (tag == intTag) {
    if (yin->is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jint_constant())) {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else {
    yin->load_item();
  }

  set_no_result(x);

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
        x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}

LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::as_pointer_opr(rthread);
}

void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }

void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  __ volatile_store_mem_reg(value, address, info);
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR.  Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LD;DMB but stores use STLR.  This can happen if C2 compiles
  // the stores in one method and C1 compiles the loads in another.
  if (! UseBarriersForVolatile) {
    __ membar();
  }

  __ volatile_load_mem_reg(address, result, info);
}