1 /*
   2  * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 // This file is a derivative work resulting from (and including) modifications
  26 // made by Azul Systems, Inc.  The dates of such changes are 2013-2016.
  27 // Copyright 2013-2016 Azul Systems, Inc.  All Rights Reserved.
  28 //
  29 // Please contact Azul Systems, 385 Moffett Park Drive, Suite 115, Sunnyvale,
  30 // CA 94089 USA or visit www.azul.com if you need additional information or
  31 // have any questions.
  32 
  33 #include "precompiled.hpp"
  34 #include "c1/c1_Compilation.hpp"
  35 #include "c1/c1_FrameMap.hpp"
  36 #include "c1/c1_Instruction.hpp"
  37 #include "c1/c1_LIRAssembler.hpp"
  38 #include "c1/c1_LIRGenerator.hpp"
  39 #include "c1/c1_Runtime1.hpp"
  40 #include "c1/c1_ValueStack.hpp"
  41 #include "ci/ciArray.hpp"
  42 #include "ci/ciObjArrayKlass.hpp"
  43 #include "ci/ciTypeArrayKlass.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "runtime/stubRoutines.hpp"
  46 #include "vmreg_aarch32.inline.hpp"
  47 
  48 #ifdef ASSERT
  49 #define __ gen()->lir(__FILE__, __LINE__)->
  50 #else
  51 #define __ gen()->lir()->
  52 #endif
  53 
// Item will be loaded into a byte register.  This is an x86-only
// constraint (any register can hold a byte here), so simply do a
// regular load.
void LIRItem::load_byte_item() {
  load_item();
}
  58 
  59 
  60 void LIRItem::load_nonconstant() {
  61   LIR_Opr r = value()->operand();
  62   if (r->is_constant()) {
  63     _result = r;
  64   } else {
  65     load_item();
  66   }
  67 }
  68 
  69 //--------------------------------------------------------------
  70 //               LIRGenerator
  71 //--------------------------------------------------------------
  72 
  73 
// Fixed operands required by the platform-independent LIRGenerator code.
LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::r0_oop_opr; }  // exception oop is passed in r0
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::r3_opr; }      // exception pc is passed in r3
// The fixed div/rem/shift operand registers are not used on this platform.
LIR_Opr LIRGenerator::divInOpr()        { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::divOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::remOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::r0_opr; }      // scratch for monitor enter/exit
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }  // no temp reserved for fetching the thread
  82 
  83 
  84 LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  85   LIR_Opr opr;
  86   switch (type->tag()) {
  87     case intTag:     opr = FrameMap::r0_opr;          break;
  88     case objectTag:  opr = FrameMap::r0_oop_opr;      break;
  89     case longTag:    opr = FrameMap::long0_opr;        break;
  90     case floatTag:   opr = FrameMap::fpu0_float_opr;  break;
  91     case doubleTag:  opr = FrameMap::fpu0_double_opr;  break;
  92 
  93     case addressTag:
  94     default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  95   }
  96 
  97   assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  98   return opr;
  99 }
 100 
 101 
// Allocate a virtual register for a byte value.  The requested type is
// ignored (an int register is always returned); the byte_reg flag is
// still recorded for the register allocator.
LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}
 107 
 108 
 109 //--------- loading items into registers --------------------------------
 110 
 111 
 112 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
 113   if (v->type()->as_IntConstant() != NULL) {
 114     return v->type()->as_IntConstant()->value() == 0L;
 115   } else if (v->type()->as_LongConstant() != NULL) {
 116     return v->type()->as_LongConstant()->value() == 0L;
 117   } else if (v->type()->as_ObjectConstant() != NULL) {
 118     return v->type()->as_ObjectConstant()->value()->is_null_object();
 119   } else {
 120     return false;
 121   }
 122 }
 123 
 124 bool LIRGenerator::can_inline_as_constant(Value v) const {
 125   if (v->type()->as_IntConstant() != NULL) {
 126     return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
 127   } else if (v->type()->as_LongConstant() != NULL) {
 128     return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_LongConstant()->value());
 129   } else if (v->type()->as_ObjectConstant() != NULL) {
 130     return v->type()->as_ObjectConstant()->value()->is_null_object();
 131   } else {
 132     return false;
 133   }
 134 }
 135 
 136 
 137 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
 138   switch (c->type()) {
 139   case T_BOOLEAN:
 140   case T_CHAR:
 141   case T_BYTE:
 142   case T_SHORT:
 143   case T_INT:
 144     return Assembler::operand_valid_for_add_sub_immediate(c->as_jint());
 145   case T_LONG:
 146     return Assembler::operand_valid_for_add_sub_immediate(c->as_jlong());
 147 
 148   case T_OBJECT:
 149     return c->as_jobject() == (jobject) NULL;
 150   case T_METADATA:
 151     return c->as_metadata() == (Metadata*) NULL;
 152 
 153   case T_FLOAT:
 154     return Assembler::operand_valid_for_float_immediate(c->as_jfloat());
 155   case T_DOUBLE:
 156     return Assembler::operand_valid_for_float_immediate(c->as_jdouble());
 157   }
 158   return false;
 159 }
 160 
// No dedicated register is reserved for safepoint polling on this
// platform; the poll sequence allocates what it needs itself.
LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}
 164 
// Build a LIR_Address for [base + (index << shift) + disp] that the
// AArch32 load/store encodings can actually express: an instruction may
// take either a (possibly shifted) register index or an immediate
// offset, but not both, and immediate offsets have an
// insn-type-dependent range.  Components that cannot be encoded are
// folded into freshly allocated pointer registers.
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  const Address::InsnDataType insn_type = Address::toInsnDataType(type);
  assert(base->is_register(), "must be");

  // accumulate fixed displacements
  if (index->is_constant()) {
    disp += index->as_constant_ptr()->as_jint() << shift;
    index = LIR_OprFact::illegalOpr;
    shift = 0;
  }

  // aarch32 cannot handle natively both index and offset at the same time
  // need to calculate effective value
  if (index->is_register()) {
    if ((disp != 0) &&
        Address::shift_ok_for_index(lsl(shift), insn_type) &&
        Assembler::operand_valid_for_add_sub_immediate(disp)) {
      // Fold the displacement into the base:
      // add tmp, base, disp
      // ldr r, [tmp, index, LSL #shift ]
      LIR_Opr tmp = new_pointer_register();
      __ add(base, LIR_OprFact::intptrConst(disp), tmp);
      base = tmp;
      disp = 0;
    } else {
      assert(shift <= (int) LIR_Address::times_8, "no large shift could be here");
      // Fold base + scaled index into a temp, leaving only the displacement:
      // add tmp, base, index, LSL #shift
      // ...
      // ldr r, [tmp, ...]
      LIR_Opr tmp = new_pointer_register();
      __ leal(LIR_OprFact::address(new LIR_Address(base, index, (LIR_Address::Scale) shift, 0, type)), tmp);
      base = tmp;
      index = LIR_OprFact::illegalOpr;
      shift = 0;
    }
  }

  assert(!index->is_register() || (disp == 0), "should be");

  if (!Address::offset_ok_for_immed(disp, insn_type)) {
    assert(!index->is_valid(), "should be");
    // here index should be illegal so we can replace it with the displacement
    // loaded into a register
    // mov tmp, disp
    // ldr r, [base, tmp]
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(disp), index);
    disp = 0;
  }

  assert(Address::offset_ok_for_immed(disp, Address::toInsnDataType(type)), "must be");
  return new LIR_Address(base, index, (LIR_Address::Scale) shift, disp, type);
}
 218 
 219 
// Form the address of array element array_opr[index_opr] for the given
// element type, accounting for the array header.  When a precise card
// mark is needed the full effective address is computed once into a
// register so the store and the card mark can share it.
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type, bool needs_card_mark) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);

  LIR_Address* addr = generate_address(array_opr, index_opr, shift, offset_in_bytes, type);

  if (needs_card_mark) {
    // This store will need a precise card mark, so go ahead and
    // compute the full address instead of computing it once for the
    // store and again for the card mark.
    LIR_Opr tmp = new_pointer_register();
    __ leal(LIR_OprFact::address(addr), tmp);
    return new LIR_Address(tmp, type);
  } else {
    return addr;
  }
}
 239 
// Materialize the immediate x as a LIR operand of the given type (T_INT
// or T_LONG).  If the value can be encoded as a logical immediate the
// constant operand itself is returned; otherwise the constant is moved
// into a fresh register first.
LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(false, x)) {
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(true, x)) {
      // This is all rather nasty.  We don't know whether our constant
      // is required for a logical or an arithmetic operation, so we
      // don't know what the range of valid values is!!
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else {
    ShouldNotReachHere();
  }
  return r;
}
 264 
 265 
 266 
 267 void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
 268   LIR_Opr pointer = new_pointer_register();
 269   __ move(LIR_OprFact::intptrConst(counter), pointer);
 270   LIR_Address* addr = new LIR_Address(pointer, type);
 271   increment_counter(addr, step);
 272 }
 273 
 274 
 275 void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
 276   LIR_Opr imm = NULL;
 277   switch(addr->type()) {
 278   case T_INT:
 279     imm = LIR_OprFact::intConst(step);
 280     break;
 281   case T_LONG:
 282     imm = LIR_OprFact::longConst(step);
 283     break;
 284   default:
 285     ShouldNotReachHere();
 286   }
 287   LIR_Opr reg = new_register(addr->type());
 288   __ load(addr, reg);
 289   __ add(reg, imm, reg);
 290   __ store(reg, addr);
 291 }
 292 
 293 void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
 294   LIR_Opr reg = new_register(T_INT);
 295   __ load(generate_address(base, disp, T_INT), reg, info);
 296   __ cmp(condition, reg, LIR_OprFact::intConst(c));
 297 }
 298 
 299 void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
 300   LIR_Opr reg1 = new_register(T_INT);
 301   __ load(generate_address(base, disp, type), reg1, info);
 302   __ cmp(condition, reg, reg1);
 303 }
 304 
 305 
 306 bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
 307 
 308   if (is_power_of_2(c - 1)) {
 309     __ shift_left(left, exact_log2(c - 1), tmp);
 310     __ add(tmp, left, result);
 311     return true;
 312   } else if (is_power_of_2(c + 1)) {
 313     __ shift_left(left, exact_log2(c + 1), tmp);
 314     __ sub(tmp, left, result);
 315     return true;
 316   } else {
 317     return false;
 318   }
 319 }
 320 
 321 void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
 322   BasicType type = item->type();
 323   __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
 324 }
 325 
 326 //----------------------------------------------------------------------
 327 //             visitor functions
 328 //----------------------------------------------------------------------
 329 
 330 
// Generate LIR for an indexed array store ({a,b,c,s,i,l,f,d}astore):
// operand loading, range check (with implicit null check), dynamic
// store check for object arrays, GC barriers, and the store itself.
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(),"");
  bool needs_range_check = x->compute_needs_range_check();
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  // A known-null constant store never needs a store check; profiling
  // forces the check so stored types are recorded.
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object() ||
                                         x->should_profile());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();

  }
  if (needs_store_check  || x->check_boolean()) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // the CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  // emit array address setup early so it schedules better
  // FIXME?  No harm in this on aarch32, and it might help
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      // The array length is already available: compare it against the index.
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // range_check also does the null check
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    LIR_Opr tmp1 = new_register(objectType);
    LIR_Opr tmp2 = new_register(objectType);
    LIR_Opr tmp3 = new_register(objectType);

    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
  }

  if (obj_store) {
    // Needs GC write barriers.
    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
    __ move(value.result(), array_addr, null_check_info);
    // Seems to be a precise store: post-barrier the exact element address.
    post_barrier(LIR_OprFact::address(array_addr), value.result());
  } else {
    LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
    __ move(result, array_addr, null_check_info);
  }
}
 406 
// Generate LIR for monitorenter: allocate the lock-slot operand and
// (under biased locking) a scratch register, then delegate to the
// shared monitor_enter helper.
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for biased locking
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_register(T_INT);
  }

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                        x->monitor_no(), info_for_exception, info);
}
 432 
 433 
// Generate LIR for monitorexit.  The object is not reloaded
// (dont_load_item); monitor_exit works from the monitor slot number,
// using obj_temp as a scratch register.
void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}
 445 
 446 
 447 void LIRGenerator::do_NegateOp(NegateOp* x) {
 448 
 449   LIRItem from(x->x(), this);
 450   from.load_item();
 451   LIR_Opr result = rlock_result(x);
 452   __ negate (from.result(), result);
 453 
 454 }
 455 
 456 // for  _fadd, _fmul, _fsub, _fdiv, _frem
 457 //      _dadd, _dmul, _dsub, _ddiv, _drem
 458 void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
 459 
 460   if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {
 461     // float remainder is implemented as a direct call into the runtime
 462     LIRItem right(x->x(), this);
 463     LIRItem left(x->y(), this);
 464 
 465     BasicTypeList signature(2);
 466     if (x->op() == Bytecodes::_frem) {
 467       signature.append(T_FLOAT);
 468       signature.append(T_FLOAT);
 469     } else {
 470       signature.append(T_DOUBLE);
 471       signature.append(T_DOUBLE);
 472     }
 473     CallingConvention* cc = frame_map()->c_calling_convention(&signature);
 474 
 475     const LIR_Opr result_reg = result_register_for(x->type());
 476     left.load_item_force(cc->at(1));
 477     right.load_item();
 478 
 479     __ move(right.result(), cc->at(0));
 480 
 481     address entry;
 482     if (x->op() == Bytecodes::_frem) {
 483       entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
 484     } else {
 485       entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
 486     }
 487 
 488     LIR_Opr result = rlock_result(x);
 489     __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
 490     __ move(result_reg, result);
 491 
 492     return;
 493   }
 494 
 495   LIRItem left(x->x(),  this);
 496   LIRItem right(x->y(), this);
 497   LIRItem* left_arg  = &left;
 498   LIRItem* right_arg = &right;
 499 
 500   // Always load right hand side.
 501   right.load_item();
 502 
 503   if (!left.is_register())
 504     left.load_item();
 505 
 506   LIR_Opr reg = rlock(x);
 507   LIR_Opr tmp = LIR_OprFact::illegalOpr;
 508   if (x->is_strictfp() && (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv)) {
 509     tmp = new_register(T_DOUBLE);
 510   }
 511 
 512   arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), NULL);
 513 
 514   set_result(x, round_item(reg));
 515 }
 516 
// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {

  // missing test if instr is commutative and if we should swap
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
    // 64-bit divide/remainder is a runtime leaf call, preceded by an
    // explicit divide-by-zero check that deoptimizes via DivByZeroStub.
    BasicTypeList signature(2);
    signature.append(T_LONG);
    signature.append(T_LONG);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    // check for division by zero (destroys registers of right operand!)
    CodeEmitInfo* info = state_for(x);

    right.load_item();

    __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));

    const LIR_Opr result_reg = result_register_for(x->type());
    // The divisor (right) goes into the first argument slot and the
    // dividend (left) into the second, matching the
    // SharedRuntime::ldiv/lrem entry points which take (divisor, dividend).
    left.load_item_force(cc->at(1));
    __ move(right.result(), cc->at(0));

    address entry;
    switch (x->op()) {
    case Bytecodes::_lrem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
      break; // check if dividend is 0 is done elsewhere
    case Bytecodes::_ldiv:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
      break; // check if dividend is 0 is done elsewhere
    default:
      ShouldNotReachHere();
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);
  } else {
    assert (x->op() == Bytecodes::_lmul || x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub,
            "expect lmul, ladd or lsub");
    // add, sub, mul
    left.load_item();
    if (! right.is_register()) {
      if (x->op() == Bytecodes::_lmul
          || ! right.is_constant()
          || ! Assembler::operand_valid_for_add_sub_immediate(right.get_jlong_constant())) {
        right.load_item();
      } else { // add, sub
        assert (x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expect ladd or lsub");
        // don't load constants to save register
        right.load_nonconstant();
      }
    }
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}
 578 
// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {

  // Test if instr is commutative and if we should swap
  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;
  if (x->is_commutative() && left.is_stack() && right.is_register()) {
    // swap them if left is real stack (or cached) and right is real register(not cached)
    left_arg = &right;
    right_arg = &left;
  }

  left_arg->load_item();

  // do not need to load right, as we can handle stack and constants
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {
    // Divide/remainder: explicit zero-divisor check, then idiv/irem LIR ops.
    right_arg->load_item();
    rlock_result(x);

    if (!(VM_Version::features() & FT_HW_DIVIDE)) {
      // MacroAssembler::divide32 destroys both operand registers
      left_arg->set_destroys_register();
      right_arg->set_destroys_register();
    }

    CodeEmitInfo* info = state_for(x);
    LIR_Opr tmp = new_register(T_INT);
    __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::intConst(0));
    __ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
    info = state_for(x);

    if (x->op() == Bytecodes::_irem) {
      __ irem(left_arg->result(), right_arg->result(), x->operand(), tmp, NULL);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left_arg->result(), right_arg->result(), x->operand(), tmp, NULL);
    }

  } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) {
    // NOTE(review): this branch tests and loads `right`/`left` directly
    // rather than through right_arg/left_arg; when the commutative swap
    // above has fired these refer to different items — confirm this is
    // intentional.
    if (right.is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(right.get_jint_constant())) {
      right.load_nonconstant();
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), LIR_OprFact::illegalOpr);
  } else {
    assert (x->op() == Bytecodes::_imul, "expect imul");
    if (right.is_constant()) {
      int c = right.get_jint_constant();
      if (! is_power_of_2(c) && ! is_power_of_2(c + 1) && ! is_power_of_2(c - 1)) {
        // Cannot use constant op.
        right.load_item();
      } else {
        // Constant can be strength-reduced to shift+add/sub; leave it unloaded.
        right.dont_load_item();
      }
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), new_register(T_INT));
  }
}
 645 
 646 void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
 647   // when an operand with use count 1 is the left operand, then it is
 648   // likely that no move for 2-operand-LIR-form is necessary
 649   if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
 650     x->swap_operands();
 651   }
 652 
 653   ValueTag tag = x->type()->tag();
 654   assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
 655   switch (tag) {
 656     case floatTag:
 657     case doubleTag:  do_ArithmeticOp_FPU(x);  return;
 658     case longTag:    do_ArithmeticOp_Long(x); return;
 659     case intTag:     do_ArithmeticOp_Int(x);  return;
 660   }
 661   ShouldNotReachHere();
 662 }
 663 
// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
//
// Constant shift distances are masked to 5 bits (int) or 6 bits (long)
// here, matching the JVM's shift semantics; variable distances are
// loaded into a register.
void LIRGenerator::do_ShiftOp(ShiftOp* x) {

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  if (right.is_constant()) {
    // Constant shift distance: encode it directly into the instruction.
    right.dont_load_item();

    switch (x->op()) {
    case Bytecodes::_ishl: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_ishr: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_iushr: {
      int c = right.get_jint_constant() & 0x1f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshl: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshr: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lushr: {
      int c = right.get_jint_constant() & 0x3f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    default:
      ShouldNotReachHere();
    }
  } else {
    // Variable shift distance.
    right.load_item();
    LIR_Opr tmp = LIR_OprFact::illegalOpr;
    // Mark the input as destroyed for long shifts — the generated
    // sequence clobbers the left register pair.
    if (left.result()->type() == T_LONG)
      left.set_destroys_register();
    switch (x->op()) {
    case Bytecodes::_ishl: {
      __ shift_left(left.result(), right.result(), x->operand(), tmp);
      break;
    }
    case Bytecodes::_ishr: {
      __ shift_right(left.result(), right.result(), x->operand(), tmp);
      break;
    }
    case Bytecodes::_iushr: {
      __ unsigned_shift_right(left.result(), right.result(), x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshl: {
      __ shift_left(left.result(), right.result(), x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshr: {
      __ shift_right(left.result(), right.result(), x->operand(), tmp);
      break;
    }
    case Bytecodes::_lushr: {
      __ unsigned_shift_right(left.result(), right.result(), x->operand(), tmp);
      break;
    }
    default:
      ShouldNotReachHere();
    }
  }
}
 745 
 746 // _iand, _land, _ior, _lor, _ixor, _lxor
 747 void LIRGenerator::do_LogicOp(LogicOp* x) {
 748 
 749   LIRItem left(x->x(),  this);
 750   LIRItem right(x->y(), this);
 751 
 752   left.load_item();
 753 
 754   rlock_result(x);
 755   if (right.is_constant()
 756       && ((right.type()->tag() == intTag
 757            && Assembler::operand_valid_for_logical_immediate(true, right.get_jint_constant()))
 758           || (right.type()->tag() == longTag
 759               && Assembler::operand_valid_for_logical_immediate(false, right.get_jlong_constant()))))  {
 760     right.dont_load_item();
 761   } else {
 762     right.load_item();
 763   }
 764   switch (x->op()) {
 765   case Bytecodes::_iand:
 766   case Bytecodes::_land:
 767     __ logical_and(left.result(), right.result(), x->operand()); break;
 768   case Bytecodes::_ior:
 769   case Bytecodes::_lor:
 770     __ logical_or (left.result(), right.result(), x->operand()); break;
 771   case Bytecodes::_ixor:
 772   case Bytecodes::_lxor:
 773     __ logical_xor(left.result(), right.result(), x->operand()); break;
 774   default: Unimplemented();
 775   }
 776 }
 777 
 778 // _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
 779 void LIRGenerator::do_CompareOp(CompareOp* x) {
 780   LIRItem left(x->x(), this);
 781   LIRItem right(x->y(), this);
 782   ValueTag tag = x->x()->type()->tag();
 783   left.load_item();
 784   right.load_item();
 785   LIR_Opr reg = rlock_result(x);
 786 
 787   if (x->x()->type()->is_float_kind()) {
 788     Bytecodes::Code code = x->op();
 789     __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
 790   } else if (x->x()->type()->tag() == longTag) {
 791     __ lcmp2int(left.result(), right.result(), reg);
 792   } else {
 793     Unimplemented();
 794   }
 795 }
 796 
// Generate LIR for the Unsafe compareAndSwap{Int,Long,Object}
// intrinsics.  Arguments: (object, field offset, expected value, new
// value).  The field address is computed once with leal so the cas and,
// for object fields, the GC barriers can share it.
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp

  assert(obj.type()->tag() == objectTag, "invalid type");

  // In 64bit the type can be long, sparc doesn't have this assert
  // assert(offset.type()->tag() == intTag, "invalid type");

  assert(cmp.type()->tag() == type->tag(), "invalid type");
  assert(val.type()->tag() == type->tag(), "invalid type");

  // get address of field
  obj.load_item();
  offset.load_nonconstant();
  if (type == longType) {
    // force the new value into a fixed register pair;
    // not needed if the allocator reserves correct pairs
    val.load_item_force(FrameMap::long0_opr);
  } else {
    val.load_item();
  }
  cmp.load_item();

  // Fold a constant offset directly into the address when possible.
  LIR_Address* a;
  if(offset.result()->is_constant()) {
    jint c = offset.result()->as_jint();
    a = new LIR_Address(obj.result(),
                        c,
                        as_BasicType(type));
  } else {
    a = new LIR_Address(obj.result(),
                        offset.result(),
                        LIR_Address::times_1,
                        0,
                        as_BasicType(type));
  }
  LIR_Opr addr = new_pointer_register();
  __ leal(LIR_OprFact::address(a), addr);

  if (type == objectType) {  // Write-barrier needed for Object fields.
    // Do the pre-write barrier, if any.
    pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
  }

  LIR_Opr result = rlock_result(x);

  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  if (type == objectType)
    __ cas_obj(addr, cmp.result(), val.result(), ill, ill, result);
  else if (type == intType)
    __ cas_int(addr, cmp.result(), val.result(), ill, ill, result);
  else if (type == longType)
    __ cas_long(addr, cmp.result(), val.result(), FrameMap::long1_opr, ill,
                result);
  else {
    ShouldNotReachHere();
  }

  // Flip the low bit of the cas result to form the Java boolean return
  // value.  NOTE(review): this assumes cas_* produces 0 on success on
  // this port — confirm against the cas_* implementations.
  __ logical_xor(result, LIR_OprFact::intConst(1), result);

  if (type == objectType) {   // Write-barrier needed for Object fields.
    // Seems to be precise
    post_barrier(addr, val.result());
  }
}
 866 
// Generate LIR for java.lang.Math intrinsics. sqrt and abs are emitted
// inline as single LIR ops; the transcendental functions (sin, cos, tan,
// log, log10, exp, pow) are routed to the SharedRuntime helpers via a
// runtime call.
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  switch (x->id()) {
    case vmIntrinsics::_dabs:
    case vmIntrinsics::_dsqrt: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);

      switch (x->id()) {
      case vmIntrinsics::_dsqrt: {
        __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      }
      case vmIntrinsics::_dabs: {
        __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      }
      }
      break;
    }
    case vmIntrinsics::_dlog10: // fall through
    case vmIntrinsics::_dlog: // fall through
    case vmIntrinsics::_dsin: // fall through
    case vmIntrinsics::_dtan: // fall through
    case vmIntrinsics::_dcos: // fall through
    case vmIntrinsics::_dexp: {
      assert(x->number_of_arguments() == 1, "wrong type");

      // Pick the matching SharedRuntime entry point for this intrinsic.
      address runtime_entry = NULL;
      switch (x->id()) {
      case vmIntrinsics::_dsin:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
        break;
      case vmIntrinsics::_dcos:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
        break;
      case vmIntrinsics::_dtan:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
        break;
      case vmIntrinsics::_dlog:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
        break;
      case vmIntrinsics::_dlog10:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
        break;
      case vmIntrinsics::_dexp:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
        break;
      default:
        ShouldNotReachHere();
      }

      LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    case vmIntrinsics::_dpow: {
      // pow takes two arguments, unlike the single-argument cases above.
      assert(x->number_of_arguments() == 2, "wrong type");
      address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
  }
}
 933 
 934 
// Generate LIR for the System.arraycopy intrinsic. All five operands are
// forced into fixed registers because the arraycopy LIR op always contains
// a call; arraycopy_helper() computes flags/expected_type used to pick the
// fast or checked copy path in the back end.
void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

  // The java calling convention does not give us enough registers
  // so we occupy two more: r4 and r5. The fast path code will be able to
  // make use of these registers for performance purpose. If going into
  // slow path we'll spill extra data to the stack as necessary

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));

  length.load_item_force  (FrameMap::as_opr(r4));
  LIR_Opr tmp =           FrameMap::as_opr(r5);

  // arraycopy is a void intrinsic.
  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}
 972 
// Generate LIR for the java.util.zip.CRC32 intrinsics:
//  - _updateCRC32: single-byte update, emitted as a dedicated LIR op;
//  - _updateBytesCRC32 / _updateByteBufferCRC32: compute the address of
//    the first byte to process and call the updateBytesCRC32 stub with
//    the C calling convention (int crc, address buf, int len).
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "why are we here?");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // val is destroyed by update_crc32
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      LIR_Opr index = off.result();
      // byte[] data starts after the array header; a direct ByteBuffer
      // address is already the raw start (offset 0).
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if(off.result()->is_constant()) {
        // Fold a constant offset into the displacement; no index needed.
        index = LIR_OprFact::illegalOpr;
       offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();

      if (offset) {
        // Pre-add the displacement so the address below is base [+ index].
        LIR_Opr tmp = new_pointer_register();
        __ add(base_op, LIR_OprFact::intConst(offset), tmp);
        base_op = tmp;
        offset = 0;
      }

      LIR_Address* a = new LIR_Address(base_op,
                                       index,
                                       LIR_Address::times_1,
                                       offset,
                                       T_BYTE);
      // Stub signature: (int crc, address buf, int len).
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr addr = new_pointer_register();
      __ leal(LIR_OprFact::address(a), addr);

      // Marshal the three arguments into the C calling convention slots.
      crc.load_item_force(cc->at(0));
      __ move(addr, cc->at(1));
      len.load_item_force(cc->at(2));

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
      __ move(result_reg, result);

      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}
1043 
// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
// Generate LIR for a primitive-type conversion. Conversions between long
// and float/double go through SharedRuntime helper calls; all other
// conversions are emitted as a single LIR convert op.
void LIRGenerator::do_Convert(Convert* x) {
  // inspired by the sparc port
  switch (x->op()) {
  case Bytecodes::_d2l:
  case Bytecodes::_f2l:
  case Bytecodes::_l2d:
  case Bytecodes::_l2f: {
    address entry;

    // Select the matching SharedRuntime conversion helper.
    switch (x->op()) {
    case Bytecodes::_d2l:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::d2l);
      break;
    case Bytecodes::_f2l:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::f2l);
      break;
    case Bytecodes::_l2d:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::l2d);
      break;
    case Bytecodes::_l2f:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::l2f);
      break;
    default:
      ShouldNotReachHere();
    }

    LIR_Opr result = call_runtime(x->value(), entry, x->type(), NULL);
    set_result(x, result);
  }
  break;

  default:
    LIRItem value(x->value(), this);
    value.load_item();

    if (x->op() == Bytecodes::_f2i || x->op() == Bytecodes::_d2i) {
      // NOTE(review): presumably the float->int convert sequence clobbers
      // its source register on this platform — confirm in the assembler.
      value.set_destroys_register();
    }

    LIR_Opr input = value.result();
    LIR_Opr result = rlock(x);

    __ convert(x->op(), input, result);

    assert(result->is_virtual(), "result must be virtual register");
    set_result(x, result);
  }
}
1094 
// Generate LIR for 'new' of a non-array instance. The shared new_instance()
// helper emits the fast allocation path plus slow-path stub using the
// fixed scratch registers passed here (klass in r3).
void LIRGenerator::do_NewInstance(NewInstance* x) {
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
                       FrameMap::r2_oop_opr,
                       FrameMap::r5_oop_opr,
                       FrameMap::r4_oop_opr,
                       LIR_OprFact::illegalOpr,
                       FrameMap::r3_metadata_opr, info);
  // Copy out of the fixed result register into a virtual register.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
1112 
// Generate LIR for 'new' of a primitive-type array. The element klass is
// always loaded, so no patching is needed. The length is forced into r6
// (presumably the register the NewTypeArrayStub slow path expects —
// confirm against the stub).
void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item_force(FrameMap::r6_opr);

  // Fixed scratch registers for the inline allocation sequence.
  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r2_oop_opr;
  LIR_Opr tmp2 = FrameMap::r4_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  // Load the (always resolved) primitive array klass.
  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  // Copy out of the fixed result register into a virtual register.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
1136 
// Generate LIR for 'new' of an object array. Unlike the primitive case,
// the element klass may be unloaded, so the klass load may need patching
// and the instruction must be re-executable (state_before).
void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info =  state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  // Fixed scratch registers for the inline allocation sequence.
  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r2_oop_opr;
  LIR_Opr tmp2 = FrameMap::r4_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;

  length.load_item_force(FrameMap::r6_opr);
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    // Compilation bails out; the interpreter will handle this allocation.
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  // Copy out of the fixed result register into a virtual register.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
1169 
1170 
// Generate LIR for 'multianewarray'. The dimension sizes are stored into
// the outgoing stack area and the runtime entry new_multi_array_id is
// called with (klass, rank, pointer-to-dimension-array).
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(dims->length(), NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  // Spill each dimension size to a 4-byte outgoing stack slot
  // (dimension counts are jints).
  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();

    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }

  LIR_Opr klass_reg = FrameMap::r1_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::r2_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::r3_opr;
  // Pass the address of the dimension array just stored at sp.
  __ move(FrameMap::sp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  // Copy out of the fixed result register into a virtual register.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
1220 
// Per-basic-block entry hook; this platform emits nothing at block entry.
void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}
1224 
// Generate LIR for checkcast. On failure the SimpleExceptionStub throws
// ClassCastException (or IncompatibleClassChangeError for the
// incompatible-class-change form used by invokeinterface checks).
void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception = state_for(x);

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded()) {
    // An unloaded klass needs an extra temp for the patched slow path.
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}
1256 
// Generate LIR for instanceof; the result is an int (0 or 1).
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded()) {
    // An unloaded klass needs an extra temp for the patched slow path.
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}
1276 
// Generate LIR for a two-way conditional branch (If node with exactly
// two successors). Backedges get a backedge-counter increment and a
// safepoint before the compare so the condition can be recomputed after
// the safepoint; float compares branch explicitly to the unordered
// successor.
void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  xin->load_item();

  // Keep the RHS out of a register when the backend can encode it
  // directly: add/sub-style immediates (NOTE(review): presumably because
  // cmp is encoded as a subtract — confirm in the assembler) and the
  // null object constant.
  if (yin->is_constant()) {
    if (tag == longTag
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jlong_constant())) {
      yin->dont_load_item();
    } else if (tag == intTag
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jint_constant())) {
      yin->dont_load_item();
    } else if (tag == addressTag
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_address_constant())) {
      yin->dont_load_item();
    } else if (tag == objectTag && yin->get_jobject_constant()->is_null_object()) {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else {
    yin->load_item();
  }

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }
  set_no_result(x);

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    // Float compare: an unordered result (NaN) goes to the unordered successor.
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  // The fall-through successor must be the default; jump to it explicitly.
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}
1333 
1334 LIR_Opr LIRGenerator::getThreadPointer() {
1335    return FrameMap::as_pointer_opr(rthread);
1336 }
1337 
1338 void LIRGenerator::trace_block_entry(BlockBegin* block) {
1339   __ move(LIR_OprFact::intConst(block->block_id()), FrameMap::r0_opr);
1340   LIR_OprList* args = new LIR_OprList(1);
1341   args->append(FrameMap::r0_opr);
1342   address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
1343   __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
1344 }
1345 
1346 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1347                                         CodeEmitInfo* info) {
1348   if (value->is_double_cpu()) {
1349     __ move(value, FrameMap::long0_opr);
1350     __ volatile_store_mem_reg(FrameMap::long0_opr, address, info);
1351   } else {
1352     __ volatile_store_mem_reg(value, address, info);
1353   }
1354 }
1355 
1356 void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
1357                                        CodeEmitInfo* info) {
1358   if (result->is_double_cpu()) {
1359     __ volatile_load_mem_reg(address, FrameMap::long0_opr, info);
1360     __ move(FrameMap::long0_opr, result);
1361   } else {
1362     __ volatile_load_mem_reg(address, result, info);
1363   }
1364 }
1365 
1366 void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
1367                                      BasicType type, bool is_volatile) {
1368   LIR_Address* addr = new LIR_Address(src, offset, type);
1369   __ load(addr, dst);
1370 }
1371 
1372 
1373 void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
1374                                      BasicType type, bool is_volatile) {
1375   LIR_Address* addr = new LIR_Address(src, offset, type);
1376   bool is_obj = (type == T_ARRAY || type == T_OBJECT);
1377   if (is_obj) {
1378     // Do the pre-write barrier, if any.
1379     pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
1380                 true /* do_load */, false /* patch */, NULL);
1381     __ move(data, addr);
1382     assert(src->is_register(), "must be register");
1383     // Seems to be a precise address
1384     post_barrier(LIR_OprFact::address(addr), data);
1385   } else {
1386     __ move(data, addr);
1387   }
1388 }
1389 
// Generate LIR for the Unsafe getAndSet{Int,Long,Object} and
// getAndAdd{Int,Long} intrinsics. Long exchanges use the fixed long
// register pairs (new value in long1, result through long0); object
// exchanges are bracketed with the GC write barriers.
void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem value(x->value(), this);

  src.load_item();
  off.load_nonconstant();
  if (type == T_LONG && !x->is_add()) {
      // Force the new long value into a fixed pair (the result comes back
      // via long0 below); not needed if the allocator reserved correct pairs.
      value.load_item_force(FrameMap::long1_opr);
  } else {
    // We can cope with a constant increment in an xadd
    if (! (x->is_add()
           && value.is_constant()
           && can_inline_as_constant(x->value()))) {
      value.load_item();
    }
  }

  bool is_long = (type == T_LONG);
  // Long results are produced in the fixed long0 pair and copied to the
  // real result at the end; all other types go straight to the result.
  LIR_Opr dst = is_long ? FrameMap::long0_opr : rlock_result(x, type);
  LIR_Opr data = value.result();
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
  LIR_Opr offset = off.result();

  if (data == dst) {
    // Don't let the exchange clobber its own input; copy to a temp first.
    LIR_Opr tmp = new_register(data->type());
    __ move(data, tmp);
    data = tmp;
  }

  // Fold a constant offset into the displacement when possible.
  LIR_Address* addr;
  if (offset->is_constant()) {
    addr = new LIR_Address(src.result(), offset->as_jint(), type);
  } else {
    addr = new LIR_Address(src.result(), offset, type);
  }

  LIR_Opr tmp = new_register(T_INT);
  LIR_Opr ptr = LIR_OprFact::illegalOpr;

  if (x->is_add()) {
    __ xadd(LIR_OprFact::address(addr), data, dst, tmp);
  } else {
    if (is_obj) {
      // Do the pre-write barrier, if any. The barriers need the raw
      // effective address, so compute it explicitly.
      ptr = new_pointer_register();
      __ add(src.result(), off.result(), ptr);
      pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
                  true /* do_load */, false /* patch */, NULL);
    }
    __ xchg(LIR_OprFact::address(addr), data, dst, tmp);
    if (is_obj) {
      // Seems to be a precise address.
      post_barrier(ptr, data);
    }
  }

  if (is_long) {
    // Copy the long result out of the fixed pair into the real result.
    dst = rlock_result(x, type);
    __ move(FrameMap::long0_opr, dst);
  }
}