/*
 * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_ppc.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif
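// Note: in ASSERT builds the `__` shorthand above also tags each emitted LIR
// instruction with its C++ source position (__FILE__/__LINE__), which helps
// when tracing generated LIR back to this file.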

void LIRItem::load_byte_item() {
  // Byte loads use the same registers as other loads.
  load_item();
}


void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (_gen->can_inline_as_constant(value())) {
    if (!r->is_constant()) {
      r = LIR_OprFact::value_type(value()->type());
    }
    _result = r;
  } else {
    load_item();
  }
}


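// Sign-extend an int item into the long register `dst`. Used below for C calls
// whose calling convention passes ints as longs (see
// CCallingConventionRequiresIntsAsLongs in do_update_CRC32): registers need an
// explicit i2l, while constants and memory operands are loaded sign-extended
// anyway.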
inline void load_int_as_long(LIR_List *ll, LIRItem &li, LIR_Opr dst) {
  LIR_Opr r = li.value()->operand();
  if (r->is_register()) {
    LIR_Opr dst_l = FrameMap::as_long_opr(dst->as_register());
    ll->convert(Bytecodes::_i2l, li.result(), dst_l); // Convert.
  } else {
    // Constants or memory get loaded with sign extend on this platform.
    ll->move(li.result(), dst);
  }
}


//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------

LIR_Opr LIRGenerator::exceptionOopOpr()              { return FrameMap::R3_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()               { return FrameMap::R4_opr; }
LIR_Opr LIRGenerator::syncLockOpr()                  { return FrameMap::R5_opr; }     // Need temp effect for MonitorEnterStub.
LIR_Opr LIRGenerator::syncTempOpr()                  { return FrameMap::R4_oop_opr; } // Need temp effect for MonitorEnterStub.
LIR_Opr LIRGenerator::getThreadTemp()                { return LIR_OprFact::illegalOpr; } // not needed

LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
  case intTag:     opr = FrameMap::R3_opr;         break;
  case objectTag:  opr = FrameMap::R3_oop_opr;     break;
  case longTag:    opr = FrameMap::R3_long_opr;    break;
  case floatTag:   opr = FrameMap::F1_opr;         break;
  case doubleTag:  opr = FrameMap::F1_double_opr;  break;

  case addressTag:
  default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}

LIR_Opr LIRGenerator::rlock_callee_saved(BasicType type) {
  ShouldNotReachHere();
  return LIR_OprFact::illegalOpr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  return new_register(T_INT);
}


//--------- loading items into registers --------------------------------

// PPC cannot inline all constants.
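// Only signed 16-bit immediates (simm16: -32768..32767) fit into the immediate
// forms of instructions such as addi/cmpi, so only those and the null constant
// can be used directly; everything else must be materialized in a register.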
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::is_simm16(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_LongConstant() != NULL) {
    return Assembler::is_simm16(v->type()->as_LongConstant()->value());
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}


// Only simm16 constants can be inlined.
bool LIRGenerator::can_inline_as_constant(Value i) const {
  return can_store_as_constant(i, as_BasicType(i->type()));
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  if (c->type() == T_INT) {
    return Assembler::is_simm16(c->as_jint());
  }
  if (c->type() == T_LONG) {
    return Assembler::is_simm16(c->as_jlong());
  }
  if (c->type() == T_OBJECT) {
    return c->as_jobject() == NULL;
  }
  return false;
}


LIR_Opr LIRGenerator::safepoint_poll_register() {
  return new_register(T_INT);
}


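// PPC integer loads and stores address memory either as base + simm16
// displacement (D-form) or as base + index register (X-form); there is no
// base + index + displacement mode. Any displacement that coexists with an
// index, or that doesn't fit into simm16, is therefore folded into the index
// register below.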
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");

  // Accumulate fixed displacements.
  if (index->is_constant()) {
    disp += index->as_constant_ptr()->as_jint() << shift;
    index = LIR_OprFact::illegalOpr;
  }

  if (index->is_register()) {
    // Apply the shift and accumulate the displacement.
    if (shift > 0) {
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index, shift, tmp);
      index = tmp;
    }
    if (disp != 0) {
      LIR_Opr tmp = new_pointer_register();
      if (Assembler::is_simm16(disp)) {
        __ add(index, LIR_OprFact::intptrConst(disp), tmp);
        index = tmp;
      } else {
        __ move(LIR_OprFact::intptrConst(disp), tmp);
        __ add(tmp, index, tmp);
        index = tmp;
      }
      disp = 0;
    }
  } else if (!Assembler::is_simm16(disp)) {
    // Index is illegal so replace it with the displacement loaded into a register.
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(disp), index);
    disp = 0;
  }

  // At this point we either have base + index or base + displacement.
  if (disp == 0) {
    return new LIR_Address(base, index, type);
  } else {
    assert(Assembler::is_simm16(disp), "must be");
    return new LIR_Address(base, disp, type);
  }
}


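// Compute the address of an array element as
//   array_opr + arrayOopDesc::base_offset_in_bytes(type) + index * elem_size,
// e.g. for an int[]: array + header size + index * 4. For a constant index the
// whole displacement is folded into `offset` when it fits into simm16.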
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type, bool needs_card_mark) {
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);

  LIR_Opr base_opr;
  int offset = arrayOopDesc::base_offset_in_bytes(type);

  if (index_opr->is_constant()) {
    int i = index_opr->as_constant_ptr()->as_jint();
    int array_offset = i * elem_size;
    if (Assembler::is_simm16(array_offset + offset)) {
      base_opr = array_opr;
      offset = array_offset + offset;
    } else {
      base_opr = new_pointer_register();
      if (Assembler::is_simm16(array_offset)) {
        __ add(array_opr, LIR_OprFact::intptrConst(array_offset), base_opr);
      } else {
        __ move(LIR_OprFact::intptrConst(array_offset), base_opr);
        __ add(base_opr, array_opr, base_opr);
      }
    }
  } else {
#ifdef _LP64
    if (index_opr->type() == T_INT) {
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
#endif

    base_opr = new_pointer_register();
    assert(index_opr->is_register(), "Must be register");
    if (shift > 0) {
      __ shift_left(index_opr, shift, base_opr);
      __ add(base_opr, array_opr, base_opr);
    } else {
      __ add(index_opr, array_opr, base_opr);
    }
  }
  if (needs_card_mark) {
    LIR_Opr ptr = new_pointer_register();
    __ add(base_opr, LIR_OprFact::intptrConst(offset), ptr);
    return new LIR_Address(ptr, type);
  } else {
    return new LIR_Address(base_opr, offset, type);
  }
}


LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r = NULL;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
  } else {
    ShouldNotReachHere();
  }
  if (!Assembler::is_simm16(x)) {
    LIR_Opr tmp = new_register(type);
    __ move(r, tmp);
    return tmp;
  }
  return r;
}


void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr temp = new_register(addr->type());
  __ move(addr, temp);
  __ add(temp, load_immediate(step, addr->type()), temp);
  __ move(temp, addr);
}


void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr tmp = FrameMap::R0_opr;
  __ load(new LIR_Address(base, disp, T_INT), tmp, info);
  __ cmp(condition, tmp, c);
}


void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base,
                               int disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr tmp = FrameMap::R0_opr;
  __ load(new LIR_Address(base, disp, type), tmp, info);
  __ cmp(condition, reg, tmp);
}


void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base,
                               LIR_Opr disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr tmp = FrameMap::R0_opr;
  __ load(new LIR_Address(base, disp, type), tmp, info);
  __ cmp(condition, reg, tmp);
}


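// Strength-reduce a multiplication by a constant of the form 2^n +/- 1 into a
// shift plus an add/sub, e.g. left * 7 = (left << 3) - left and
// left * 9 = (left << 3) + left.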
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
  assert(left != result, "should be different registers");
  if (is_power_of_2(c + 1)) {
    __ shift_left(left, log2_intptr(c + 1), result);
    __ sub(result, left, result);
    return true;
  } else if (is_power_of_2(c - 1)) {
    __ shift_left(left, log2_intptr(c - 1), result);
    __ add(result, left, result);
    return true;
  }
  return false;
}


void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  BasicType t = item->type();
  LIR_Opr sp_opr = FrameMap::SP_opr;
  if ((t == T_LONG || t == T_DOUBLE) &&
      ((in_bytes(offset_from_sp) - STACK_BIAS) % 8 != 0)) {
    __ unaligned_move(item, new LIR_Address(sp_opr, in_bytes(offset_from_sp), t));
  } else {
    __ move(item, new LIR_Address(sp_opr, in_bytes(offset_from_sp), t));
  }
}


//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------

void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(), "");
  bool needs_range_check = x->compute_needs_range_check();
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object() ||
                                         x->should_profile());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();
  }
  if (needs_store_check) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // The CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different.
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  // Emit array address setup early so it schedules better.
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // array_range_check also performs the null check.
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    // Following registers are used by slow_subtype_check:
    LIR_Opr tmp1 = FrameMap::R4_opr; // super_klass
    LIR_Opr tmp2 = FrameMap::R5_opr; // sub_klass
    LIR_Opr tmp3 = FrameMap::R6_opr; // temp

    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3,
                   store_check_info, x->profiled_method(), x->profiled_bci());
  }

  if (obj_store) {
    // Needs GC write barriers.
    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
  }
  __ move(value.result(), array_addr, null_check_info);
  if (obj_store) {
    // Precise card mark.
    post_barrier(LIR_OprFact::address(array_addr), value.result());
  }
}


void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // We use R4+R5 in order to get a temp effect. These registers are used in the slow path (MonitorEnterStub).
  LIR_Opr lock    = FrameMap::R5_opr;
  LIR_Opr scratch = FrameMap::R4_opr;
  LIR_Opr hdr     = FrameMap::R6_opr;

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }

  // This CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expects object to be unlocked).
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, hdr, scratch, x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  set_no_result(x);
  LIR_Opr lock     = FrameMap::R5_opr;
  LIR_Opr hdr      = FrameMap::R4_opr; // Used for slow path (MonitorExitStub).
  LIR_Opr obj_temp = FrameMap::R6_opr;
  monitor_exit(obj_temp, lock, hdr, LIR_OprFact::illegalOpr, x->monitor_no());
}


// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem value(x->x(), this);
  value.load_item();
  LIR_Opr reg = rlock_result(x);
  __ negate(value.result(), reg);
}


// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  switch (x->op()) {
  case Bytecodes::_fadd:
  case Bytecodes::_fmul:
  case Bytecodes::_fsub:
  case Bytecodes::_fdiv:
  case Bytecodes::_dadd:
  case Bytecodes::_dmul:
  case Bytecodes::_dsub:
  case Bytecodes::_ddiv: {
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);
    left.load_item();
    right.load_item();
    rlock_result(x);
    arithmetic_op_fpu(x->op(), x->operand(), left.result(), right.result(), x->is_strictfp());
  }
  break;

  case Bytecodes::_frem:
  case Bytecodes::_drem: {
    address entry = NULL;
    switch (x->op()) {
    case Bytecodes::_frem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
      break;
    case Bytecodes::_drem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
      break;
    default:
      ShouldNotReachHere();
    }
    LIR_Opr result = call_runtime(x->x(), x->y(), entry, x->type(), NULL);
    set_result(x, result);
  }
  break;

  default: ShouldNotReachHere();
  }
}


// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  bool is_div_rem = x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem;

  LIRItem right(x->y(), this);
  // Missing: test whether the instruction is commutative and whether we should swap operands.
  if (right.value()->type()->as_LongConstant() &&
      (x->op() == Bytecodes::_lsub && right.value()->type()->as_LongConstant()->value() == ((-1)<<15)) ) {
    // Sub is implemented by addi and can't support min_simm16 as a constant.
    right.load_item();
  } else {
    right.load_nonconstant();
  }
  assert(right.is_constant() || right.is_register(), "wrong state of right");

  if (is_div_rem) {
    LIR_Opr divisor = right.result();
    if (divisor->is_register()) {
      CodeEmitInfo* null_check_info = state_for(x);
      __ cmp(lir_cond_equal, divisor, LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(null_check_info));
    } else {
      jlong const_divisor = divisor->as_constant_ptr()->as_jlong();
      if (const_divisor == 0) {
        CodeEmitInfo* null_check_info = state_for(x);
        __ jump(new DivByZeroStub(null_check_info));
        rlock_result(x);
        __ move(LIR_OprFact::longConst(0), x->operand()); // dummy
        return;
      }
      if (x->op() == Bytecodes::_lrem && !is_power_of_2(const_divisor) && const_divisor != -1) {
        // Remainder computation would need additional tmp != R0.
        right.load_item();
      }
    }
  }

  LIRItem left(x->x(), this);
  left.load_item();
  rlock_result(x);
  if (is_div_rem) {
    CodeEmitInfo* info = NULL; // Null check already done above.
    LIR_Opr tmp = FrameMap::R0_opr;
    if (x->op() == Bytecodes::_lrem) {
      __ irem(left.result(), right.result(), x->operand(), tmp, info);
    } else if (x->op() == Bytecodes::_ldiv) {
      __ idiv(left.result(), right.result(), x->operand(), tmp, info);
    }
  } else {
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}


// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  bool is_div_rem = x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem;

  LIRItem right(x->y(), this);
  // Missing: test whether the instruction is commutative and whether we should swap operands.
  if (right.value()->type()->as_IntConstant() &&
      (x->op() == Bytecodes::_isub && right.value()->type()->as_IntConstant()->value() == ((-1)<<15)) ) {
    // Sub is implemented by addi and can't support min_simm16 as a constant.
    right.load_item();
  } else {
    right.load_nonconstant();
  }
  assert(right.is_constant() || right.is_register(), "wrong state of right");

  if (is_div_rem) {
    LIR_Opr divisor = right.result();
    if (divisor->is_register()) {
      CodeEmitInfo* null_check_info = state_for(x);
      __ cmp(lir_cond_equal, divisor, LIR_OprFact::intConst(0));
      __ branch(lir_cond_equal, T_INT, new DivByZeroStub(null_check_info));
    } else {
      jint const_divisor = divisor->as_constant_ptr()->as_jint();
      if (const_divisor == 0) {
        CodeEmitInfo* null_check_info = state_for(x);
        __ jump(new DivByZeroStub(null_check_info));
        rlock_result(x);
        __ move(LIR_OprFact::intConst(0), x->operand()); // dummy
        return;
      }
      if (x->op() == Bytecodes::_irem && !is_power_of_2(const_divisor) && const_divisor != -1) {
        // Remainder computation would need additional tmp != R0.
        right.load_item();
      }
    }
  }

  LIRItem left(x->x(), this);
  left.load_item();
  rlock_result(x);
  if (is_div_rem) {
    CodeEmitInfo* info = NULL; // Null check already done above.
    LIR_Opr tmp = FrameMap::R0_opr;
    if (x->op() == Bytecodes::_irem) {
      __ irem(left.result(), right.result(), x->operand(), tmp, info);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left.result(), right.result(), x->operand(), tmp, info);
    }
  } else {
    arithmetic_op_int(x->op(), x->operand(), left.result(), right.result(), FrameMap::R0_opr);
  }
}


void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag: do_ArithmeticOp_FPU(x);  return;
    case longTag:   do_ArithmeticOp_Long(x); return;
    case intTag:    do_ArithmeticOp_Int(x);  return;
  }
  ShouldNotReachHere();
}


// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
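// A temp register (R0 below) is only needed when the shift count is in a
// register, so that the count can be masked to Java's shift semantics (low
// 5 bits for ints, low 6 bits for longs); immediate counts need no temp.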
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  LIRItem value(x->x(), this);
  LIRItem count(x->y(), this);
  value.load_item();
  LIR_Opr reg = rlock_result(x);
  LIR_Opr mcount;
  if (count.result()->is_register()) {
    mcount = FrameMap::R0_opr;
  } else {
    mcount = LIR_OprFact::illegalOpr;
  }
  shift_op(x->op(), reg, value.result(), count.result(), mcount);
}


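// PPC logic instructions with immediates (andi., andis., ori, oris, xori,
// xoris) take a 16-bit unsigned immediate that applies to either the low or
// the high halfword, hence the two uimm16 checks below. The remaining cases
// are bit masks that Assembler::andi can encode as rotate-and-mask
// instructions.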
inline bool can_handle_logic_op_as_uimm(ValueType *type, Bytecodes::Code bc) {
  jlong int_or_long_const;
  if (type->as_IntConstant()) {
    int_or_long_const = type->as_IntConstant()->value();
  } else if (type->as_LongConstant()) {
    int_or_long_const = type->as_LongConstant()->value();
  } else if (type->as_ObjectConstant()) {
    return type->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }

  if (Assembler::is_uimm(int_or_long_const, 16)) return true;
  if ((int_or_long_const & 0xFFFF) == 0 &&
      Assembler::is_uimm((jlong)((julong)int_or_long_const >> 16), 16)) return true;

  // see Assembler::andi
  if (bc == Bytecodes::_iand &&
      (is_power_of_2_long(int_or_long_const+1) ||
       is_power_of_2_long(int_or_long_const) ||
       is_power_of_2_long(-int_or_long_const))) return true;
  if (bc == Bytecodes::_land &&
      (is_power_of_2_long(int_or_long_const+1) ||
       (Assembler::is_uimm(int_or_long_const, 32) && is_power_of_2_long(int_or_long_const)) ||
       (int_or_long_const != min_jlong && is_power_of_2_long(-int_or_long_const)))) return true;

  // special case: xor -1
  if ((bc == Bytecodes::_ixor || bc == Bytecodes::_lxor) &&
      int_or_long_const == -1) return true;
  return false;
}


// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();

  Value rval = right.value();
  LIR_Opr r = rval->operand();
  ValueType *type = rval->type();
  // Logic instructions use unsigned immediate values.
  if (can_handle_logic_op_as_uimm(type, x->op())) {
    if (!r->is_constant()) {
      r = LIR_OprFact::value_type(type);
      rval->set_operand(r);
    }
    right.set_result(r);
  } else {
    right.load_item();
  }

  LIR_Opr reg = rlock_result(x);

  logic_op(x->op(), reg, left.result(), right.result());
}


// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);
  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}


void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // Value to compare with field.
  LIRItem val   (x->argument_at(3), this);  // Replace field with val if matches cmp.

  LIR_Opr t1 = LIR_OprFact::illegalOpr;
  LIR_Opr t2 = LIR_OprFact::illegalOpr;
  LIR_Opr addr = new_pointer_register();

  // Get address of field.
  obj.load_item();
  offset.load_item();
  cmp.load_item();
  val.load_item();

  __ add(obj.result(), offset.result(), addr);

  // Volatile load may be followed by Unsafe CAS.
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar(); // To be safe. Unsafe semantics are unclear.
  } else {
    __ membar_release();
  }

  if (type == objectType) {  // Write-barrier needed for Object fields.
    // Only cmp value can get overwritten, no do_load required.
    pre_barrier(LIR_OprFact::illegalOpr /* addr */, cmp.result() /* pre_val */,
                false /* do_load */, false /* patch */, NULL);
  }

  if (type == objectType) {
    if (UseCompressedOops) {
      t1 = new_register(T_OBJECT);
      t2 = new_register(T_OBJECT);
    }
    __ cas_obj(addr, cmp.result(), val.result(), t1, t2);
  } else if (type == intType) {
    __ cas_int(addr, cmp.result(), val.result(), t1, t2);
  } else if (type == longType) {
    __ cas_long(addr, cmp.result(), val.result(), t1, t2);
  } else {
    ShouldNotReachHere();
  }
  // Generate conditional move of boolean result.
  LIR_Opr result = rlock_result(x);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, as_BasicType(type));
  if (type == objectType) {  // Write-barrier needed for Object fields.
    // Precise card mark since could either be object or array.
    post_barrier(addr, val.result());
  }
}


void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  switch (x->id()) {
    case vmIntrinsics::_dabs: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);
      __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
      break;
    }
    case vmIntrinsics::_dsqrt: {
      if (VM_Version::has_fsqrt()) {
        assert(x->number_of_arguments() == 1, "wrong type");
        LIRItem value(x->argument_at(0), this);
        value.load_item();
        LIR_Opr dst = rlock_result(x);
        __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      } // else fallthru
    }
    case vmIntrinsics::_dlog10: // fall through
    case vmIntrinsics::_dlog: // fall through
    case vmIntrinsics::_dsin: // fall through
    case vmIntrinsics::_dtan: // fall through
    case vmIntrinsics::_dcos: // fall through
    case vmIntrinsics::_dexp: {
      assert(x->number_of_arguments() == 1, "wrong type");

      address runtime_entry = NULL;
      switch (x->id()) {
      case vmIntrinsics::_dsqrt:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsqrt);
        break;
      case vmIntrinsics::_dsin:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
        break;
      case vmIntrinsics::_dcos:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
        break;
      case vmIntrinsics::_dtan:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
        break;
      case vmIntrinsics::_dlog:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
        break;
      case vmIntrinsics::_dlog10:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
        break;
      case vmIntrinsics::_dexp:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
        break;
      default:
        ShouldNotReachHere();
      }

      LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    case vmIntrinsics::_dpow: {
      assert(x->number_of_arguments() == 2, "wrong type");
      address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
  }
}


void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code.
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src     (x->argument_at(0), this);
  LIRItem src_pos (x->argument_at(1), this);
  LIRItem dst     (x->argument_at(2), this);
  LIRItem dst_pos (x->argument_at(3), this);
  LIRItem length  (x->argument_at(4), this);

  // Load all values into callee-saved registers (C calling convention),
  // as this makes the parameter passing to the fast case simpler.
  src.load_item_force     (FrameMap::R14_oop_opr);
  src_pos.load_item_force (FrameMap::R15_opr);
  dst.load_item_force     (FrameMap::R17_oop_opr);
  dst_pos.load_item_force (FrameMap::R18_opr);
  length.load_item_force  (FrameMap::R19_opr);
  LIR_Opr tmp =            FrameMap::R20_opr;

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(),
               length.result(), tmp,
               expected_type, flags, info);
  set_no_result(x);
}


// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
void LIRGenerator::do_Convert(Convert* x) {
  switch (x->op()) {

    // int/long -> float/double: force spill
    case Bytecodes::_l2f: {
      if (!VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        // fcfid+frsp needs fixup code to avoid rounding incompatibility.
        address entry = CAST_FROM_FN_PTR(address, SharedRuntime::l2f);
        LIR_Opr result = call_runtime(x->value(), entry, x->type(), NULL);
        set_result(x, result);
        break;
      } // else fallthru
    }
    case Bytecodes::_l2d: {
      LIRItem value(x->value(), this);
      LIR_Opr reg = rlock_result(x);
      value.load_item();
      LIR_Opr tmp = force_to_spill(value.result(), T_DOUBLE);
      __ convert(x->op(), tmp, reg);
      break;
    }
    case Bytecodes::_i2f:
    case Bytecodes::_i2d: {
      LIRItem value(x->value(), this);
      LIR_Opr reg = rlock_result(x);
      value.load_item();
      // Convert i2l first.
      LIR_Opr tmp1 = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, value.result(), tmp1);
      LIR_Opr tmp2 = force_to_spill(tmp1, T_DOUBLE);
      __ convert(x->op(), tmp2, reg);
      break;
    }

    // float/double -> int/long: result must start in memory
    case Bytecodes::_f2l:
    case Bytecodes::_d2l: {
      LIRItem value(x->value(), this);
      LIR_Opr reg = rlock_result(x);
      value.set_destroys_register(); // USE_KILL
      value.load_item();
      set_vreg_flag(reg, must_start_in_memory);
      __ convert(x->op(), value.result(), reg);
      break;
    }
    case Bytecodes::_f2i:
    case Bytecodes::_d2i: {
      LIRItem value(x->value(), this);
      LIR_Opr reg = rlock_result(x);
      value.set_destroys_register(); // USE_KILL
      value.load_item();
      // Convert l2i afterwards.
      LIR_Opr tmp1 = new_register(T_LONG);
      set_vreg_flag(tmp1, must_start_in_memory);
      __ convert(x->op(), value.result(), tmp1);
      __ convert(Bytecodes::_l2i, tmp1, reg);
      break;
    }

    // Within same category: just register conversions.
    case Bytecodes::_i2b:
    case Bytecodes::_i2c:
    case Bytecodes::_i2s:
    case Bytecodes::_i2l:
    case Bytecodes::_l2i:
    case Bytecodes::_f2d:
    case Bytecodes::_d2f: {
      LIRItem value(x->value(), this);
      LIR_Opr reg = rlock_result(x);
      value.load_item();
      __ convert(x->op(), value.result(), reg);
      break;
    }

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::do_NewInstance(NewInstance* x) {
  // This instruction can be deoptimized in the slow path.
  const LIR_Opr reg = result_register_for(x->type());
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path (NewInstanceStub).
  LIR_Opr tmp1 = FrameMap::R5_oop_opr;
  LIR_Opr tmp2 = FrameMap::R6_oop_opr;
  LIR_Opr tmp3 = FrameMap::R7_oop_opr;
  LIR_Opr tmp4 = FrameMap::R8_oop_opr;
  new_instance(reg, x->klass(), x->is_unresolved(), tmp1, tmp2, tmp3, tmp4, klass_reg, info);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item();

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path (NewTypeArrayStub).
  // We use R5 in order to get a temp effect. This register is used in the slow path (NewTypeArrayStub).
  LIR_Opr tmp1 = FrameMap::R5_oop_opr;
  LIR_Opr tmp2 = FrameMap::R6_oop_opr;
  LIR_Opr tmp3 = FrameMap::R7_oop_opr;
  LIR_Opr tmp4 = FrameMap::R8_oop_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* info = state_for(x, x->state());
  // In case of patching (i.e., object class is not yet loaded),
  // we need to reexecute the instruction and therefore provide
  // the state before the parameters have been consumed.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  LIRItem length(x->length(), this);
  length.load_item();

  const LIR_Opr reg = result_register_for(x->type());
  LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path (NewObjectArrayStub).
  // We use R5 in order to get a temp effect. This register is used in the slow path (NewObjectArrayStub).
  LIR_Opr tmp1 = FrameMap::R5_oop_opr;
  LIR_Opr tmp2 = FrameMap::R6_oop_opr;
  LIR_Opr tmp3 = FrameMap::R7_oop_opr;
  LIR_Opr tmp4 = FrameMap::R8_oop_opr;
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciMetadata* obj = ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(dims->length(), NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_nonconstant();
    // FrameMap::_reserved_argument_area_size includes space for the dimension
    // varargs, because it's initialized to hir()->max_stack() when the
    // FrameMap is created.
    store_stack_parameter(size->result(), in_ByteSize(i*sizeof(jint) + FrameMap::first_available_sp_in_frame));
  }

  const LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path.
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::R5_opr; // Used by slow path.
  __ move(LIR_OprFact::intConst(x->rank()), rank);

  LIR_Opr varargs = FrameMap::as_pointer_opr(R6); // Used by slow path.
  __ leal(LIR_OprFact::address(new LIR_Address(FrameMap::SP_opr, FrameMap::first_available_sp_in_frame, T_INT)),
          varargs);

  // Note: This instruction can be deoptimized in the slow path.
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  const LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}


void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
    // Must do this before locking the destination register as
    // an oop register, and before the obj is loaded (so x->obj()->item()
    // is valid for creating a debug info location).
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  CodeStub* stub;
  CodeEmitInfo* info_for_exception = state_for(x);

  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id,
                                   LIR_OprFact::illegalOpr, info_for_exception);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  // Following registers are used by slow_subtype_check:
  LIR_Opr tmp1 = FrameMap::R4_oop_opr; // super_klass
  LIR_Opr tmp2 = FrameMap::R5_oop_opr; // sub_klass
  LIR_Opr tmp3 = FrameMap::R6_oop_opr; // temp
  __ checkcast(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}


void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }
  // Ensure the result register is not the input register because the
  // result is initialized before the patching safepoint.
  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  // Following registers are used by slow_subtype_check:
  LIR_Opr tmp1 = FrameMap::R4_oop_opr; // super_klass
  LIR_Opr tmp2 = FrameMap::R5_oop_opr; // sub_klass
  LIR_Opr tmp3 = FrameMap::R6_oop_opr; // temp
  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
                x->direct_compare(), patching_info,
                x->profiled_method(), x->profiled_bci());
}


void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;
  If::Condition cond = x->cond();

  LIR_Opr left = LIR_OprFact::illegalOpr;
  LIR_Opr right = LIR_OprFact::illegalOpr;

  xin->load_item();
  left = xin->result();

  if (yin->result()->is_constant() && yin->result()->type() == T_INT &&
      Assembler::is_simm16(yin->result()->as_constant_ptr()->as_jint())) {
    // Inline int constants which are small enough to be immediate operands.
    right = LIR_OprFact::value_type(yin->value()->type());
  } else if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 &&
             (cond == If::eql || cond == If::neq)) {
    // Inline long zero.
    right = LIR_OprFact::value_type(yin->value()->type());
  } else if (tag == objectTag && yin->is_constant() && (yin->get_jobject_constant()->is_null_object())) {
    right = LIR_OprFact::value_type(yin->value()->type());
  } else {
    yin->load_item();
    right = yin->result();
  }
  set_no_result(x);

  // Add safepoint before generating condition code so it can be recomputed.
  if (x->is_safepoint()) {
    // Increment backedge counter if needed.
    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}


LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::as_pointer_opr(R16_thread);
}


void LIRGenerator::trace_block_entry(BlockBegin* block) {
  LIR_Opr arg1 = FrameMap::R3_opr; // ARG1
  __ move(LIR_OprFact::intConst(block->block_id()), arg1);
  LIR_OprList* args = new LIR_OprList(1);
  args->append(arg1);
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
}


void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
#ifdef _LP64
  __ store(value, address, info);
#else
  Unimplemented();
//  __ volatile_store_mem_reg(value, address, info);
#endif
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
#ifdef _LP64
  __ load(address, result, info);
#else
  Unimplemented();
//  __ volatile_load_mem_reg(address, result, info);
#endif
}


void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
                                     BasicType type, bool is_volatile) {
  LIR_Opr base_op = src;
  LIR_Opr index_op = offset;

  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
#ifndef _LP64
  if (is_volatile && type == T_LONG) {
    __ volatile_store_unsafe_reg(data, src, offset, type, NULL, lir_patch_none);
  } else
#endif
  {
    if (type == T_BOOLEAN) {
      type = T_BYTE;
    }
    LIR_Address* addr;
    if (type == T_ARRAY || type == T_OBJECT) {
      LIR_Opr tmp = new_pointer_register();
      __ add(base_op, index_op, tmp);
      addr = new LIR_Address(tmp, type);
    } else {
      addr = new LIR_Address(base_op, index_op, type);
    }

    if (is_obj) {
      pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
          true /* do_load */, false /* patch */, NULL);
      // _bs->c1_write_barrier_pre(this, LIR_OprFact::address(addr));
    }
    __ move(data, addr);
    if (is_obj) {
      // This address is precise.
      post_barrier(LIR_OprFact::address(addr), data);
    }
  }
}


void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
                                     BasicType type, bool is_volatile) {
#ifndef _LP64
  if (is_volatile && type == T_LONG) {
    __ volatile_load_unsafe_reg(src, offset, dst, type, NULL, lir_patch_none);
  } else
#endif
  {
    LIR_Address* addr = new LIR_Address(src, offset, type);
    __ load(addr, dst);
  }
}


void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem value(x->value(), this);

  src.load_item();
  value.load_item();
  off.load_nonconstant();

  LIR_Opr dst = rlock_result(x, type);
  LIR_Opr data = value.result();
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);

  LIR_Opr tmp = FrameMap::R0_opr;
  LIR_Opr ptr = new_pointer_register();
  __ add(src.result(), off.result(), ptr);

  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar();
  } else {
    __ membar_release();
  }

  if (x->is_add()) {
    __ xadd(ptr, data, dst, tmp);
  } else {
    const bool can_move_barrier = true; // TODO: port GraphKit::can_move_pre_barrier() from C2
    if (!can_move_barrier && is_obj) {
      // Do the pre-write barrier, if any.
      pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
                  true /* do_load */, false /* patch */, NULL);
    }
    __ xchg(ptr, data, dst, tmp);
    if (is_obj) {
      // Seems to be a precise address.
      post_barrier(ptr, data);
      if (can_move_barrier) {
        pre_barrier(LIR_OprFact::illegalOpr, dst /* pre_val */,
                    false /* do_load */, false /* patch */, NULL);
      }
    }
  }

  __ membar();
}


void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "should not be here");
  LIR_Opr result = rlock_result(x);

  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // Registers destroyed by update_crc32.
      crc.set_destroys_register();
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();
      LIR_Address* a = NULL;

      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
        __ add(index, LIR_OprFact::intptrConst(offset), index);
        a = new LIR_Address(base_op, index, T_BYTE);
      } else {
        a = new LIR_Address(base_op, offset, T_BYTE);
      }

      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr arg1 = cc->at(0),
              arg2 = cc->at(1),
              arg3 = cc->at(2);

      // CCallingConventionRequiresIntsAsLongs
      crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32 stub doesn't care about high bits.
      __ leal(LIR_OprFact::address(a), arg2);
      load_int_as_long(gen()->lir(), len, arg3);

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), LIR_OprFact::illegalOpr, result_reg, cc->args());
      __ move(result_reg, result);
      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}