1 /*
   2  * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "c1/c1_Compilation.hpp"
  28 #include "c1/c1_FrameMap.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_Runtime1.hpp"
  33 #include "c1/c1_ValueStack.hpp"
  34 #include "ci/ciArray.hpp"
  35 #include "ci/ciObjArrayKlass.hpp"
  36 #include "ci/ciTypeArrayKlass.hpp"
  37 #include "runtime/sharedRuntime.hpp"
  38 #include "runtime/stubRoutines.hpp"
  39 #include "vmreg_s390.inline.hpp"
  40 
  41 #ifdef ASSERT
  42 #define __ gen()->lir(__FILE__, __LINE__)->
  43 #else
  44 #define __ gen()->lir()->
  45 #endif
  46 
// On s390 a byte operand can live in any register, so a byte load is
// just an ordinary item load.
void LIRItem::load_byte_item() {
  // Byte loads use same registers as other loads.
  load_item();
}
  51 
// Load the item into a register, unless it is a constant that can be
// encoded as a signed immediate of at most 'bits' bits — in that case
// leave it as a constant operand so the assembler can inline it.
void LIRItem::load_nonconstant(int bits) {
  LIR_Opr r = value()->operand();
  if (_gen->can_inline_as_constant(value(), bits)) {
    if (!r->is_constant()) {
      // Not yet represented as a constant operand; build one from the value's type.
      r = LIR_OprFact::value_type(value()->type());
    }
    _result = r;
  } else {
    load_item();
  }
}
  63 
  64 //--------------------------------------------------------------
  65 //               LIRGenerator
  66 //--------------------------------------------------------------
  67 
// Fixed platform registers used by the s390 backend.
LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::as_oop_opr(Z_EXC_OOP); }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::as_opr(Z_EXC_PC); }
// Integer division uses the Z_R10/Z_R11 register pair: dividend and
// quotient in Z_R11, remainder in Z_R10 (see do_ArithmeticOp_Int/_Long).
LIR_Opr LIRGenerator::divInOpr()        { return FrameMap::Z_R11_opr; }
LIR_Opr LIRGenerator::divOutOpr()       { return FrameMap::Z_R11_opr; }
LIR_Opr LIRGenerator::remOutOpr()       { return FrameMap::Z_R10_opr; }
LIR_Opr LIRGenerator::ldivInOpr()       { return FrameMap::Z_R11_long_opr; }
LIR_Opr LIRGenerator::ldivOutOpr()      { return FrameMap::Z_R11_long_opr; }
LIR_Opr LIRGenerator::lremOutOpr()      { return FrameMap::Z_R10_long_opr; }
// Monitor lock slot gets a fresh register; temp is fixed to Z_R13.
LIR_Opr LIRGenerator::syncLockOpr()     { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::Z_R13_opr; }
// No thread temp register is needed on this platform.
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }
  79 
// Return the fixed register that holds a result of the given type in
// the s390 calling convention: Z_R2 for int/oop/long, Z_F0 for
// float/double. The 'callee' flag is not used by this implementation.
LIR_Opr LIRGenerator::result_register_for (ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:    opr = FrameMap::Z_R2_opr;        break;
    case objectTag: opr = FrameMap::Z_R2_oop_opr;    break;
    case longTag:   opr = FrameMap::Z_R2_long_opr;   break;
    case floatTag:  opr = FrameMap::Z_F0_opr;        break;
    case doubleTag: opr = FrameMap::Z_F0_double_opr; break;

    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}
  96 
// Byte values need no special register class on s390; any integer
// register works, so the requested type is ignored.
LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  return new_register(T_INT);
}
 100 
 101 //--------- Loading items into registers. --------------------------------
 102 
 103 // z/Architecture cannot inline all constants.
// z/Architecture cannot inline all constants.
// A value can be stored to memory as an immediate only if it is an
// int or long constant fitting a signed 16-bit field, or the null
// object constant. All other constants must go through a register.
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (v->type()->as_IntConstant() != NULL) {
    return Immediate::is_simm16(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_LongConstant() != NULL) {
    return Immediate::is_simm16(v->type()->as_LongConstant()->value());
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}
 115 
// True if 'i' is an int or long constant encodable as a signed
// immediate of at most 'bits' bits; other constant kinds fall back to
// can_store_as_constant().
bool LIRGenerator::can_inline_as_constant(Value i, int bits) const {
  if (i->type()->as_IntConstant() != NULL) {
    return Assembler::is_simm(i->type()->as_IntConstant()->value(), bits);
  } else if (i->type()->as_LongConstant() != NULL) {
    return Assembler::is_simm(i->type()->as_LongConstant()->value(), bits);
  } else {
    return can_store_as_constant(i, as_BasicType(i->type()));
  }
}
 125 
 126 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
 127   if (c->type() == T_INT) {
 128     return Immediate::is_simm20(c->as_jint());
 129   } else   if (c->type() == T_LONG) {
 130     return Immediate::is_simm20(c->as_jlong());
 131   }
 132   return false;
 133 }
 134 
// The safepoint poll uses a freshly allocated 64-bit register.
LIR_Opr LIRGenerator::safepoint_poll_register() {
  return new_register(longType);
}
 138 
// Build a LIR_Address for base + (index << shift) + disp, legalizing
// operands the encoding cannot express:
// - a constant index is folded into the displacement; if the folded
//   displacement is not encodable, it is materialized in a register;
// - a register index is pre-shifted into a fresh temp when shift > 0.
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  if (index->is_constant()) {
    // Fold the scaled constant index into the displacement.
    intptr_t large_disp = ((intx)(index->as_constant_ptr()->as_jint()) << shift) + disp;
    if (Displacement::is_validDisp(large_disp)) {
      return new LIR_Address(base, large_disp, type);
    }
    // Index is illegal so replace it with the displacement loaded into a register.
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(large_disp), index);
    return new LIR_Address(base, index, type);
  } else {
    if (shift > 0) {
      // Scale into a fresh temp so the caller's index operand is preserved.
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index, shift, tmp);
      index = tmp;
    }
    return new LIR_Address(base, index, disp, type);
  }
}
 160 
// Compute the address of array element array_opr[index_opr] for an
// element of the given type. When needs_card_mark is set, the full
// address is materialized in a register so it can be reused for the
// precise card mark after the store.
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type, bool needs_card_mark) {
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    // Constant index: fold the scaled element offset into the displacement.
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
  } else {
    if (index_opr->type() == T_INT) {
      // Widen the 32-bit index before 64-bit address arithmetic.
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
    if (shift > 0) {
      // Scales in place. NOTE(review): this clobbers index_opr; appears safe
      // because it is either the temp created above or an index the callers
      // do not reuse — confirm for any new caller.
      __ shift_left(index_opr, shift, index_opr);
    }
    addr = new LIR_Address(array_opr,
                           index_opr,
                           offset_in_bytes, type);
  }
  if (needs_card_mark) {
    // This store will need a precise card mark, so go ahead and
    // compute the full address instead of computing once for the
    // store and again for the card mark.
    LIR_Opr tmp = new_pointer_register();
    __ leal(LIR_OprFact::address(addr), tmp);
    return new LIR_Address(tmp, type);
  } else {
    return addr;
  }
}
 195 
 196 LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
 197   LIR_Opr r = LIR_OprFact::illegalOpr;
 198   if (type == T_LONG) {
 199     r = LIR_OprFact::longConst(x);
 200   } else if (type == T_INT) {
 201     r = LIR_OprFact::intConst(x);
 202   } else {
 203     ShouldNotReachHere();
 204   }
 205   return r;
 206 }
 207 
// Increment the counter located at absolute address 'counter' by
// 'step'. The address is first loaded into a pointer register so it
// can be used as a memory operand.
void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}
 214 
// Add 'step' to the memory location 'addr' in place (the same address
// operand is used as both source and destination).
void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  __ add((LIR_Opr)addr, LIR_OprFact::intConst(step), (LIR_Opr)addr);
}
 218 
// Compare the int at [base + disp] with constant 'c'. The memory
// operand is loaded into the Z_R1 scratch register first; 'info' is
// attached to the load for implicit-exception bookkeeping.
void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr scratch = FrameMap::Z_R1_opr;
  __ load(new LIR_Address(base, disp, T_INT), scratch, info);
  __ cmp(condition, scratch, c);
}
 224 
// Compare register 'reg' against the memory operand [base + disp].
void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  __ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info);
}
 228 
// Try to replace 'left * c' with a shift plus add/sub when c is
// adjacent to a power of two (c == 2^n - 1 or c == 2^n + 1).
// Returns false — emitting nothing — when no reduction applies or no
// valid temp was supplied. Note that 'left' is clobbered by the shift.
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
  if (tmp->is_valid()) {
    if (is_power_of_2(c + 1)) {
      // left * (2^n - 1)  ==  (left << n) - left
      __ move(left, tmp);
      __ shift_left(left, log2_intptr(c + 1), left);
      __ sub(left, tmp, result);
      return true;
    } else if (is_power_of_2(c - 1)) {
      // left * (2^n + 1)  ==  (left << n) + left
      __ move(left, tmp);
      __ shift_left(left, log2_intptr(c - 1), left);
      __ add(left, tmp, result);
      return true;
    }
  }
  return false;
}
 245 
// Store 'item' into the outgoing-argument area at the given offset
// from the stack pointer.
void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::Z_SP_opr, in_bytes(offset_from_sp), type));
}
 250 
 251 //----------------------------------------------------------------------
 252 //             visitor functions
 253 //----------------------------------------------------------------------
 254 
// Array element store: array[index] = value. Emits, as needed: null
// check, range check, array store check (for object arrays), and GC
// pre/post write barriers with a precise card mark.
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(),"");
  bool needs_range_check = x->compute_needs_range_check();
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  // The store check can be skipped only when the stored value is provably null.
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object() ||
                                         x->should_profile());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  // A constant index may stay inline if it fits a signed 20-bit displacement.
  index.load_nonconstant(20);

  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();
  }
  if (needs_store_check || x->check_boolean()) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // The CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different.
  CodeEmitInfo* range_check_info = state_for (x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  // Emit array address setup early so it schedules better.
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
  if (value.result()->is_constant() && array_addr->index()->is_valid()) {
    // Constants cannot be stored with index register on ZARCH_64 (see LIR_Assembler::const2mem()).
    LIR_Opr tmp = new_pointer_register();
    __ leal(LIR_OprFact::address(array_addr), tmp);
    array_addr = new LIR_Address(tmp, x->elt_type());
  }

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      // Explicit length available: compare index against it directly.
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // Range_check also does the null check.
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    LIR_Opr tmp1 = new_register(objectType);
    LIR_Opr tmp2 = new_register(objectType);
    LIR_Opr tmp3 = LIR_OprFact::illegalOpr;

    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
  }

  if (obj_store) {
    // Needs GC write barriers.
    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
  }

  // Optionally mask a boolean store down to 0/1, then perform the store.
  LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
  __ move(result, array_addr, null_check_info);

  if (obj_store) {
    // Precise card mark
    post_barrier(LIR_OprFact::address(array_addr), value.result());
  }
}
 336 
// monitorenter: lock the object in x->obj().
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop.
  LIR_Opr lock = new_register(T_INT);

  // Debug info for the implicit null check, only when one is needed.
  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for (x);
  }
  // This CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked).
  CodeEmitInfo* info = state_for (x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), LIR_OprFact::illegalOpr,
                x->monitor_no(), info_for_exception, info);
}
 357 
// monitorexit: unlock the object in x->obj(). The object item is not
// loaded here; a separate temp register is handed to monitor_exit.
void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  // Lock-slot address and object temp, neither of which is an oop.
  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}
 369 
 370 // _ineg, _lneg, _fneg, _dneg
// _ineg, _lneg, _fneg, _dneg
// Negate the single operand into a freshly locked result register.
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem value(x->x(), this);
  value.load_item();
  LIR_Opr reg = rlock_result(x);
  __ negate(value.result(), reg);
}
 377 
 378 // for _fadd, _fmul, _fsub, _fdiv, _frem
 379 //     _dadd, _dmul, _dsub, _ddiv, _drem
 380 void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
 381   LIRItem left(x->x(),  this);
 382   LIRItem right(x->y(), this);
 383   LIRItem* left_arg  = &left;
 384   LIRItem* right_arg = &right;
 385   assert(!left.is_stack(), "can't both be memory operands");
 386   left.load_item();
 387 
 388   if (right.is_register() || right.is_constant()) {
 389     right.load_item();
 390   } else {
 391     right.dont_load_item();
 392   }
 393 
 394   if ((x->op() == Bytecodes::_frem) || (x->op() == Bytecodes::_drem)) {
 395     address entry;
 396     switch (x->op()) {
 397     case Bytecodes::_frem:
 398       entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
 399       break;
 400     case Bytecodes::_drem:
 401       entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
 402       break;
 403     default:
 404       ShouldNotReachHere();
 405     }
 406     LIR_Opr result = call_runtime(x->x(), x->y(), entry, x->type(), NULL);
 407     set_result(x, result);
 408   } else {
 409     LIR_Opr reg = rlock(x);
 410     LIR_Opr tmp = LIR_OprFact::illegalOpr;
 411     arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), x->is_strictfp(), tmp);
 412     set_result(x, reg);
 413   }
 414 }
 415 
 416 // for _ladd, _lmul, _lsub, _ldiv, _lrem
// for _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
    // Use shifts if divisior is a power of 2 otherwise use DSGR instruction.
    // Instruction: DSGR R1, R2
    // input : R1+1: dividend   (R1, R1+1 designate a register pair, R1 must be even)
    //         R2:   divisor
    //
    // output: R1+1: quotient
    //         R1:   remainder
    //
    // Register selection: R1:   Z_R10
    //                     R1+1: Z_R11
    //                     R2:   to be chosen by register allocator (linear scan)

    // R1, and R1+1 will be destroyed.

    LIRItem right(x->y(), this);
    LIRItem left(x->x() , this);   // Visit left second, so that the is_register test is valid.

    // Call state_for before load_item_force because state_for may
    // force the evaluation of other instructions that are needed for
    // correct debug info. Otherwise the live range of the fix
    // register might be too long.
    CodeEmitInfo* info = state_for (x);

    LIR_Opr result = rlock_result(x);
    LIR_Opr result_reg = result;
    LIR_Opr tmp = LIR_OprFact::illegalOpr;
    LIR_Opr divisor_opr = right.result();
    if (divisor_opr->is_constant() && is_power_of_2(divisor_opr->as_jlong())) {
      // Power-of-2 divisor: a shift sequence is emitted later, so no
      // fixed register pair is required.
      left.load_item();
      right.dont_load_item();
    } else {
      // General case: force the dividend into the fixed Z_R11 input
      // register of the DSGR register pair.
      left.load_item_force(ldivInOpr());
      right.load_item();

      // DSGR instruction needs register pair.
      if (x->op() == Bytecodes::_ldiv) {
        result_reg = ldivOutOpr();
        tmp        = lremOutOpr();
      } else {
        result_reg = lremOutOpr();
        tmp        = ldivOutOpr();
      }
    }

    if (!ImplicitDiv0Checks) {
      // Explicit divide-by-zero check with a deopt/throw stub.
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));
      // Idiv/irem cannot trap (passing info would generate an assertion).
      info = NULL;
    }

    if (x->op() == Bytecodes::_lrem) {
      __ irem(left.result(), right.result(), result_reg, tmp, info);
    } else if (x->op() == Bytecodes::_ldiv) {
      __ idiv(left.result(), right.result(), result_reg, tmp, info);
    } else {
      ShouldNotReachHere();
    }

    // Copy out of the fixed pair register into the allocated result.
    if (result_reg != result) {
      __ move(result_reg, result);
    }
  } else {
    // add, sub, mul: right operand may stay inline if it fits 32 bits.
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    left.load_item();
    right.load_nonconstant(32);
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}
 491 
 492 // for: _iadd, _imul, _isub, _idiv, _irem
// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {
    // Use shifts if divisior is a power of 2 otherwise use DSGFR instruction.
    // Instruction: DSGFR R1, R2
    // input : R1+1: dividend   (R1, R1+1 designate a register pair, R1 must be even)
    //         R2:   divisor
    //
    // output: R1+1: quotient
    //         R1:   remainder
    //
    // Register selection: R1:   Z_R10
    //                     R1+1: Z_R11
    //                     R2:   To be chosen by register allocator (linear scan).

    // R1, and R1+1 will be destroyed.

    LIRItem right(x->y(), this);
    LIRItem left(x->x() , this);   // Visit left second, so that the is_register test is valid.

    // Call state_for before load_item_force because state_for may
    // force the evaluation of other instructions that are needed for
    // correct debug info. Otherwise the live range of the fix
    // register might be too long.
    CodeEmitInfo* info = state_for (x);

    LIR_Opr result = rlock_result(x);
    LIR_Opr result_reg = result;
    LIR_Opr tmp = LIR_OprFact::illegalOpr;
    LIR_Opr divisor_opr = right.result();
    if (divisor_opr->is_constant() && is_power_of_2(divisor_opr->as_jint())) {
      // Power-of-2 divisor: a shift sequence is emitted later, so no
      // fixed register pair is required.
      left.load_item();
      right.dont_load_item();
    } else {
      // General case: force the dividend into the fixed Z_R11 input
      // register of the DSGFR register pair.
      left.load_item_force(divInOpr());
      right.load_item();

      // DSGFR instruction needs register pair.
      if (x->op() == Bytecodes::_idiv) {
        result_reg = divOutOpr();
        tmp        = remOutOpr();
      } else {
        result_reg = remOutOpr();
        tmp        = divOutOpr();
      }
    }

    if (!ImplicitDiv0Checks) {
      // Explicit divide-by-zero check with a deopt/throw stub.
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0));
      __ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
      // Idiv/irem cannot trap (passing info would generate an assertion).
      info = NULL;
    }

    if (x->op() == Bytecodes::_irem) {
      __ irem(left.result(), right.result(), result_reg, tmp, info);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left.result(), right.result(), result_reg, tmp, info);
    } else {
      ShouldNotReachHere();
    }

    // Copy out of the fixed pair register into the allocated result.
    if (result_reg != result) {
      __ move(result_reg, result);
    }
  } else {
    LIRItem left(x->x(),  this);
    LIRItem right(x->y(), this);
    LIRItem* left_arg = &left;
    LIRItem* right_arg = &right;
    if (x->is_commutative() && left.is_stack() && right.is_register()) {
      // swap them if left is real stack (or cached) and right is real register(not cached)
      left_arg = &right;
      right_arg = &left;
    }

    left_arg->load_item();

    // Do not need to load right, as we can handle stack and constants.
    if (x->op() == Bytecodes::_imul) {
      bool use_tmp = false;
      if (right_arg->is_constant()) {
        int iconst = right_arg->get_jint_constant();
        // Strength-reducible constants (2^n +/- 1) need a temp register.
        if (is_power_of_2(iconst - 1) || is_power_of_2(iconst + 1)) {
          use_tmp = true;
        }
      }
      right_arg->dont_load_item();
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      if (use_tmp) {
        tmp = new_register(T_INT);
      }
      rlock_result(x);

      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    } else {
      right_arg->dont_load_item();
      rlock_result(x);
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    }
  }
}
 595 
 596 void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
 597   // If an operand with use count 1 is the left operand, then it is
 598   // likely that no move for 2-operand-LIR-form is necessary.
 599   if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
 600     x->swap_operands();
 601   }
 602 
 603   ValueTag tag = x->type()->tag();
 604   assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
 605   switch (tag) {
 606     case floatTag:
 607     case doubleTag: do_ArithmeticOp_FPU(x);  return;
 608     case longTag:   do_ArithmeticOp_Long(x); return;
 609     case intTag:    do_ArithmeticOp_Int(x);  return;
 610   }
 611   ShouldNotReachHere();
 612 }
 613 
 614 // _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  // Unlike x86 (where the count must live in rcx), no fixed register
  // is forced for the count here; constant counts stay inline.
  LIRItem value(x->x(), this);
  LIRItem count(x->y(), this);

  ValueTag elemType = x->type()->tag();
  bool must_load_count = !count.is_constant();
  if (must_load_count) {
    count.load_item();
  } else {
    count.dont_load_item();
  }
  value.load_item();
  LIR_Opr reg = rlock_result(x);

  shift_op(x->op(), reg, value.result(), count.result(), LIR_OprFact::illegalOpr);
}
 632 
 633 // _iand, _land, _ior, _lor, _ixor, _lxor
// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  // IF an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary.
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();
  // The right operand may stay inline if it fits a 32-bit immediate.
  right.load_nonconstant(32);
  LIR_Opr reg = rlock_result(x);

  logic_op(x->op(), reg, left.result(), right.result());
}
 650 
 651 // _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
// Produce the -1/0/1 three-way comparison result in an int register.
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);
  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    // The flag selects the unordered result: 'l' variants yield -1 on NaN.
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    ShouldNotReachHere();
  }
}
 667 
// compareAndSwap intrinsic: atomically replace the field at
// (obj + offset) with 'val' if it currently equals 'cmp'; the result
// is a boolean success flag. Object fields get GC write barriers.
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // Value to compare with field.
  LIRItem val   (x->argument_at(3), this);  // Replace field with val if matches cmp.

  // Get address of field.
  obj.load_item();
  offset.load_nonconstant(20);
  cmp.load_item();
  val.load_item();

  LIR_Opr addr = new_pointer_register();
  LIR_Address* a;
  if (offset.result()->is_constant()) {
    assert(Immediate::is_simm20(offset.result()->as_jlong()), "should have been loaded into register");
    a = new LIR_Address(obj.result(),
                        offset.result()->as_jlong(),
                        as_BasicType(type));
  } else {
    a = new LIR_Address(obj.result(),
                        offset.result(),
                        0,
                        as_BasicType(type));
  }
  // Materialize the full field address so it can be reused by barriers.
  __ leal(LIR_OprFact::address(a), addr);

  if (type == objectType) {  // Write-barrier needed for Object fields.
    pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
  }

  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  if (type == objectType) {
    __ cas_obj(addr, cmp.result(), val.result(), new_register(T_OBJECT), new_register(T_OBJECT));
  } else if (type == intType) {
    __ cas_int(addr, cmp.result(), val.result(), ill, ill);
  } else if (type == longType) {
    __ cas_long(addr, cmp.result(), val.result(), ill, ill);
  } else {
    ShouldNotReachHere();
  }
  // Generate conditional move of boolean result.
  LIR_Opr result = rlock_result(x);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, as_BasicType(type));
  if (type == objectType) {  // Write-barrier needed for Object fields.
    // Precise card mark since could either be object or array
    post_barrier(addr, val.result());
  }
}
 720 
 721 
// Math intrinsics: abs and sqrt are emitted inline; the transcendental
// functions (sin, cos, tan, log, log10, exp, pow) go through the
// shared runtime.
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  switch (x->id()) {
    case vmIntrinsics::_dabs:
    case vmIntrinsics::_dsqrt: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);

      switch (x->id()) {
      case vmIntrinsics::_dsqrt: {
        __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      }
      case vmIntrinsics::_dabs: {
        __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      }
      }
      break;
    }
    case vmIntrinsics::_dlog10: // fall through
    case vmIntrinsics::_dlog: // fall through
    case vmIntrinsics::_dsin: // fall through
    case vmIntrinsics::_dtan: // fall through
    case vmIntrinsics::_dcos: // fall through
    case vmIntrinsics::_dexp: {
      assert(x->number_of_arguments() == 1, "wrong type");

      // Select the shared-runtime entry matching the intrinsic.
      address runtime_entry = NULL;
      switch (x->id()) {
      case vmIntrinsics::_dsin:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
        break;
      case vmIntrinsics::_dcos:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
        break;
      case vmIntrinsics::_dtan:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
        break;
      case vmIntrinsics::_dlog:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
        break;
      case vmIntrinsics::_dlog10:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
        break;
      case vmIntrinsics::_dexp:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
        break;
      default:
        ShouldNotReachHere();
      }

      LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    case vmIntrinsics::_dpow: {
      // pow takes two arguments, unlike the other runtime intrinsics.
      assert(x->number_of_arguments() == 2, "wrong type");
      address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
  }
}
 788 
// System.arraycopy intrinsic: set up the five arguments in fixed
// C-ABI argument registers and emit the LIR arraycopy operation.
void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Copy stubs possibly call C code, e.g. G1 barriers, so we need to reserve room
  // for the C ABI (see frame::z_abi_160).
  BasicTypeArray sig; // Empty signature is precise enough.
  frame_map()->c_calling_convention(&sig);

  // Make all state_for calls early since they can emit code.
  CodeEmitInfo* info = state_for (x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // Operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call).

  src.load_item_force     (FrameMap::as_oop_opr(Z_ARG1));
  src_pos.load_item_force (FrameMap::as_opr(Z_ARG2));
  dst.load_item_force     (FrameMap::as_oop_opr(Z_ARG3));
  dst_pos.load_item_force (FrameMap::as_opr(Z_ARG4));
  length.load_item_force  (FrameMap::as_opr(Z_ARG5));

  LIR_Opr tmp =            FrameMap::as_opr(Z_R7);

  set_no_result(x);

  // Analyze the copy to pick flags and (if statically known) the element klass.
  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(),
               length.result(), tmp, expected_type, flags, info); // does add_safepoint
}
 827 
 828 // _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
 829 // _i2b, _i2c, _i2s
 830 void LIRGenerator::do_Convert(Convert* x) {
 831   LIRItem value(x->value(), this);
 832 
 833   value.load_item();
 834   LIR_Opr reg = rlock_result(x);
 835   __ convert(x->op(), value.result(), reg);
 836 }
 837 
// Allocates a new (non-array) instance via the shared new_instance helper,
// using fixed platform registers for the allocation temporaries.
void LIRGenerator::do_NewInstance(NewInstance* x) {
  print_if_not_loaded(x);

  // This instruction can be deoptimized in the slow path : use
  // Z_R2 as result register.
  const LIR_Opr reg = result_register_for (x->type());

  // state_for can emit code, so call it before the allocation sequence.
  CodeEmitInfo* info = state_for (x, x->state());
  // Fixed temporaries used by the allocation fast/slow paths.
  LIR_Opr tmp1 = FrameMap::Z_R12_oop_opr;
  LIR_Opr tmp2 = FrameMap::Z_R13_oop_opr;
  LIR_Opr tmp3 = reg;
  LIR_Opr tmp4 = LIR_OprFact::illegalOpr; // not needed on this platform
  LIR_Opr klass_reg = FrameMap::Z_R11_metadata_opr;
  new_instance(reg, x->klass(), x->is_unresolved(), tmp1, tmp2, tmp3, tmp4, klass_reg, info);
  // Move the fixed result register into a virtual register for the allocator.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
 855 
// Allocates a new primitive-type array; the slow path goes through
// NewTypeArrayStub with the element klass preloaded into klass_reg.
void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  // state_for can emit code, so call it before the allocation sequence.
  CodeEmitInfo* info = state_for (x, x->state());

  LIRItem length(x->length(), this);
  length.load_item();

  LIR_Opr reg = result_register_for (x->type());
  // Fixed temporaries used by the allocation fast/slow paths.
  LIR_Opr tmp1 = FrameMap::Z_R12_oop_opr;
  LIR_Opr tmp2 = FrameMap::Z_R13_oop_opr;
  LIR_Opr tmp3 = reg;
  LIR_Opr tmp4 = LIR_OprFact::illegalOpr; // not needed on this platform
  LIR_Opr klass_reg = FrameMap::Z_R11_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  // Load the type-array klass constant; no patching needed for primitive arrays.
  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  // Move the fixed result register into a virtual register for the allocator.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
 879 
// Allocates a new object array; may need klass patching when the element
// class is not yet loaded, so the klass is materialized with
// klass2reg_with_patching instead of a plain metadata move.
void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* info = state_for (x, x->state());
  // In case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for (x, x->state_before());
  }

  LIRItem length(x->length(), this);
  length.load_item();

  const LIR_Opr reg = result_register_for (x->type());
  // Fixed temporaries used by the allocation fast/slow paths.
  LIR_Opr tmp1 = FrameMap::Z_R12_oop_opr;
  LIR_Opr tmp2 = FrameMap::Z_R13_oop_opr;
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  LIR_Opr tmp4 = LIR_OprFact::illegalOpr;
  LIR_Opr klass_reg = FrameMap::Z_R11_metadata_opr;
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  // The array klass is the obj-array klass derived from the element klass.
  ciKlass* obj = ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  // Move the fixed result register into a virtual register for the allocator.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
 912 
// Allocates a multi-dimensional array by storing the dimension sizes into a
// varargs area on the stack and calling the new_multi_array runtime stub.
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  // Wrap each dimension value in a LIRItem; evaluation is deferred until
  // after the state_for calls below.
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for (x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for (x, x->state());

  // Store each dimension size into the on-stack varargs area as a jint.
  i = dims->length();
  while (--i >= 0) {
    LIRItem* size = items->at(i);
    size->load_nonconstant(32);
    // FrameMap::_reserved_argument_area_size includes the dimensions varargs, because
    // it's initialized to hir()->max_stack() when the FrameMap is created.
    store_stack_parameter(size->result(), in_ByteSize(i*sizeof(jint) + FrameMap::first_available_sp_in_frame));
  }

  // Runtime call arguments in fixed registers: klass, rank, varargs pointer.
  LIR_Opr klass_reg = FrameMap::Z_R3_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::Z_R4_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::Z_R5_opr;
  // Pass the address of the dimensions array just stored on the stack.
  __ leal(LIR_OprFact::address(new LIR_Address(FrameMap::Z_SP_opr, FrameMap::first_available_sp_in_frame, T_INT)),
          varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for (x->type());
  __ call_runtime(Runtime1::entry_for (Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  // Move the fixed result register into a virtual register for the allocator.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
 964 
// No per-block setup code is needed on this platform.
void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // Nothing to do.
}
 968 
// Emits a checkcast LIR op; selects the failure stub depending on whether
// this is a real cast, an incompatible-class-change check, or an
// invokespecial receiver check.
void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
    // Must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization).
    patching_info = state_for (x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  // Pick the slow-path stub matching the kind of check.
  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    // Receiver check failure deoptimizes instead of throwing.
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    // Regular checkcast: throw ClassCastException with the failing object.
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr; // third temp not needed on this platform
  __ checkcast(reg, obj.result(), x->klass(),
               tmp1, tmp2, tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}
1006 
1007 
// Emits an instanceof LIR op producing an int result (0/1), with optional
// klass patching when the tested class is not yet loaded.
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for (x, x->state_before());
  }
  // Ensure the result register is not the input register because the
  // result is initialized before the patching safepoint.
  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr; // third temp not needed on this platform
  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
                x->direct_compare(), patching_info,
                x->profiled_method(), x->profiled_bci());
}
1025 
1026 
// Emits compare + conditional branch for a two-successor If, including
// backedge profiling and a safepoint on loop backedges.
void LIRGenerator::do_If (If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // For longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions (swap operands and flip the condition).
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();
  // TODO: don't load long constants != 0L
  if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
    // inline long zero
    yin->dont_load_item();
  } else if (tag == longTag || tag == floatTag || tag == doubleTag) {
    // Longs cannot handle constants at right side.
    yin->load_item();
  } else {
    // Int-like comparisons can take an immediate on the right.
    yin->dont_load_item();
  }

  // Add safepoint before generating condition code so it can be recomputed.
  if (x->is_safepoint()) {
    // Increment backedge counter if needed.
    increment_backedge_counter(state_for (x, x->state_before()), x->profiled_bci());
    // Use safepoint_poll_register() instead of LIR_OprFact::illegalOpr.
    __ safepoint(safepoint_poll_register(), state_for (x, x->state_before()));
  }
  set_no_result(x);

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();
  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    // Float/double compare: the unordered case branches to usux().
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  // The fall-through successor is reached via an explicit jump.
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}
1084 
1085 LIR_Opr LIRGenerator::getThreadPointer() {
1086   return FrameMap::as_pointer_opr(Z_thread);
1087 }
1088 
1089 void LIRGenerator::trace_block_entry(BlockBegin* block) {
1090   __ move(LIR_OprFact::intConst(block->block_id()), FrameMap::Z_R2_opr);
1091   LIR_OprList* args = new LIR_OprList(1);
1092   args->append(FrameMap::Z_R2_opr);
1093   address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
1094   __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
1095 }
1096 
// Volatile field store: a plain LIR store is emitted here; no explicit
// memory barrier is added at this point (NOTE(review): presumably relies on
// s390's strong memory ordering and/or barriers emitted elsewhere — confirm).
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  __ store(value, address, info);
}
1101 
// Volatile field load: a plain LIR load is emitted here; no explicit
// memory barrier is added at this point (see note on volatile_field_store).
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  __ load(address, result, info);
}
1106 
1107 
// Unsafe put: stores data at (src + offset). For oop stores, wraps the move
// with the GC pre-/post-write barriers; barrier order around the move is
// significant and must not change.
void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
                                     BasicType type, bool is_volatile) {
  LIR_Address* addr = new LIR_Address(src, offset, type);
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
  if (is_obj) {
    // Do the pre-write barrier, if any.
    pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
    __ move(data, addr);
    assert(src->is_register(), "must be register");
    // Seems to be a precise address.
    post_barrier(LIR_OprFact::address(addr), data);
  } else {
    // Primitive store: no barriers needed.
    __ move(data, addr);
  }
}
1124 
1125 
// Unsafe get: loads a value of the given type from (src + offset).
// No barriers are emitted here; is_volatile is currently unused.
void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
                                     BasicType type, bool is_volatile) {
  LIR_Address* addr = new LIR_Address(src, offset, type);
  __ load(addr, dst);
}
1131 
// Unsafe getAndAdd: emits an atomic add-and-fetch-old (xadd) on the field at
// (object + offset). Only the integral add variant is supported here, as the
// assert documents; getAndSet and oop variants are not handled.
void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
  BasicType type = x->basic_type();
  assert (x->is_add() && type != T_ARRAY && type != T_OBJECT, "not supported");
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem value(x->value(), this);

  src.load_item();
  value.load_item();
  // Keep the offset as a constant if it fits in a signed 20-bit displacement.
  off.load_nonconstant(20);

  LIR_Opr dst = rlock_result(x, type);
  LIR_Opr data = value.result();
  LIR_Opr offset = off.result();

  // Build the address with either a constant displacement or a register index.
  LIR_Address* addr;
  if (offset->is_constant()) {
    assert(Immediate::is_simm20(offset->as_jlong()), "should have been loaded into register");
    addr = new LIR_Address(src.result(), offset->as_jlong(), type);
  } else {
    addr = new LIR_Address(src.result(), offset, type);
  }

  __ xadd(LIR_OprFact::address(addr), data, dst, LIR_OprFact::illegalOpr);
}
1157 
// Generates code for the CRC32 intrinsics. The single-byte update is emitted
// inline; the bulk variants call the updateBytesCRC32 stub via a leaf
// runtime call with arguments (crc, buf-address, len).
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "or should not be here");
  LIR_Opr result = rlock_result(x);

  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      // Single-byte update, emitted inline.
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // Registers destroyed by update_crc32.
      crc.set_destroys_register();
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      // Fold a constant offset into the displacement; otherwise keep it as
      // a register index. For byte[] buffers, also add the array base offset
      // (direct ByteBuffers pass a raw address, hence offset 0).
      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();

      // Addresses use 64-bit registers; widen a register index to long.
      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
      }

      LIR_Address* a = new LIR_Address(base_op, index, offset, T_BYTE);

      // C calling convention for: int updateBytesCRC32(int crc, address buf, int len).
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for (x->type());

      LIR_Opr arg1 = cc->at(0);
      LIR_Opr arg2 = cc->at(1);
      LIR_Opr arg3 = cc->at(2);

      crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32 stub doesn't care about high bits.
      __ leal(LIR_OprFact::address(a), arg2);
      len.load_item_force(arg3); // We skip int->long conversion here, because CRC32 stub expects int.

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), LIR_OprFact::illegalOpr, result_reg, cc->args());
      __ move(result_reg, result);
      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}
1225 
1226 void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
1227   assert(UseCRC32CIntrinsics, "or should not be here");
1228   LIR_Opr result = rlock_result(x);
1229 
1230   switch (x->id()) {
1231     case vmIntrinsics::_updateBytesCRC32C:
1232     case vmIntrinsics::_updateDirectByteBufferCRC32C: {
1233       bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);
1234 
1235       LIRItem crc(x->argument_at(0), this);
1236       LIRItem buf(x->argument_at(1), this);
1237       LIRItem off(x->argument_at(2), this);
1238       LIRItem end(x->argument_at(3), this);
1239       buf.load_item();
1240       off.load_nonconstant();
1241       end.load_nonconstant();
1242 
1243       // len = end - off
1244       LIR_Opr len  = end.result();
1245       LIR_Opr tmpA = new_register(T_INT);
1246       LIR_Opr tmpB = new_register(T_INT);
1247       __ move(end.result(), tmpA);
1248       __ move(off.result(), tmpB);
1249       __ sub(tmpA, tmpB, tmpA);
1250       len = tmpA;
1251 
1252       LIR_Opr index = off.result();
1253       int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
1254       if (off.result()->is_constant()) {
1255         index = LIR_OprFact::illegalOpr;
1256         offset += off.result()->as_jint();
1257       }
1258       LIR_Opr base_op = buf.result();
1259 
1260       if (index->is_valid()) {
1261         LIR_Opr tmp = new_register(T_LONG);
1262         __ convert(Bytecodes::_i2l, index, tmp);
1263         index = tmp;
1264       }
1265 
1266       LIR_Address* a = new LIR_Address(base_op, index, offset, T_BYTE);
1267 
1268       BasicTypeList signature(3);
1269       signature.append(T_INT);
1270       signature.append(T_ADDRESS);
1271       signature.append(T_INT);
1272       CallingConvention* cc = frame_map()->c_calling_convention(&signature);
1273       const LIR_Opr result_reg = result_register_for (x->type());
1274 
1275       LIR_Opr arg1 = cc->at(0);
1276       LIR_Opr arg2 = cc->at(1);
1277       LIR_Opr arg3 = cc->at(2);
1278 
1279       crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32C stub doesn't care about high bits.
1280       __ leal(LIR_OprFact::address(a), arg2);
1281       __ move(len, cc->at(2));   // We skip int->long conversion here, because CRC32C stub expects int.
1282 
1283       __ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), LIR_OprFact::illegalOpr, result_reg, cc->args());
1284       __ move(result_reg, result);
1285       break;
1286     }
1287     default: {
1288       ShouldNotReachHere();
1289     }
1290   }
1291 }
1292 
1293 void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
1294   assert(x->number_of_arguments() == 3, "wrong type");
1295   assert(UseFMA, "Needs FMA instructions support.");
1296   LIRItem value(x->argument_at(0), this);
1297   LIRItem value1(x->argument_at(1), this);
1298   LIRItem value2(x->argument_at(2), this);
1299 
1300   value2.set_destroys_register();
1301 
1302   value.load_item();
1303   value1.load_item();
1304   value2.load_item();
1305 
1306   LIR_Opr calc_input = value.result();
1307   LIR_Opr calc_input1 = value1.result();
1308   LIR_Opr calc_input2 = value2.result();
1309   LIR_Opr calc_result = rlock_result(x);
1310 
1311   switch (x->id()) {
1312   case vmIntrinsics::_fmaD:   __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
1313   case vmIntrinsics::_fmaF:   __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
1314   default:                    ShouldNotReachHere();
1315   }
1316 }
1317 
// Not implemented on this platform; reaching here indicates the intrinsic
// was selected although it should not be available.
void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}