/*
 * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_ppc.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif
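
// `__` emits into the generator's LIR list; in ASSERT builds it also records
// the C++ file and line of each emitted LIR op to aid debugging.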

void LIRItem::load_byte_item() {
  // Byte loads use the same registers as other loads.
  load_item();
}


void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (_gen->can_inline_as_constant(value())) {
    if (!r->is_constant()) {
      r = LIR_OprFact::value_type(value()->type());
    }
    _result = r;
  } else {
    load_item();
  }
}


//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------

LIR_Opr LIRGenerator::exceptionOopOpr()              { return FrameMap::R3_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()               { return FrameMap::R4_opr; }
LIR_Opr LIRGenerator::syncLockOpr()                  { return FrameMap::R5_opr; }     // Need temp effect for MonitorEnterStub.
LIR_Opr LIRGenerator::syncTempOpr()                  { return FrameMap::R4_oop_opr; } // Need temp effect for MonitorEnterStub.
LIR_Opr LIRGenerator::getThreadTemp()                { return LIR_OprFact::illegalOpr; } // not needed

LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
  case intTag:     opr = FrameMap::R3_opr;         break;
  case objectTag:  opr = FrameMap::R3_oop_opr;     break;
  case longTag:    opr = FrameMap::R3_long_opr;    break;
  case floatTag:   opr = FrameMap::F1_opr;         break;
  case doubleTag:  opr = FrameMap::F1_double_opr;  break;

  case addressTag:
  default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}

LIR_Opr LIRGenerator::rlock_callee_saved(BasicType type) {
  ShouldNotReachHere();
  return LIR_OprFact::illegalOpr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  return new_register(T_INT);
}


//--------- loading items into registers --------------------------------

// PPC cannot inline all constants.
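// Most PPC instructions encode at most a 16-bit immediate, so larger
// constants must first be materialized into a register.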
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::is_simm16(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_LongConstant() != NULL) {
    return Assembler::is_simm16(v->type()->as_LongConstant()->value());
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}


// Only simm16 constants can be inlined.
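// simm16 covers [-32768, 32767]; e.g., 32767 can stay an inline operand
// while 32768 has to be loaded into a register first.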
bool LIRGenerator::can_inline_as_constant(Value i) const {
  return can_store_as_constant(i, as_BasicType(i->type()));
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  if (c->type() == T_INT) {
    return Assembler::is_simm16(c->as_jint());
  }
  if (c->type() == T_LONG) {
    return Assembler::is_simm16(c->as_jlong());
  }
  if (c->type() == T_OBJECT) {
    return c->as_jobject() == NULL;
  }
  return false;
}


LIR_Opr LIRGenerator::safepoint_poll_register() {
  return new_register(T_INT);
}


LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  intx large_disp = disp;

  // Accumulate fixed displacements.
  if (index->is_constant()) {
    LIR_Const *constant = index->as_constant_ptr();
    if (constant->type() == T_LONG) {
      large_disp += constant->as_jlong() << shift;
    } else {
      large_disp += (intx)(constant->as_jint()) << shift;
    }
    index = LIR_OprFact::illegalOpr;
  }

  if (index->is_register()) {
    // Apply the shift and accumulate the displacement.
    if (shift > 0) {
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index, shift, tmp);
      index = tmp;
    }
    if (large_disp != 0) {
      LIR_Opr tmp = new_pointer_register();
      if (Assembler::is_simm16(large_disp)) {
        __ add(index, LIR_OprFact::intptrConst(large_disp), tmp);
        index = tmp;
      } else {
        __ move(LIR_OprFact::intptrConst(large_disp), tmp);
        __ add(tmp, index, tmp);
        index = tmp;
      }
      large_disp = 0;
    }
  } else if (!Assembler::is_simm16(large_disp)) {
    // Index is illegal so replace it with the displacement loaded into a register.
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(large_disp), index);
    large_disp = 0;
  }

  // At this point we either have base + index or base + displacement.
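  // These correspond to PPC's register-indexed (X-form) and
  // register-plus-displacement (D-form) addressing modes, respectively.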
  if (large_disp == 0) {
    return new LIR_Address(base, index, type);
  } else {
    assert(Assembler::is_simm16(large_disp), "must be");
    return new LIR_Address(base, large_disp, type);
  }
}


LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);
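  // E.g., shift is 2 for T_INT (4-byte elements) and 3 for T_LONG.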

  LIR_Opr base_opr;
  intx offset = arrayOopDesc::base_offset_in_bytes(type);

  if (index_opr->is_constant()) {
    intx i = index_opr->as_constant_ptr()->as_jint();
    intx array_offset = i * elem_size;
    if (Assembler::is_simm16(array_offset + offset)) {
      base_opr = array_opr;
      offset = array_offset + offset;
    } else {
      base_opr = new_pointer_register();
      if (Assembler::is_simm16(array_offset)) {
        __ add(array_opr, LIR_OprFact::intptrConst(array_offset), base_opr);
      } else {
        __ move(LIR_OprFact::intptrConst(array_offset), base_opr);
        __ add(base_opr, array_opr, base_opr);
      }
    }
  } else {
#ifdef _LP64
    if (index_opr->type() == T_INT) {
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
#endif

    base_opr = new_pointer_register();
    assert(index_opr->is_register(), "Must be register");
    if (shift > 0) {
      __ shift_left(index_opr, shift, base_opr);
      __ add(base_opr, array_opr, base_opr);
    } else {
      __ add(index_opr, array_opr, base_opr);
    }
  }
  return new LIR_Address(base_opr, offset, type);
}


LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r = NULL;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
  } else {
    ShouldNotReachHere();
  }
  if (!Assembler::is_simm16(x)) {
    LIR_Opr tmp = new_register(type);
    __ move(r, tmp);
    return tmp;
  }
  return r;
}


void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr temp = new_register(addr->type());
  __ move(addr, temp);
  __ add(temp, load_immediate(step, addr->type()), temp);
  __ move(temp, addr);
}


void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr tmp = FrameMap::R0_opr;
  __ load(new LIR_Address(base, disp, T_INT), tmp, info);
  __ cmp(condition, tmp, c);
}


void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base,
                               int disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr tmp = FrameMap::R0_opr;
  __ load(new LIR_Address(base, disp, type), tmp, info);
  __ cmp(condition, reg, tmp);
}


bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
  assert(left != result, "should be different registers");
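  // left * c becomes a shift plus one add/sub:
  // c == 7 yields (left << 3) - left, c == 5 yields (left << 2) + left.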
  if (is_power_of_2(c + 1)) {
    __ shift_left(left, log2_int(c + 1), result);
    __ sub(result, left, result);
    return true;
  } else if (is_power_of_2(c - 1)) {
    __ shift_left(left, log2_int(c - 1), result);
    __ add(result, left, result);
    return true;
  }
  return false;
}


void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  BasicType t = item->type();
  LIR_Opr sp_opr = FrameMap::SP_opr;
  if ((t == T_LONG || t == T_DOUBLE) &&
      ((in_bytes(offset_from_sp) - STACK_BIAS) % 8 != 0)) {
    __ unaligned_move(item, new LIR_Address(sp_opr, in_bytes(offset_from_sp), t));
  } else {
    __ move(item, new LIR_Address(sp_opr, in_bytes(offset_from_sp), t));
  }
}


//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------

void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  // Following registers are used by slow_subtype_check:
  LIR_Opr tmp1 = FrameMap::R4_opr; // super_klass
  LIR_Opr tmp2 = FrameMap::R5_opr; // sub_klass
  LIR_Opr tmp3 = FrameMap::R6_opr; // temp
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}


void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // We use R4+R5 in order to get a temp effect. These regs are used in slow path (MonitorEnterStub).
  LIR_Opr lock    = FrameMap::R5_opr;
  LIR_Opr scratch = FrameMap::R4_opr;
  LIR_Opr hdr     = FrameMap::R6_opr;

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }

  // This CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect the object to be unlocked).
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, hdr, scratch, x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  set_no_result(x);
  LIR_Opr lock     = FrameMap::R5_opr;
  LIR_Opr hdr      = FrameMap::R4_opr; // Used for slow path (MonitorExitStub).
  LIR_Opr obj_temp = FrameMap::R6_opr;
  monitor_exit(obj_temp, lock, hdr, LIR_OprFact::illegalOpr, x->monitor_no());
}


// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem value(x->x(), this);
  value.load_item();
  LIR_Opr reg = rlock_result(x);
  __ negate(value.result(), reg);
}


// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  switch (x->op()) {
  case Bytecodes::_fadd:
  case Bytecodes::_fmul:
  case Bytecodes::_fsub:
  case Bytecodes::_fdiv:
  case Bytecodes::_dadd:
  case Bytecodes::_dmul:
  case Bytecodes::_dsub:
  case Bytecodes::_ddiv: {
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);
    left.load_item();
    right.load_item();
    rlock_result(x);
    arithmetic_op_fpu(x->op(), x->operand(), left.result(), right.result(), x->is_strictfp());
  }
  break;

  case Bytecodes::_frem:
  case Bytecodes::_drem: {
    address entry = NULL;
    switch (x->op()) {
    case Bytecodes::_frem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
      break;
    case Bytecodes::_drem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
      break;
    default:
      ShouldNotReachHere();
    }
    LIR_Opr result = call_runtime(x->x(), x->y(), entry, x->type(), NULL);
    set_result(x, result);
  }
  break;

  default: ShouldNotReachHere();
  }
}


// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  bool is_div_rem = x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem;

  LIRItem right(x->y(), this);
  // Missing: check whether the instruction is commutative and whether the operands should be swapped.
  if (right.value()->type()->as_LongConstant() &&
      (x->op() == Bytecodes::_lsub && right.value()->type()->as_LongConstant()->value() == ((-1)<<15)) ) {
    // Sub is implemented by addi and can't support min_simm16 as constant.
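    // (x - (-32768)) would become addi(x, 32768), which does not fit into a
    // signed 16-bit immediate, so keep the constant in a register.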
    right.load_item();
  } else {
    right.load_nonconstant();
  }
  assert(right.is_constant() || right.is_register(), "wrong state of right");

  if (is_div_rem) {
    LIR_Opr divisor = right.result();
    if (divisor->is_register()) {
      CodeEmitInfo* null_check_info = state_for(x);
      __ cmp(lir_cond_equal, divisor, LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(null_check_info));
    } else {
      jlong const_divisor = divisor->as_constant_ptr()->as_jlong();
      if (const_divisor == 0) {
        CodeEmitInfo* null_check_info = state_for(x);
        __ jump(new DivByZeroStub(null_check_info));
        rlock_result(x);
        __ move(LIR_OprFact::longConst(0), x->operand()); // dummy
        return;
      }
      if (x->op() == Bytecodes::_lrem && !is_power_of_2(const_divisor) && const_divisor != -1) {
        // Remainder computation would need additional tmp != R0.
        right.load_item();
      }
    }
  }

  LIRItem left(x->x(), this);
  left.load_item();
  rlock_result(x);
  if (is_div_rem) {
    CodeEmitInfo* info = NULL; // Null check already done above.
    LIR_Opr tmp = FrameMap::R0_opr;
    if (x->op() == Bytecodes::_lrem) {
      __ irem(left.result(), right.result(), x->operand(), tmp, info);
    } else if (x->op() == Bytecodes::_ldiv) {
      __ idiv(left.result(), right.result(), x->operand(), tmp, info);
    }
  } else {
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}


// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  bool is_div_rem = x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem;

  LIRItem right(x->y(), this);
  // Missing: check whether the instruction is commutative and whether the operands should be swapped.
  if (right.value()->type()->as_IntConstant() &&
      (x->op() == Bytecodes::_isub && right.value()->type()->as_IntConstant()->value() == ((-1)<<15)) ) {
    // Sub is implemented by addi and can't support min_simm16 as constant.
    right.load_item();
  } else {
    right.load_nonconstant();
  }
  assert(right.is_constant() || right.is_register(), "wrong state of right");

  if (is_div_rem) {
    LIR_Opr divisor = right.result();
    if (divisor->is_register()) {
      CodeEmitInfo* null_check_info = state_for(x);
      __ cmp(lir_cond_equal, divisor, LIR_OprFact::intConst(0));
      __ branch(lir_cond_equal, T_INT, new DivByZeroStub(null_check_info));
    } else {
      jint const_divisor = divisor->as_constant_ptr()->as_jint();
      if (const_divisor == 0) {
        CodeEmitInfo* null_check_info = state_for(x);
        __ jump(new DivByZeroStub(null_check_info));
        rlock_result(x);
        __ move(LIR_OprFact::intConst(0), x->operand()); // dummy
        return;
      }
      if (x->op() == Bytecodes::_irem && !is_power_of_2(const_divisor) && const_divisor != -1) {
        // Remainder computation would need additional tmp != R0.
        right.load_item();
      }
    }
  }

  LIRItem left(x->x(), this);
  left.load_item();
  rlock_result(x);
  if (is_div_rem) {
    CodeEmitInfo* info = NULL; // Null check already done above.
    LIR_Opr tmp = FrameMap::R0_opr;
    if (x->op() == Bytecodes::_irem) {
      __ irem(left.result(), right.result(), x->operand(), tmp, info);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left.result(), right.result(), x->operand(), tmp, info);
    }
  } else {
    arithmetic_op_int(x->op(), x->operand(), left.result(), right.result(), FrameMap::R0_opr);
  }
}


void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag: do_ArithmeticOp_FPU(x);  return;
    case longTag:   do_ArithmeticOp_Long(x); return;
    case intTag:    do_ArithmeticOp_Int(x);  return;
    default: ShouldNotReachHere();
  }
}


// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  LIRItem value(x->x(), this);
  LIRItem count(x->y(), this);
  value.load_item();
  LIR_Opr reg = rlock_result(x);
  LIR_Opr mcount;
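  // Java shift counts are masked to 5 bits (int) or 6 bits (long); a count
  // held in a register needs a temp for that masking.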
  if (count.result()->is_register()) {
    mcount = FrameMap::R0_opr;
  } else {
    mcount = LIR_OprFact::illegalOpr;
  }
  shift_op(x->op(), reg, value.result(), count.result(), mcount);
}


inline bool can_handle_logic_op_as_uimm(ValueType *type, Bytecodes::Code bc) {
  jlong int_or_long_const;
  if (type->as_IntConstant()) {
    int_or_long_const = type->as_IntConstant()->value();
  } else if (type->as_LongConstant()) {
    int_or_long_const = type->as_LongConstant()->value();
  } else if (type->as_ObjectConstant()) {
    return type->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }

  if (Assembler::is_uimm(int_or_long_const, 16)) return true;
  if ((int_or_long_const & 0xFFFF) == 0 &&
      Assembler::is_uimm((jlong)((julong)int_or_long_const >> 16), 16)) return true;
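  // E.g., 0x0000ffff is encodable directly (andi./ori/xori), and 0xffff0000
  // via the shifted forms (andis./oris/xoris).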

  // see Assembler::andi
  if (bc == Bytecodes::_iand &&
      (is_power_of_2_long(int_or_long_const+1) ||
       is_power_of_2_long(int_or_long_const) ||
       is_power_of_2_long(-int_or_long_const))) return true;
  if (bc == Bytecodes::_land &&
      (is_power_of_2_long(int_or_long_const+1) ||
       (Assembler::is_uimm(int_or_long_const, 32) && is_power_of_2_long(int_or_long_const)) ||
       (int_or_long_const != min_jlong && is_power_of_2_long(-int_or_long_const)))) return true;

  // special case: xor -1
  if ((bc == Bytecodes::_ixor || bc == Bytecodes::_lxor) &&
      int_or_long_const == -1) return true;
  return false;
}


// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();

  Value rval = right.value();
  LIR_Opr r = rval->operand();
  ValueType *type = rval->type();
  // Logic instructions use unsigned immediate values.
  if (can_handle_logic_op_as_uimm(type, x->op())) {
    if (!r->is_constant()) {
      r = LIR_OprFact::value_type(type);
      rval->set_operand(r);
    }
    right.set_result(r);
  } else {
    right.load_item();
  }

  LIR_Opr reg = rlock_result(x);

  logic_op(x->op(), reg, left.result(), right.result());
}


// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);
  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}


LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr result = new_register(T_INT);
  LIR_Opr t1 = LIR_OprFact::illegalOpr;
  LIR_Opr t2 = LIR_OprFact::illegalOpr;
  cmp_value.load_item();
  new_value.load_item();

  // Volatile load may be followed by Unsafe CAS.
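  // On a CPU that is not multi-copy atomic, preserving IRIW ordering requires
  // a full two-way barrier here; otherwise a release barrier suffices.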
  if (SUPPORT_IRIW_FOR_NOT_MULTI_COPY_ATOMIC_CPU) {
    __ membar();
  } else {
    __ membar_release();
  }

  if (type == T_OBJECT || type == T_ARRAY) {
    if (UseCompressedOops) {
      t1 = new_register(T_OBJECT);
      t2 = new_register(T_OBJECT);
    }
    __ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
  } else if (type == T_INT) {
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
  } else if (type == T_LONG) {
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
  } else {
    Unimplemented();
  }
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, type);
  return result;
}


LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  LIR_Opr tmp = FrameMap::R0_opr;

  value.load_item();

  // Volatile load may be followed by Unsafe CAS.
  if (SUPPORT_IRIW_FOR_NOT_MULTI_COPY_ATOMIC_CPU) {
    __ membar();
  } else {
    __ membar_release();
  }

  __ xchg(addr, value.result(), result, tmp);

  if (SUPPORT_IRIW_FOR_NOT_MULTI_COPY_ATOMIC_CPU) {
    __ membar_acquire();
  } else {
    __ membar();
  }
  return result;
}


LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  LIR_Opr tmp = FrameMap::R0_opr;

  value.load_item();

  // Volatile load may be followed by Unsafe CAS.
  if (SUPPORT_IRIW_FOR_NOT_MULTI_COPY_ATOMIC_CPU) {
    __ membar(); // To be safe. Unsafe semantics are unclear.
  } else {
    __ membar_release();
  }

  __ xadd(addr, value.result(), result, tmp);

  if (SUPPORT_IRIW_FOR_NOT_MULTI_COPY_ATOMIC_CPU) {
    __ membar_acquire();
  } else {
    __ membar();
  }
  return result;
}


void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  switch (x->id()) {
    case vmIntrinsics::_dabs: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);
      __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
      break;
    }
    case vmIntrinsics::_dsqrt: {
      if (VM_Version::has_fsqrt()) {
        assert(x->number_of_arguments() == 1, "wrong type");
        LIRItem value(x->argument_at(0), this);
        value.load_item();
        LIR_Opr dst = rlock_result(x);
        __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      } // else fallthru
    }
    case vmIntrinsics::_dsin:   // fall through
    case vmIntrinsics::_dcos:   // fall through
    case vmIntrinsics::_dtan:   // fall through
    case vmIntrinsics::_dlog:   // fall through
    case vmIntrinsics::_dlog10: // fall through
    case vmIntrinsics::_dexp: {
      assert(x->number_of_arguments() == 1, "wrong type");

      address runtime_entry = NULL;
      switch (x->id()) {
        case vmIntrinsics::_dsqrt:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsqrt);
          break;
        case vmIntrinsics::_dsin:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
          break;
        case vmIntrinsics::_dcos:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
          break;
        case vmIntrinsics::_dtan:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
          break;
        case vmIntrinsics::_dlog:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
          break;
        case vmIntrinsics::_dlog10:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
          break;
        case vmIntrinsics::_dexp:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
          break;
        default:
          ShouldNotReachHere();
      }

      LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    case vmIntrinsics::_dpow: {
      assert(x->number_of_arguments() == 2, "wrong type");
      address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    default:
      break;
  }
}


void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code.
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src     (x->argument_at(0), this);
  LIRItem src_pos (x->argument_at(1), this);
  LIRItem dst     (x->argument_at(2), this);
  LIRItem dst_pos (x->argument_at(3), this);
  LIRItem length  (x->argument_at(4), this);

  // Load all values in callee-saved registers (C calling convention),
  // as this makes the parameter passing to the fast case simpler.
  src.load_item_force     (FrameMap::R14_oop_opr);
  src_pos.load_item_force (FrameMap::R15_opr);
  dst.load_item_force     (FrameMap::R17_oop_opr);
  dst_pos.load_item_force (FrameMap::R18_opr);
  length.load_item_force  (FrameMap::R19_opr);
  LIR_Opr tmp =            FrameMap::R20_opr;

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(),
               length.result(), tmp,
               expected_type, flags, info);
  set_no_result(x);
}


// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
void LIRGenerator::do_Convert(Convert* x) {
  if (!VM_Version::has_mtfprd()) {
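    // Without mtfprd/mffprd (direct GPR<->FPR moves, POWER8 and newer),
    // int<->float conversions must pass the value through memory, hence the
    // force_to_spill / must_start_in_memory handling below.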
    switch (x->op()) {

      // int/long -> float/double: force spill
      case Bytecodes::_l2f: {
        if (!VM_Version::has_fcfids()) { // fcfids is >= Power7 only
          // fcfid+frsp needs fixup code to avoid rounding incompatibility.
          address entry = CAST_FROM_FN_PTR(address, SharedRuntime::l2f);
          LIR_Opr result = call_runtime(x->value(), entry, x->type(), NULL);
          set_result(x, result);
          return;
        } // else fallthru
      }
      case Bytecodes::_l2d: {
        LIRItem value(x->value(), this);
        LIR_Opr reg = rlock_result(x);
        value.load_item();
        LIR_Opr tmp = force_to_spill(value.result(), T_DOUBLE);
        __ convert(x->op(), tmp, reg);
        return;
      }
      case Bytecodes::_i2f:
      case Bytecodes::_i2d: {
        LIRItem value(x->value(), this);
        LIR_Opr reg = rlock_result(x);
        value.load_item();
        // Convert i2l first.
        LIR_Opr tmp1 = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, value.result(), tmp1);
        LIR_Opr tmp2 = force_to_spill(tmp1, T_DOUBLE);
        __ convert(x->op(), tmp2, reg);
        return;
      }

      // float/double -> int/long: result will be stored
      case Bytecodes::_f2l:
      case Bytecodes::_d2l: {
        LIRItem value(x->value(), this);
        LIR_Opr reg = rlock_result(x);
        value.set_destroys_register(); // USE_KILL
        value.load_item();
        set_vreg_flag(reg, must_start_in_memory);
        __ convert(x->op(), value.result(), reg);
        return;
      }
      case Bytecodes::_f2i:
      case Bytecodes::_d2i: {
        LIRItem value(x->value(), this);
        LIR_Opr reg = rlock_result(x);
        value.set_destroys_register(); // USE_KILL
        value.load_item();
        // Convert l2i afterwards.
        LIR_Opr tmp1 = new_register(T_LONG);
        set_vreg_flag(tmp1, must_start_in_memory);
        __ convert(x->op(), value.result(), tmp1);
        __ convert(Bytecodes::_l2i, tmp1, reg);
        return;
      }

      // Within same category: just register conversions.
      case Bytecodes::_i2b:
      case Bytecodes::_i2c:
      case Bytecodes::_i2s:
      case Bytecodes::_i2l:
      case Bytecodes::_l2i:
      case Bytecodes::_f2d:
      case Bytecodes::_d2f:
        break;

      default: ShouldNotReachHere();
    }
  }

  // Register conversion.
  LIRItem value(x->value(), this);
  LIR_Opr reg = rlock_result(x);
  value.load_item();
  switch (x->op()) {
    case Bytecodes::_f2l:
    case Bytecodes::_d2l:
    case Bytecodes::_f2i:
    case Bytecodes::_d2i: value.set_destroys_register(); break; // USE_KILL
    default: break;
  }
  __ convert(x->op(), value.result(), reg);
}


void LIRGenerator::do_NewInstance(NewInstance* x) {
  // This instruction can be deoptimized in the slow path.
  const LIR_Opr reg = result_register_for(x->type());
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path (NewInstanceStub).
  LIR_Opr tmp1 = FrameMap::R5_oop_opr;
  LIR_Opr tmp2 = FrameMap::R6_oop_opr;
  LIR_Opr tmp3 = FrameMap::R7_oop_opr;
  LIR_Opr tmp4 = FrameMap::R8_oop_opr;
  new_instance(reg, x->klass(), x->is_unresolved(), tmp1, tmp2, tmp3, tmp4, klass_reg, info);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item();

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path (NewTypeArrayStub).
  // We use R5 in order to get a temp effect. This reg is used in slow path (NewTypeArrayStub).
  LIR_Opr tmp1 = FrameMap::R5_oop_opr;
  LIR_Opr tmp2 = FrameMap::R6_oop_opr;
  LIR_Opr tmp3 = FrameMap::R7_oop_opr;
  LIR_Opr tmp4 = FrameMap::R8_oop_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* info = state_for(x, x->state());
  // In case of patching (i.e., object class is not yet loaded),
  // we need to reexecute the instruction and therefore provide
  // the state before the parameters have been consumed.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  LIRItem length(x->length(), this);
  length.load_item();

  const LIR_Opr reg = result_register_for(x->type());
  LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path (NewObjectArrayStub).
  // We use R5 in order to get a temp effect. This reg is used in slow path (NewObjectArrayStub).
  LIR_Opr tmp1 = FrameMap::R5_oop_opr;
  LIR_Opr tmp2 = FrameMap::R6_oop_opr;
  LIR_Opr tmp3 = FrameMap::R7_oop_opr;
  LIR_Opr tmp4 = FrameMap::R8_oop_opr;
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciMetadata* obj = ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_nonconstant();
    // FrameMap::_reserved_argument_area_size includes the varargs slots for
    // the dimensions, because it's initialized to hir()->max_stack() when
    // the FrameMap is created.
    store_stack_parameter(size->result(), in_ByteSize(i*sizeof(jint) + FrameMap::first_available_sp_in_frame));
  }

  const LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path.
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::R5_opr; // Used by slow path.
  __ move(LIR_OprFact::intConst(x->rank()), rank);

  LIR_Opr varargs = FrameMap::as_pointer_opr(R6); // Used by slow path.
  __ leal(LIR_OprFact::address(new LIR_Address(FrameMap::SP_opr, FrameMap::first_available_sp_in_frame, T_INT)),
          varargs);

  // Note: This instruction can be deoptimized in the slow path.
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  const LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}


void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // Must do this before locking the destination register as
    // an oop register, and before the obj is loaded (so x->obj()->item()
    // is valid for creating a debug info location).
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  CodeStub* stub;
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id,
                                   LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  // Following registers are used by slow_subtype_check:
  LIR_Opr tmp1 = FrameMap::R4_oop_opr; // super_klass
  LIR_Opr tmp2 = FrameMap::R5_oop_opr; // sub_klass
  LIR_Opr tmp3 = FrameMap::R6_oop_opr; // temp
  __ checkcast(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}


void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }
  // Ensure the result register is not the input register because the
  // result is initialized before the patching safepoint.
  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  // Following registers are used by slow_subtype_check:
  LIR_Opr tmp1 = FrameMap::R4_oop_opr; // super_klass
  LIR_Opr tmp2 = FrameMap::R5_oop_opr; // sub_klass
  LIR_Opr tmp3 = FrameMap::R6_oop_opr; // temp
  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
                x->direct_compare(), patching_info,
                x->profiled_method(), x->profiled_bci());
}


void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;
  If::Condition cond = x->cond();

  LIR_Opr left = LIR_OprFact::illegalOpr;
  LIR_Opr right = LIR_OprFact::illegalOpr;

  xin->load_item();
  left = xin->result();

  if (yin->result()->is_constant() && yin->result()->type() == T_INT &&
      Assembler::is_simm16(yin->result()->as_constant_ptr()->as_jint())) {
    // Inline int constants which are small enough to be immediate operands.
    right = LIR_OprFact::value_type(yin->value()->type());
  } else if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 &&
             (cond == If::eql || cond == If::neq)) {
    // Inline long zero.
    right = LIR_OprFact::value_type(yin->value()->type());
  } else if (tag == objectTag && yin->is_constant() && (yin->get_jobject_constant()->is_null_object())) {
    right = LIR_OprFact::value_type(yin->value()->type());
  } else {
    yin->load_item();
    right = yin->result();
  }
  set_no_result(x);

  // Add safepoint before generating condition code so it can be recomputed.
  if (x->is_safepoint()) {
    // Increment backedge counter if needed.
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
        x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}


LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::as_pointer_opr(R16_thread);
}


void LIRGenerator::trace_block_entry(BlockBegin* block) {
  LIR_Opr arg1 = FrameMap::R3_opr; // ARG1
  __ move(LIR_OprFact::intConst(block->block_id()), arg1);
  LIR_OprList* args = new LIR_OprList(1);
  args->append(arg1);
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
}


void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
#ifdef _LP64
  __ store(value, address, info);
#else
  Unimplemented();
//  __ volatile_store_mem_reg(value, address, info);
#endif
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
#ifdef _LP64
  __ load(address, result, info);
#else
  Unimplemented();
//  __ volatile_load_mem_reg(address, result, info);
#endif
}


void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "should not be here otherwise");
  LIR_Opr result = rlock_result(x);

  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // Registers destroyed by update_crc32.
      crc.set_destroys_register();
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();
      LIR_Address* a = NULL;

      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
        __ add(index, LIR_OprFact::intptrConst(offset), index);
        a = new LIR_Address(base_op, index, T_BYTE);
      } else {
        a = new LIR_Address(base_op, offset, T_BYTE);
      }

      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr arg1 = cc->at(0),
              arg2 = cc->at(1),
              arg3 = cc->at(2);

      crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32 stub doesn't care about high bits.
      __ leal(LIR_OprFact::address(a), arg2);
      len.load_item_force(arg3); // We skip int->long conversion here, because CRC32 stub expects int.

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), LIR_OprFact::illegalOpr, result_reg, cc->args());
      __ move(result_reg, result);
      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  assert(UseCRC32CIntrinsics, "should not be here otherwise");
  LIR_Opr result = rlock_result(x);

  switch (x->id()) {
    case vmIntrinsics::_updateBytesCRC32C:
    case vmIntrinsics::_updateDirectByteBufferCRC32C: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem end(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();
      end.load_nonconstant();

      // len = end - off
      LIR_Opr len  = end.result();
      LIR_Opr tmpA = new_register(T_INT);
      LIR_Opr tmpB = new_register(T_INT);
      __ move(end.result(), tmpA);
      __ move(off.result(), tmpB);
      __ sub(tmpA, tmpB, tmpA);
      len = tmpA;

      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();
      LIR_Address* a = NULL;

      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
        __ add(index, LIR_OprFact::intptrConst(offset), index);
        a = new LIR_Address(base_op, index, T_BYTE);
      } else {
        a = new LIR_Address(base_op, offset, T_BYTE);
      }

      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr arg1 = cc->at(0),
              arg2 = cc->at(1),
              arg3 = cc->at(2);

      crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32C stub doesn't care about high bits.
      __ leal(LIR_OprFact::address(a), arg2);
      __ move(len, cc->at(2));   // We skip int->long conversion here, because CRC32C stub expects int.

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), LIR_OprFact::illegalOpr, result_reg, cc->args());
      __ move(result_reg, result);
      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  assert(UseFMA, "Needs FMA instructions support.");
  LIRItem value(x->argument_at(0), this);
  LIRItem value1(x->argument_at(1), this);
  LIRItem value2(x->argument_at(2), this);

  value.load_item();
  value1.load_item();
  value2.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_input1 = value1.result();
  LIR_Opr calc_input2 = value2.result();
  LIR_Opr calc_result = rlock_result(x);

  switch (x->id()) {
  case vmIntrinsics::_fmaD: __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
  case vmIntrinsics::_fmaF: __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
  default:                  ShouldNotReachHere();
  }
}

void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}