/*
 * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "runtime/arguments.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

#ifndef PATCHED_ADDR
#define PATCHED_ADDR  (max_jint)
#endif

void PhiResolverState::reset(int max_vregs) {
  // Initialize array sizes
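  // at_put_grow(max_vregs - 1, ...) preallocates the backing storage in one
  // step; trunc_to(0) then resets the length without releasing the storage,
  // so at_put_grow calls during resolution do not have to reallocate.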
  _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _virtual_operands.trunc_to(0);
  _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _other_operands.trunc_to(0);
  _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
  _vreg_table.trunc_to(0);
}



//--------------------------------------------------------------
// PhiResolver

// Resolves cycles:
//
//  r1 := r2  becomes  temp := r1
//  r2 := r1           r1 := r2
//                     r2 := temp
// and orders moves:
//
//  r2 := r3  becomes  r1 := r2
//  r1 := r2           r2 := r3

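// A typical use, mirroring move_to_phi() below (a sketch, not the only
// entry point): record one move per phi input and let the destructor
// order and emit the moves, breaking any cycle through a temp:
//
//   PhiResolver resolver(gen, max_vregs);
//   resolver.move(cur_val_opr, phi_opr);   // repeated for each phi input
//   // the ordered moves are emitted when 'resolver' goes out of scope
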
PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset(max_vregs);
}


void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}


void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}


void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}


// Traverse the assignment graph in depth-first order and generate moves in post order,
// e.g. for the two assignments b := c, a := b, start with node c:
// Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
// Generates moves in this order: move b to a and move c to b
// e.g. for the cycle a := b, b := a, start with node a:
// Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
// Generates moves in this order: move b to temp, move a to b, move temp to a
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}


PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = virtual_operands()[i];
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate moves from non-virtual registers to arbitrary destinations
  for (i = other_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = other_operands()[i];
    for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}


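// Find or create the ResolveNode for opr. Virtual registers are interned
// through vreg_table, so every move involving the same vreg shares one node;
// non-virtual operands can only act as move sources and get a fresh node
// in other_operands each time.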
ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table()[vreg_num] = node;
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}


void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}


//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}

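// Ensure the item's value is materialized in a register, emitting a move
// when it currently lives in a constant or on the stack.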
void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}


void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}

void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
#if !defined(ARM) && !defined(E500V2)
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      r = _gen->force_to_spill(r, reg->type());
    }
#endif
    __ move(r, reg);
    _result = reg;
  }
}

ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* oc = type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_IntConstant() != NULL, "type check");
  return type()->as_IntConstant()->value();
}


jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_AddressConstant() != NULL, "type check");
  return type()->as_AddressConstant()->value();
}


jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_FloatConstant() != NULL, "type check");
  return type()->as_FloatConstant()->value();
}


jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_DoubleConstant() != NULL, "type check");
  return type()->as_DoubleConstant()->value();
}


jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_LongConstant() != NULL, "type check");
  return type()->as_LongConstant()->value();
}



//--------------------------------------------------------------


void LIRGenerator::init() {
  _bs = Universe::heap()->barrier_set();
}


void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  if (LIRTraceExecution &&
      Compilation::current()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}


void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Oprs for unpinned constants shouldn't be referenced by other
  // blocks, so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}


void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}


//-------------------------LIRGenerator-----------------------------

// This is where the tree-walk starts; instr must be a root.
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}


// This is called for each node in the tree; the walk stops if a root is reached.
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop the walk when a root is encountered
  if (instr->is_pinned() && instr->as_Phi() == NULL || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
  }
}


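// Build the debug information for instruction x at the given state: walk
// the whole inlining chain, force any still-unevaluated stack values and
// live locals into operands, and invalidate dead locals so that linear
// scan may assume every remaining local is live.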
CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  assert(state != NULL, "state must be defined");

#ifndef PRODUCT
  state->verify();
#endif

  ValueStack* s = state;
  for_each_state(s) {
    if (s->kind() == ValueStack::EmptyExceptionState) {
      assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
      continue;
    }

    int index;
    Value value;
    for_each_stack_value(s, index, value) {
      assert(value->subst() == value, "missed substitution");
      if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
        walk(value);
        assert(value->operand()->is_valid(), "must be evaluated now");
      }
    }

    int bci = s->bci();
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substitution");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
  }

  return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->exception_state());
}


void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) {
  /* C2 relies on constant pool entries being resolved (ciTypeFlow), so if TieredCompilation
   * is active and the class hasn't yet been resolved we need to emit a patch that resolves
   * the class. */
  if ((TieredCompilation && need_resolve) || !obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ klass2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ metadata2reg(obj->constant_encoding(), r);
  }
}


void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                    CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
}


void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index, true);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
  __ move(index, result);
}



void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;

  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd:  __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul:  __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:
      {
        if (is_strictfp) {
          __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
        } else {
          __ mul(left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_imul:
      {
        bool    did_strength_reduce = false;

        if (right->is_constant()) {
          int c = right->as_jint();
          if (is_power_of_2(c)) {
            // do not need tmp here
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv:
      {
        if (is_strictfp) {
          __ div_strictfp (left_op, right_op, result_op, tmp_op); break;
        } else {
          __ div (left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, false, tmp);
}


void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
}


void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, is_strictfp, tmp);
}


void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
  if (TwoOperandLIRForm && value != result_op) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch(code) {
  case Bytecodes::_ishl:
  case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
  case Bytecodes::_ishr:
  case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
  case Bytecodes::_iushr:
  case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
  default: ShouldNotReachHere();
  }
}


void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_iand:
    case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;

    case Bytecodes::_ior:
    case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;

    case Bytecodes::_ixor:
    case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for the slow path, use debug info for the state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}


void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // set up registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, scratch, slow_path);
}

#ifndef PRODUCT
void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
  if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", new_instance->printable_bci());
  } else if (PrintNotLoaded && (TieredCompilation && new_instance->is_unresolved())) {
    tty->print_cr("   ###class not resolved at new bci %d", new_instance->printable_bci());
  }
}
#endif

void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
  // If klass is not loaded we do not know if the klass has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for the instance
    assert(klass->size_helper() >= 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
    __ branch(lir_cond_always, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
  }
}


static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}


static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return NULL;
  }
}

static ciType* phi_declared_type(Phi* phi) {
  ciType* t = phi->operand_at(0)->declared_type();
  if (t == NULL) {
    return NULL;
  }
  for(int i = 1; i < phi->operand_count(); i++) {
    if (t != phi->operand_at(i)->declared_type()) {
      return NULL;
    }
  }
  return t;
}

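// Inspect the operands of an arraycopy intrinsic and work out which of the
// LIR_OpArrayCopy runtime checks (null, range, positivity, type, overlap,
// alignment) can be statically elided, as well as the most precise array
// klass to expect at runtime.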
void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false, src_objarray = false, dst_objarray = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    Phi* phi;
    if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) {
      src_declared_type = as_array_klass(phi_declared_type(phi));
    }
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) {
      dst_declared_type = as_array_klass(phi_declared_type(phi));
    }

    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;

    src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
    dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;

  if (!src_objarray)
    flags &= ~LIR_OpArrayCopy::src_objarray;
  if (!dst_objarray)
    flags &= ~LIR_OpArrayCopy::dst_objarray;

  if (!x->arg_needs_null_check(0))
    flags &= ~LIR_OpArrayCopy::src_null_check;
  if (!x->arg_needs_null_check(2))
    flags &= ~LIR_OpArrayCopy::dst_null_check;


  if (expected_type != NULL) {
    Value length_limit = NULL;

    IfOp* ifop = length->as_IfOp();
    if (ifop != NULL) {
      // look for expressions like min(v, a.length), which end up as
      //   x > y ? y : x  or  x >= y ? y : x
      if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
          ifop->x() == ifop->fval() &&
          ifop->y() == ifop->tval()) {
        length_limit = ifop->y();
      }
    }

    // try to skip null checks and range checks
    NewArray* src_array = src->as_NewArray();
    if (src_array != NULL) {
      flags &= ~LIR_OpArrayCopy::src_null_check;
      if (length_limit != NULL &&
          src_array->length() == length_limit &&
          is_constant_zero(src_pos)) {
        flags &= ~LIR_OpArrayCopy::src_range_check;
      }
    }

    NewArray* dst_array = dst->as_NewArray();
    if (dst_array != NULL) {
      flags &= ~LIR_OpArrayCopy::dst_null_check;
      if (length_limit != NULL &&
          dst_array->length() == length_limit &&
          is_constant_zero(dst_pos)) {
        flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  IntConstant* src_int = src_pos->type()->as_IntConstant();
  IntConstant* dst_int = dst_pos->type()->as_IntConstant();
  if (src_int && dst_int) {
    int s_offs = src_int->value();
    int d_offs = dst_int->value();
    if (src_int->value() >= dst_int->value()) {
      flags &= ~LIR_OpArrayCopy::overlapping;
    }
    if (expected_type != NULL) {
      BasicType t = expected_type->element_type()->basic_type();
      int element_size = type2aelembytes(t);
      if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
          ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
        flags &= ~LIR_OpArrayCopy::unaligned;
      }
    }
  } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
    // src and dst positions are the same, or dst_pos is zero, so assume
    // a non-overlapping copy.
    flags &= ~LIR_OpArrayCopy::overlapping;
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}


LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not a register?");

  if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
    LIR_Opr result = new_register(T_FLOAT);
    set_vreg_flag(result, must_start_in_memory);
    assert(opr->is_register(), "only a register can be spilled");
    assert(opr->value_type()->is_float(), "rounding is only available for floats");
    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
    return result;
  }
  return opr;
}


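// Force value to pass through a stack slot: copy it into a fresh virtual
// register of type t that is flagged must_start_in_memory, so the register
// allocator assigns it a memory location first.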
LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()],
         err_msg_res("size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type())));
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}

void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    if (if_instr->is_swapped()) {
      int t = taken_count_offset;
      taken_count_offset = not_taken_count_offset;
      not_taken_count_offset = t;
    }

    LIR_Opr md_reg = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), md_reg);

    LIR_Opr data_offset_reg = new_pointer_register();
    __ cmove(lir_cond(cond),
             LIR_OprFact::intptrConst(taken_count_offset),
             LIR_OprFact::intptrConst(not_taken_count_offset),
             data_offset_reg, as_BasicType(if_instr->x()->type()));

    // MDO cells are intptr_t, so the data_reg width is arch-dependent.
    LIR_Opr data_reg = new_pointer_register();
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
    __ move(data_addr, data_reg);
    // Use leal instead of add to avoid destroying condition codes on x86
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, data_addr);
  }
}

// Phi technique:
// This is about passing live values from one basic block to the other.
// In code generated from Java it is rather rare that more than one
// value is on the stack from one basic block to the other.
// We optimize our technique for efficient passing of one value
// (of type long, int, double, ...) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of the expression
// stack, is passed in registers. All other values are stored in the spilling
// area. Every Phi has an index which designates its spill slot.
// At the exit of a basic block, we fill the register(s) and spill slots.
// At the entry of a basic block, the block_prolog sets up the content of phi nodes
// and locks necessary registers and spilling slots.
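//
// Illustrative sketch (hypothetical block numbers): when B1 and B2 both
// branch to B3, and B3 merges a value i via a phi,
//
//   B1: ... i := 1        B2: ... i := 2
//         \                 /
//       B3: i' := phi(i@B1, i@B2)
//
// each predecessor calls move_to_phi() before its branch, moving its own
// current value of i into the operand allocated for the phi.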


// move the current value to the referenced phi function
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  // cur_val can be null without phi being null in conjunction with inlining
  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
    LIR_Opr operand = cur_val->operand();
    if (cur_val->operand()->is_illegal()) {
      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}


// Moves all stack values into their PHI position
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      int max_phis = cur_state->stack_size() + cur_state->locals_size();
      PhiResolver resolver(this, _virtual_register_number + max_phis * 2);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      assert(cur_state->scope() == sux_state->scope(), "not matching");
      assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
      assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");

      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}


LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg = _virtual_register_number;
  // add a little fudge factor for the bailout, since the bailout is
  // only checked periodically.  This gives a few extra registers to
  // hand out before we really run out, which helps us keep from
  // tripping over assertions.
  if (vreg + 20 >= LIR_OprDesc::vreg_max) {
    bailout("out of virtual registers");
    if (vreg + 2 >= LIR_OprDesc::vreg_max) {
      // wrap it around
      _virtual_register_number = LIR_OprDesc::vreg_base;
    }
  }
  _virtual_register_number += 1;
  return LIR_OprFact::virtual_register(vreg, type);
}


// Try to lock using the register in the hint
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}


// does an rlock and sets the result
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}


// does an rlock and sets the result
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
  LIR_Opr reg;
  switch (type) {
  case T_BYTE:
  case T_BOOLEAN:
    reg = rlock_byte(type);
    break;
  default:
    reg = rlock(x);
    break;
  }

  set_result(x, reg);
  return reg;
}


//---------------------------------------------------------------------
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* oc = value->type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the beginning of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   operand_for_instruction(phi));

  LIR_Opr thread_reg = getThreadPointer();
  __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
               exceptionOopOpr());
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}


//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//                        visitor functions
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------

void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly
  ShouldNotReachHere();
}


// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state_before() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state_before());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block.  After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}


void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}


void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
  Unimplemented();
}


void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
    signature.append(T_METADATA); // Method*
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_METADATA);
    __ metadata2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}

// Example: ref.get()
// Combination of LoadField and G1 pre-write barrier
void LIRGenerator::do_Reference_get(Intrinsic* x) {

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem reference(x->argument_at(0), this);
  reference.load_item();

  // need to perform the null check on the reference object
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  LIR_Address* referent_field_adr =
    new LIR_Address(reference.result(), referent_offset, T_OBJECT);

  LIR_Opr result = rlock_result(x);

  __ load(referent_field_adr, result, info);

  // Register the value in the referent field with the pre-barrier
  pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
              result /* pre_val */,
              false  /* do_load */,
              false  /* patch */,
              NULL   /* info */);
}

// Example: clazz.isInstance(object)
void LIRGenerator::do_isInstance(Intrinsic* x) {
  assert(x->number_of_arguments() == 2, "wrong type");

  // TODO could try to substitute this node with an equivalent InstanceOf
  // if clazz is known to be a constant Class. This will pick up newly found
  // constants after HIR construction. I'll leave this to a future change.

  // as a first cut, make a simple leaf call to the runtime to stay platform independent.
  // could follow the aastore example in a future change.

  LIRItem clazz(x->argument_at(0), this);
  LIRItem object(x->argument_at(1), this);
  clazz.load_item();
  object.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on clazz
  if (x->needs_null_check()) {
    CodeEmitInfo* info = state_for(x);
    __ null_check(clazz.result(), info);
  }

  LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
                                     CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
                                     x->type(),
                                     NULL); // NULL CodeEmitInfo results in a leaf call
  __ move(call_result, result);
}

// Example: object.getClass()
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr temp = new_register(T_METADATA);
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  // FIXME T_ADDRESS should actually be T_METADATA but it can't because the
  // meaning of these two is mixed up (see JDK-8026837).
  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info);
  __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
}


// Example: Thread.currentThread()
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr reg = rlock_result(x);
  __ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
}


void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem receiver(x->argument_at(0), this);

  receiver.load_item();
  BasicTypeList signature;
  signature.append(T_OBJECT); // receiver
  LIR_OprList* args = new LIR_OprList();
  args->append(receiver.result());
  CodeEmitInfo* info = state_for(x, x->state());
  call_runtime(&signature, args,
               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
               voidType, info);

  set_no_result(x);
}


//------------------------local access--------------------------------------

LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  if (x->operand()->is_illegal()) {
    Constant* c = x->as_Constant();
    if (c != NULL) {
      x->set_operand(LIR_OprFact::value_type(c->type()));
    } else {
      assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
      // allocate a virtual register for this local or phi
      x->set_operand(rlock(x));
      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
    }
  }
  return x->operand();
}


Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
  if (opr->is_virtual()) {
    return instruction_for_vreg(opr->vreg_number());
  }
  return NULL;
}


Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
  if (reg_num < _instruction_for_operand.length()) {
    return _instruction_for_operand.at(reg_num);
  }
  return NULL;
}


void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
  if (_vreg_flags.size_in_bits() == 0) {
    BitMap2D temp(100, num_vreg_flags);
    temp.clear();
    _vreg_flags = temp;
  }
  _vreg_flags.at_put_grow(vreg_num, f, true);
}

bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
  if (!_vreg_flags.is_valid_index(vreg_num, f)) {
    return false;
  }
  return _vreg_flags.at(vreg_num, f);
}


// Block-local constant handling.  This code is useful for keeping
// unpinned constants and constants which aren't exposed in the IR in
// registers.  Unpinned Constant instructions have their operands
// cleared when the block is finished so that other blocks can't end
// up referring to their registers.

LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}


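// Return a register holding the constant c, reusing a register already
// assigned to an equal constant earlier in the same block when possible.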
LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
  for (int i = 0; i < _constants.length(); i++) {
    LIR_Const* other = _constants.at(i);
    if (t == other->type()) {
      switch (t) {
      case T_INT:
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}

// Various barriers

void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                               bool do_load, bool patch, CodeEmitInfo* info) {
  // Do the pre-write barrier, if any.
  switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      // No pre barriers
      break;
    case BarrierSet::ModRef:
      // No pre barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_post_barrier(addr, new_val);
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      CardTableModRef_post_barrier(addr, new_val);
      break;
    case BarrierSet::ModRef:
      // No post barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                                                     bool do_load, bool patch, CodeEmitInfo* info) {
  // First we test whether marking is in progress.
  BasicType flag_type;
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
              "Assumption");
    flag_type = T_BYTE;
  }
  LIR_Opr thrd = getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(JavaThread::satb_mark_queue_offset() +
                             PtrQueue::byte_offset_of_active()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch)
      pre_val_patch_code = lir_patch_normal;

    pre_val = new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
  } else {
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");
    assert(info == NULL, "sanity");

    slow = new G1PreBarrierStub(pre_val);
  }

  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}

1516 void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1517   // If the "new_val" is a constant NULL, no barrier is necessary.
1518   if (new_val->is_constant() &&
1519       new_val->as_constant_ptr()->as_jobject() == NULL) return;
1520 
1521   if (!new_val->is_register()) {
1522     LIR_Opr new_val_reg = new_register(T_OBJECT);
1523     if (new_val->is_constant()) {
1524       __ move(new_val, new_val_reg);
1525     } else {
1526       __ leal(new_val, new_val_reg);
1527     }
1528     new_val = new_val_reg;
1529   }
1530   assert(new_val->is_register(), "must be a register at this point");
1531 
1532   if (addr->is_address()) {
1533     LIR_Address* address = addr->as_address_ptr();
1534     LIR_Opr ptr = new_pointer_register();
1535     if (!address->index()->is_valid() && address->disp() == 0) {
1536       __ move(address->base(), ptr);
1537     } else {
1538       assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
1539       __ leal(addr, ptr);
1540     }
1541     addr = ptr;
1542   }
1543   assert(addr->is_register(), "must be a register at this point");
1544 
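       // A card only needs to be enqueued for a cross-region store. Heap
       // regions are aligned at a power-of-two grain size, so (addr ^ new_val)
       // shifted right by LogOfHRGrainBytes is non-zero exactly when addr and
       // new_val lie in different regions.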
1545   LIR_Opr xor_res = new_pointer_register();
1546   LIR_Opr xor_shift_res = new_pointer_register();
1547   if (TwoOperandLIRForm) {
1548     __ move(addr, xor_res);
1549     __ logical_xor(xor_res, new_val, xor_res);
1550     __ move(xor_res, xor_shift_res);
1551     __ unsigned_shift_right(xor_shift_res,
1552                             LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
1553                             xor_shift_res,
1554                             LIR_OprDesc::illegalOpr());
1555   } else {
1556     __ logical_xor(addr, new_val, xor_res);
1557     __ unsigned_shift_right(xor_res,
1558                             LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
1559                             xor_shift_res,
1560                             LIR_OprDesc::illegalOpr());
1561   }
1562 
1563   if (!new_val->is_register()) {
1564     LIR_Opr new_val_reg = new_register(T_OBJECT);
1565     __ leal(new_val, new_val_reg);
1566     new_val = new_val_reg;
1567   }
1568   assert(new_val->is_register(), "must be a register at this point");
1569 
1570   __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));
1571 
1572   CodeStub* slow = new G1PostBarrierStub(addr, new_val);
1573   __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
1574   __ branch_destination(slow->continuation());
1575 }
1576 
1577 #endif // INCLUDE_ALL_GCS
1578 ////////////////////////////////////////////////////////////////////////
1579 
1580 void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1581   CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(_bs);
1582   assert(sizeof(*(ct->byte_map_base)) == sizeof(jbyte), "adjust this code");
1583   LIR_Const* card_table_base = new LIR_Const(ct->byte_map_base);
1584   if (addr->is_address()) {
1585     LIR_Address* address = addr->as_address_ptr();
1586     // ptr cannot be an object because we use this barrier for array card marks
1587     // and addr can point in the middle of an array.
1588     LIR_Opr ptr = new_pointer_register();
1589     if (!address->index()->is_valid() && address->disp() == 0) {
1590       __ move(address->base(), ptr);
1591     } else {
1592       assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
1593       __ leal(addr, ptr);
1594     }
1595     addr = ptr;
1596   }
1597   assert(addr->is_register(), "must be a register at this point");
1598 
1599 #ifdef CARDTABLEMODREF_POST_BARRIER_HELPER
1600   CardTableModRef_post_barrier_helper(addr, card_table_base);
1601 #else
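       // Inline card mark: the card for addr is the byte at
       // byte_map_base + (addr >> card_shift); storing the dirty value into it
       // tells the collector to rescan the corresponding part of the heap.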
1602   LIR_Opr tmp = new_pointer_register();
1603   if (TwoOperandLIRForm) {
1604     __ move(addr, tmp);
1605     __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
1606   } else {
1607     __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
1608   }
1609 
1610   LIR_Address* card_addr;
1611   if (can_inline_as_constant(card_table_base)) {
1612     card_addr = new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE);
1613   } else {
1614     card_addr = new LIR_Address(tmp, load_constant(card_table_base), T_BYTE);
1615   }
1616 
1617   LIR_Opr dirty = LIR_OprFact::intConst(CardTableModRefBS::dirty_card_val());
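       // With UseCondCardMark the card is loaded and tested first and only
       // written if it is not already dirty; the extra load avoids repeated
       // stores to the same card, reducing contention on card-table cache
       // lines shared between threads.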
1618   if (UseCondCardMark) {
1619     LIR_Opr cur_value = new_register(T_INT);
1620     __ move(card_addr, cur_value);
1621 
1622     LabelObj* L_already_dirty = new LabelObj();
1623     __ cmp(lir_cond_equal, cur_value, dirty);
1624     __ branch(lir_cond_equal, T_BYTE, L_already_dirty->label());
1625     __ move(dirty, card_addr);
1626     __ branch_destination(L_already_dirty->label());
1627   } else {
1628     __ move(dirty, card_addr);
1629   }
1630 #endif
1631 }
1632 
1633 
1634 //------------------------field access--------------------------------------
1635 
1636 // Comment copied from templateTable_i486.cpp
1637 // ----------------------------------------------------------------------------
1638 // Volatile variables demand their effects be made known to all CPUs in
1639 // order.  Store buffers on most chips allow reads & writes to reorder; the
1640 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1641 // memory barrier (i.e., it's not sufficient that the interpreter does not
1642 // reorder volatile references, the hardware also must not reorder them).
1643 //
1644 // According to the new Java Memory Model (JMM):
1645 // (1) All volatiles are serialized with respect to each other.
1646 // ALSO, reads & writes act as acquire & release, so:
1647 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1648 // the read float up to before the read.  It's OK for non-volatile memory refs
1649 // that happen before the volatile read to float down below it.
1650 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
1651 // that happen BEFORE the write float down to after the write.  It's OK for
1652 // non-volatile memory refs that happen after the volatile write to float up
1653 // before it.
1654 //
1655 // We only put in barriers around volatile refs (they are expensive), not
1656 // _between_ memory refs (that would require us to track the flavor of the
1657 // previous memory refs).  Requirements (2) and (3) require some barriers
1658 // before volatile stores and after volatile loads.  These nearly cover
1659 // requirement (1) but miss the volatile-store-volatile-load case.  This final
1660 // case is placed after volatile-stores although it could just as well go
1661 // before volatile-loads.
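     //
     // With this scheme, the code emitted below for a volatile store followed
     // by a volatile load on an MP system looks roughly like this (sketch):
     //
     //   membar_release        // (3): earlier refs may not sink below the store
     //   store  value, addr
     //   membar                // (1): covers the volatile-store-volatile-load case
     //   ...
     //   load   addr, reg
     //   membar_acquire        // (2): later refs may not float above the load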
1662 
1663 
1664 void LIRGenerator::do_StoreField(StoreField* x) {
1665   bool needs_patching = x->needs_patching();
1666   bool is_volatile = x->field()->is_volatile();
1667   BasicType field_type = x->field_type();
1668   bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);
1669 
1670   CodeEmitInfo* info = NULL;
1671   if (needs_patching) {
1672     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1673     info = state_for(x, x->state_before());
1674   } else if (x->needs_null_check()) {
1675     NullCheck* nc = x->explicit_null_check();
1676     if (nc == NULL) {
1677       info = state_for(x);
1678     } else {
1679       info = state_for(nc);
1680     }
1681   }
1682 
1683 
1684   LIRItem object(x->obj(), this);
1685   LIRItem value(x->value(),  this);
1686 
1687   object.load_item();
1688 
1689   if (is_volatile || needs_patching) {
1690     // load item if field is volatile (fewer special cases for volatiles)
1691     // load item if field not initialized
1692     // load item if field not constant
1693     // because of code patching we cannot inline constants
1694     if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1695       value.load_byte_item();
1696     } else  {
1697       value.load_item();
1698     }
1699   } else {
1700     value.load_for_store(field_type);
1701   }
1702 
1703   set_no_result(x);
1704 
1705 #ifndef PRODUCT
1706   if (PrintNotLoaded && needs_patching) {
1707     tty->print_cr("   ###class not loaded at store_%s bci %d",
1708                   x->is_static() ?  "static" : "field", x->printable_bci());
1709   }
1710 #endif
1711 
1712   if (x->needs_null_check() &&
1713       (needs_patching ||
1714        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1715     // emit an explicit null check because the offset is too large
1716     __ null_check(object.result(), new CodeEmitInfo(info));
1717   }
1718 
1719   LIR_Address* address;
1720   if (needs_patching) {
1721     // we need to patch the offset in the instruction so don't allow
1722     // generate_address to try to be smart about emitting the -1.
1723     // Otherwise the patching code won't know how to find the
1724     // instruction to patch.
1725     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1726   } else {
1727     address = generate_address(object.result(), x->offset(), field_type);
1728   }
1729 
1730   if (is_volatile && os::is_MP()) {
1731     __ membar_release();
1732   }
1733 
1734   if (is_oop) {
1735     // Do the pre-write barrier, if any.
1736     pre_barrier(LIR_OprFact::address(address),
1737                 LIR_OprFact::illegalOpr /* pre_val */,
1738                 true /* do_load*/,
1739                 needs_patching,
1740                 (info ? new CodeEmitInfo(info) : NULL));
1741   }
1742 
1743   bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
1744   if (needs_atomic_access && !needs_patching) {
1745     volatile_field_store(value.result(), address, info);
1746   } else {
1747     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1748     __ store(value.result(), address, info, patch_code);
1749   }
1750 
1751   if (is_oop) {
1752     // Stored an oop into the object, so mark the object's card
1753     post_barrier(object.result(), value.result());
1754   }
1755 
1756   if (is_volatile && os::is_MP()) {
1757     __ membar();
1758   }
1759 }
1760 
1761 
1762 void LIRGenerator::do_LoadField(LoadField* x) {
1763   bool needs_patching = x->needs_patching();
1764   bool is_volatile = x->field()->is_volatile();
1765   BasicType field_type = x->field_type();
1766 
1767   CodeEmitInfo* info = NULL;
1768   if (needs_patching) {
1769     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1770     info = state_for(x, x->state_before());
1771   } else if (x->needs_null_check()) {
1772     NullCheck* nc = x->explicit_null_check();
1773     if (nc == NULL) {
1774       info = state_for(x);
1775     } else {
1776       info = state_for(nc);
1777     }
1778   }
1779 
1780   LIRItem object(x->obj(), this);
1781 
1782   object.load_item();
1783 
1784 #ifndef PRODUCT
1785   if (PrintNotLoaded && needs_patching) {
1786     tty->print_cr("   ###class not loaded at load_%s bci %d",
1787                   x->is_static() ?  "static" : "field", x->printable_bci());
1788   }
1789 #endif
1790 
1791   bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1792   if (x->needs_null_check() &&
1793       (needs_patching ||
1794        MacroAssembler::needs_explicit_null_check(x->offset()) ||
1795        stress_deopt)) {
1796     LIR_Opr obj = object.result();
1797     if (stress_deopt) {
1798       obj = new_register(T_OBJECT);
1799       __ move(LIR_OprFact::oopConst(NULL), obj);
1800     }
1801     // emit an explicit null check because the offset is too large
1802     __ null_check(obj, new CodeEmitInfo(info));
1803   }
1804 
1805   LIR_Opr reg = rlock_result(x, field_type);
1806   LIR_Address* address;
1807   if (needs_patching) {
1808     // we need to patch the offset in the instruction so don't allow
1809     // generate_address to try to be smart about emitting the -1.
1810     // Otherwise the patching code won't know how to find the
1811     // instruction to patch.
1812     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1813   } else {
1814     address = generate_address(object.result(), x->offset(), field_type);
1815   }
1816 
1817   bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
1818   if (needs_atomic_access && !needs_patching) {
1819     volatile_field_load(address, reg, info);
1820   } else {
1821     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1822     __ load(address, reg, info, patch_code);
1823   }
1824 
1825   if (is_volatile && os::is_MP()) {
1826     __ membar_acquire();
1827   }
1828 }
1829 
1830 
1831 //------------------------java.nio.Buffer.checkIndex------------------------
1832 
1833 // int java.nio.Buffer.checkIndex(int)
1834 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1835   // NOTE: by the time we are in checkIndex() we are guaranteed that
1836   // the buffer is non-null (because checkIndex is package-private and
1837   // only called from within other methods in the buffer).
1838   assert(x->number_of_arguments() == 2, "wrong type");
1839   LIRItem buf  (x->argument_at(0), this);
1840   LIRItem index(x->argument_at(1), this);
1841   buf.load_item();
1842   index.load_item();
1843 
1844   LIR_Opr result = rlock_result(x);
1845   if (GenerateRangeChecks) {
1846     CodeEmitInfo* info = state_for(x);
1847     CodeStub* stub = new RangeCheckStub(info, index.result(), true);
1848     if (index.result()->is_constant()) {
1849       cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
1850       __ branch(lir_cond_belowEqual, T_INT, stub);
1851     } else {
1852       cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
1853                   java_nio_Buffer::limit_offset(), T_INT, info);
1854       __ branch(lir_cond_aboveEqual, T_INT, stub);
1855     }
1856     __ move(index.result(), result);
1857   } else {
1858     // Just load the index into the result register
1859     __ move(index.result(), result);
1860   }
1861 }
1862 
1863 
1864 //------------------------array access--------------------------------------
1865 
1866 
1867 void LIRGenerator::do_ArrayLength(ArrayLength* x) {
1868   LIRItem array(x->array(), this);
1869   array.load_item();
1870   LIR_Opr reg = rlock_result(x);
1871 
1872   CodeEmitInfo* info = NULL;
1873   if (x->needs_null_check()) {
1874     NullCheck* nc = x->explicit_null_check();
1875     if (nc == NULL) {
1876       info = state_for(x);
1877     } else {
1878       info = state_for(nc);
1879     }
1880     if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
1881       LIR_Opr obj = new_register(T_OBJECT);
1882       __ move(LIR_OprFact::oopConst(NULL), obj);
1883       __ null_check(obj, new CodeEmitInfo(info));
1884     }
1885   }
1886   __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
1887 }
1888 
1889 
1890 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
1891   bool use_length = x->length() != NULL;
1892   LIRItem array(x->array(), this);
1893   LIRItem index(x->index(), this);
1894   LIRItem length(this);
1895   bool needs_range_check = x->compute_needs_range_check();
1896 
1897   if (use_length && needs_range_check) {
1898     length.set_instruction(x->length());
1899     length.load_item();
1900   }
1901 
1902   array.load_item();
1903   if (index.is_constant() && can_inline_as_constant(x->index())) {
1904     // let it be a constant
1905     index.dont_load_item();
1906   } else {
1907     index.load_item();
1908   }
1909 
1910   CodeEmitInfo* range_check_info = state_for(x);
1911   CodeEmitInfo* null_check_info = NULL;
1912   if (x->needs_null_check()) {
1913     NullCheck* nc = x->explicit_null_check();
1914     if (nc != NULL) {
1915       null_check_info = state_for(nc);
1916     } else {
1917       null_check_info = range_check_info;
1918     }
1919     if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
1920       LIR_Opr obj = new_register(T_OBJECT);
1921       __ move(LIR_OprFact::oopConst(NULL), obj);
1922       __ null_check(obj, new CodeEmitInfo(null_check_info));
1923     }
1924   }
1925 
1926   // emit array address setup early so it schedules better
1927   LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
1928 
1929   if (GenerateRangeChecks && needs_range_check) {
1930     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1931       __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
1932     } else if (use_length) {
1933       // TODO: use a (modified) version of array_range_check that does not require a
1934       //       constant length to be loaded to a register
1935       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1936       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
1937     } else {
1938       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1939       // The range check performs the null check, so clear it out for the load
1940       null_check_info = NULL;
1941     }
1942   }
1943 
1944   __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
1945 }
1946 
1947 
1948 void LIRGenerator::do_NullCheck(NullCheck* x) {
1949   if (x->can_trap()) {
1950     LIRItem value(x->obj(), this);
1951     value.load_item();
1952     CodeEmitInfo* info = state_for(x);
1953     __ null_check(value.result(), info);
1954   }
1955 }
1956 
1957 
1958 void LIRGenerator::do_TypeCast(TypeCast* x) {
1959   LIRItem value(x->obj(), this);
1960   value.load_item();
1961   // the result is the same as from the node we are casting
1962   set_result(x, value.result());
1963 }
1964 
1965 
1966 void LIRGenerator::do_Throw(Throw* x) {
1967   LIRItem exception(x->exception(), this);
1968   exception.load_item();
1969   set_no_result(x);
1970   LIR_Opr exception_opr = exception.result();
1971   CodeEmitInfo* info = state_for(x, x->state());
1972 
1973 #ifndef PRODUCT
1974   if (PrintC1Statistics) {
1975     increment_counter(Runtime1::throw_count_address(), T_INT);
1976   }
1977 #endif
1978 
1979   // check if the instruction has an xhandler in any of the nested scopes
1980   bool unwind = false;
1981   if (info->exception_handlers()->length() == 0) {
1982     // this throw is not inside an xhandler
1983     unwind = true;
1984   } else {
1985     // get some idea of the throw type
1986     bool type_is_exact = true;
1987     ciType* throw_type = x->exception()->exact_type();
1988     if (throw_type == NULL) {
1989       type_is_exact = false;
1990       throw_type = x->exception()->declared_type();
1991     }
1992     if (throw_type != NULL && throw_type->is_instance_klass()) {
1993       ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
1994       unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
1995     }
1996   }
1997 
1998   // do null check before moving exception oop into fixed register
1999   // to avoid a fixed interval with an oop during the null check.
2000   // Use a copy of the CodeEmitInfo because debug information is
2001   // different for null_check and throw.
2002   if (GenerateCompilerNullChecks &&
2003       (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
2004     // if the exception object wasn't created using new then it might be null.
2005     __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
2006   }
2007 
2008   if (compilation()->env()->jvmti_can_post_on_exceptions()) {
2009     // we need to go through the exception lookup path to get JVMTI
2010     // notification done
2011     unwind = false;
2012   }
2013 
2014   // move exception oop into fixed register
2015   __ move(exception_opr, exceptionOopOpr());
2016 
2017   if (unwind) {
2018     __ unwind_exception(exceptionOopOpr());
2019   } else {
2020     __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
2021   }
2022 }
2023 
2024 
2025 void LIRGenerator::do_RoundFP(RoundFP* x) {
2026   LIRItem input(x->input(), this);
2027   input.load_item();
2028   LIR_Opr input_opr = input.result();
2029   assert(input_opr->is_register(), "why round if value is not in a register?");
2030   assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
2031   if (input_opr->is_single_fpu()) {
2032     set_result(x, round_item(input_opr)); // This code path not currently taken
2033   } else {
2034     LIR_Opr result = new_register(T_DOUBLE);
2035     set_vreg_flag(result, must_start_in_memory);
2036     __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
2037     set_result(x, result);
2038   }
2039 }
2040 
2041 // Here UnsafeGetRaw may have x->base() and x->index() be either int or long
2042 // on both 64-bit and 32-bit platforms. x->base() is expected to always be long on 64-bit.
2043 void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
2044   LIRItem base(x->base(), this);
2045   LIRItem idx(this);
2046 
2047   base.load_item();
2048   if (x->has_index()) {
2049     idx.set_instruction(x->index());
2050     idx.load_nonconstant();
2051   }
2052 
2053   LIR_Opr reg = rlock_result(x, x->basic_type());
2054 
2055   int   log2_scale = 0;
2056   if (x->has_index()) {
2057     log2_scale = x->log2_scale();
2058   }
2059 
2060   assert(!x->has_index() || idx.value() == x->index(), "should match");
2061 
2062   LIR_Opr base_op = base.result();
2063   LIR_Opr index_op = idx.result();
2064 #ifndef _LP64
2065   if (base_op->type() == T_LONG) {
2066     base_op = new_register(T_INT);
2067     __ convert(Bytecodes::_l2i, base.result(), base_op);
2068   }
2069   if (x->has_index()) {
2070     if (index_op->type() == T_LONG) {
2071       LIR_Opr long_index_op = index_op;
2072       if (index_op->is_constant()) {
2073         long_index_op = new_register(T_LONG);
2074         __ move(index_op, long_index_op);
2075       }
2076       index_op = new_register(T_INT);
2077       __ convert(Bytecodes::_l2i, long_index_op, index_op);
2078     } else {
2079       assert(x->index()->type()->tag() == intTag, "must be");
2080     }
2081   }
2082   // At this point base and index should all be ints.
2083   assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be a non-constant int");
2084   assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
2085 #else
2086   if (x->has_index()) {
2087     if (index_op->type() == T_INT) {
2088       if (!index_op->is_constant()) {
2089         index_op = new_register(T_LONG);
2090         __ convert(Bytecodes::_i2l, idx.result(), index_op);
2091       }
2092     } else {
2093       assert(index_op->type() == T_LONG, "must be");
2094       if (index_op->is_constant()) {
2095         index_op = new_register(T_LONG);
2096         __ move(idx.result(), index_op);
2097       }
2098     }
2099   }
2100   // At this point base is a non-constant long.
2101   // Index is a long register or an int constant.
2102   // We allow the constant to stay an int because that allows a more compact encoding by
2103   // embedding an immediate offset in the address expression. If we have a long constant, we have to
2104   // move it into a register first.
2105   assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a long non-constant");
2106   assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
2107                             (index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
2108 #endif
2109 
2110   BasicType dst_type = x->basic_type();
2111 
2112   LIR_Address* addr;
2113   if (index_op->is_constant()) {
2114     assert(log2_scale == 0, "must not have a scale");
2115     assert(index_op->type() == T_INT, "only int constants supported");
2116     addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
2117   } else {
2118 #ifdef X86
2119     addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
2120 #elif defined(GENERATE_ADDRESS_IS_PREFERRED)
2121     addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
2122 #else
2123     if (index_op->is_illegal() || log2_scale == 0) {
2124       addr = new LIR_Address(base_op, index_op, dst_type);
2125     } else {
2126       LIR_Opr tmp = new_pointer_register();
2127       __ shift_left(index_op, log2_scale, tmp);
2128       addr = new LIR_Address(base_op, tmp, dst_type);
2129     }
2130 #endif
2131   }
2132 
2133   if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
2134     __ unaligned_move(addr, reg);
2135   } else {
2136     if (dst_type == T_OBJECT && x->is_wide()) {
2137       __ move_wide(addr, reg);
2138     } else {
2139       __ move(addr, reg);
2140     }
2141   }
2142 }
2143 
2144 
2145 void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
2146   int  log2_scale = 0;
2147   BasicType type = x->basic_type();
2148 
2149   if (x->has_index()) {
2150     log2_scale = x->log2_scale();
2151   }
2152 
2153   LIRItem base(x->base(), this);
2154   LIRItem value(x->value(), this);
2155   LIRItem idx(this);
2156 
2157   base.load_item();
2158   if (x->has_index()) {
2159     idx.set_instruction(x->index());
2160     idx.load_item();
2161   }
2162 
2163   if (type == T_BYTE || type == T_BOOLEAN) {
2164     value.load_byte_item();
2165   } else {
2166     value.load_item();
2167   }
2168 
2169   set_no_result(x);
2170 
2171   LIR_Opr base_op = base.result();
2172   LIR_Opr index_op = idx.result();
2173 
2174 #ifdef GENERATE_ADDRESS_IS_PREFERRED
2175   LIR_Address* addr = generate_address(base_op, index_op, log2_scale, 0, x->basic_type());
2176 #else
2177 #ifndef _LP64
2178   if (base_op->type() == T_LONG) {
2179     base_op = new_register(T_INT);
2180     __ convert(Bytecodes::_l2i, base.result(), base_op);
2181   }
2182   if (x->has_index()) {
2183     if (index_op->type() == T_LONG) {
2184       index_op = new_register(T_INT);
2185       __ convert(Bytecodes::_l2i, idx.result(), index_op);
2186     }
2187   }
2188   // At this point base and index should all be ints and not constants
2189   assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be a non-constant int");
2190   assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be a non-constant int");
2191 #else
2192   if (x->has_index()) {
2193     if (index_op->type() == T_INT) {
2194       index_op = new_register(T_LONG);
2195       __ convert(Bytecodes::_i2l, idx.result(), index_op);
2196     }
2197   }
2198   // At this point base and index are long and non-constant
2199   assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
2200   assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long");
2201 #endif
2202 
2203   if (log2_scale != 0) {
2204     // temporary fix (platform-dependent code without shift on Intel would be better)
2205     // TODO: ARM also allows embedded shift in the address
2206     __ shift_left(index_op, log2_scale, index_op);
2207   }
2208 
2209   LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2210 #endif // !GENERATE_ADDRESS_IS_PREFERRED
2211   __ move(value.result(), addr);
2212 }
2213 
2214 
2215 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2216   BasicType type = x->basic_type();
2217   LIRItem src(x->object(), this);
2218   LIRItem off(x->offset(), this);
2219 
2220   off.load_item();
2221   src.load_item();
2222 
2223   LIR_Opr value = rlock_result(x, x->basic_type());
2224 
2225   get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2226 
2227 #if INCLUDE_ALL_GCS
2228   // We might be reading the value of the referent field of a
2229   // Reference object in order to attach it back to the live
2230   // object graph. If G1 is enabled then we need to record
2231   // the value that is being returned in an SATB log buffer.
2232   //
2233   // We need to generate code similar to the following...
2234   //
2235   // if (offset == java_lang_ref_Reference::referent_offset) {
2236   //   if (src != NULL) {
2237   //     if (klass(src)->reference_type() != REF_NONE) {
2238   //       pre_barrier(..., value, ...);
2239   //     }
2240   //   }
2241   // }
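       // The gen_* flags below track which of these checks can be discharged
       // at compile time (constant offset, constant or array source,
       // statically known klass); every check that cannot be eliminated is
       // emitted as a runtime guard in front of the pre_barrier() call.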
2242 
2243   if (UseG1GC && type == T_OBJECT) {
2244     bool gen_pre_barrier = true;     // Assume we need to generate pre_barrier.
2245     bool gen_offset_check = true;    // Assume we need to generate the offset guard.
2246     bool gen_source_check = true;    // Assume we need to check the src object for null.
2247     bool gen_type_check = true;      // Assume we need to check the reference_type.
2248 
2249     if (off.is_constant()) {
2250       jlong off_con = (off.type()->is_int() ?
2251                         (jlong) off.get_jint_constant() :
2252                         off.get_jlong_constant());
2253 
2254 
2255       if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
2256         // The constant offset is something other than referent_offset.
2257         // We can skip generating/checking the remaining guards and
2258         // skip generation of the code stub.
2259         gen_pre_barrier = false;
2260       } else {
2261         // The constant offset is the same as referent_offset -
2262         // we do not need to generate a runtime offset check.
2263         gen_offset_check = false;
2264       }
2265     }
2266 
2267     // We don't need to generate the stub if the source object is an array
2268     if (gen_pre_barrier && src.type()->is_array()) {
2269       gen_pre_barrier = false;
2270     }
2271 
2272     if (gen_pre_barrier) {
2273       // We still need to continue with the checks.
2274       if (src.is_constant()) {
2275         ciObject* src_con = src.get_jobject_constant();
2276         guarantee(src_con != NULL, "no source constant");
2277 
2278         if (src_con->is_null_object()) {
2279           // The constant src object is null - We can skip
2280           // generating the code stub.
2281           gen_pre_barrier = false;
2282         } else {
2283           // Non-null constant source object. We still have to generate
2284           // the slow stub - but we don't need to generate the runtime
2285           // null object check.
2286           gen_source_check = false;
2287         }
2288       }
2289     }
2290     if (gen_pre_barrier && !PatchALot) {
2291       // Can the klass of object be statically determined to be
2292       // a sub-class of Reference?
2293       ciType* type = src.value()->declared_type();
2294       if ((type != NULL) && type->is_loaded()) {
2295         if (type->is_subtype_of(compilation()->env()->Reference_klass())) {
2296           gen_type_check = false;
2297         } else if (type->is_klass() &&
2298                    !compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
2299           // Not Reference and not Object klass.
2300           gen_pre_barrier = false;
2301         }
2302       }
2303     }
2304 
2305     if (gen_pre_barrier) {
2306       LabelObj* Lcont = new LabelObj();
2307 
2308       // We may need to generate more than one runtime check here. Let's
2309       // start with the offset check.
2310       if (gen_offset_check) {
2311         // if (offset != referent_offset) -> continue
2312         // If offset is an int then we can do the comparison with the
2313         // referent_offset constant; otherwise we need to move
2314         // referent_offset into a temporary register and generate
2315         // a reg-reg compare.
2316 
2317         LIR_Opr referent_off;
2318 
2319         if (off.type()->is_int()) {
2320           referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
2321         } else {
2322           assert(off.type()->is_long(), "what else?");
2323           referent_off = new_register(T_LONG);
2324           __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
2325         }
2326         __ cmp(lir_cond_notEqual, off.result(), referent_off);
2327         __ branch(lir_cond_notEqual, as_BasicType(off.type()), Lcont->label());
2328       }
2329       if (gen_source_check) {
2330         // offset is a const and equals referent offset
2331         // if (source == null) -> continue
2332         __ cmp(lir_cond_equal, src.result(), LIR_OprFact::oopConst(NULL));
2333         __ branch(lir_cond_equal, T_OBJECT, Lcont->label());
2334       }
2335       LIR_Opr src_klass = new_register(T_OBJECT);
2336       if (gen_type_check) {
2337         // We have determined that offset == referent_offset && src != null.
2338         // if (src->_klass->_reference_type == REF_NONE) -> continue
2339         __ move(new LIR_Address(src.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), src_klass);
2340         LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
2341         LIR_Opr reference_type = new_register(T_INT);
2342         __ move(reference_type_addr, reference_type);
2343         __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
2344         __ branch(lir_cond_equal, T_INT, Lcont->label());
2345       }
2346       {
2347         // We have determined that src->_klass->_reference_type != REF_NONE
2348         // so register the value in the referent field with the pre-barrier.
2349         pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
2350                     value  /* pre_val */,
2351                     false  /* do_load */,
2352                     false  /* patch */,
2353                     NULL   /* info */);
2354       }
2355       __ branch_destination(Lcont->label());
2356     }
2357   }
2358 #endif // INCLUDE_ALL_GCS
2359 
2360   if (x->is_volatile() && os::is_MP()) __ membar_acquire();
2361 }
2362 
2363 
2364 void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
2365   BasicType type = x->basic_type();
2366   LIRItem src(x->object(), this);
2367   LIRItem off(x->offset(), this);
2368   LIRItem data(x->value(), this);
2369 
2370   src.load_item();
2371   if (type == T_BOOLEAN || type == T_BYTE) {
2372     data.load_byte_item();
2373   } else {
2374     data.load_item();
2375   }
2376   off.load_item();
2377 
2378   set_no_result(x);
2379 
2380   if (x->is_volatile() && os::is_MP()) __ membar_release();
2381   put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
2382   if (x->is_volatile() && os::is_MP()) __ membar();
2383 }
2384 
2385 
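     // For each range [low_key..high_key] -> dest, do_SwitchRanges() emits,
     // in sketch:
     //
     //   cmp  value, low_key;   branch(less, L_skip)
     //   cmp  value, high_key;  branch(lessEqual, dest)
     // L_skip:
     //
     // with single-key and two-key ranges specialized into plain equality tests.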
2386 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2387   int lng = x->length();
2388 
2389   for (int i = 0; i < lng; i++) {
2390     SwitchRange* one_range = x->at(i);
2391     int low_key = one_range->low_key();
2392     int high_key = one_range->high_key();
2393     BlockBegin* dest = one_range->sux();
2394     if (low_key == high_key) {
2395       __ cmp(lir_cond_equal, value, low_key);
2396       __ branch(lir_cond_equal, T_INT, dest);
2397     } else if (high_key - low_key == 1) {
2398       __ cmp(lir_cond_equal, value, low_key);
2399       __ branch(lir_cond_equal, T_INT, dest);
2400       __ cmp(lir_cond_equal, value, high_key);
2401       __ branch(lir_cond_equal, T_INT, dest);
2402     } else {
2403       LabelObj* L = new LabelObj();
2404       __ cmp(lir_cond_less, value, low_key);
2405       __ branch(lir_cond_less, T_INT, L->label());
2406       __ cmp(lir_cond_lessEqual, value, high_key);
2407       __ branch(lir_cond_lessEqual, T_INT, dest);
2408       __ branch_destination(L->label());
2409     }
2410   }
2411   __ jump(default_sux);
2412 }
2413 
2414 
2415 SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
2416   SwitchRangeList* res = new SwitchRangeList();
2417   int len = x->length();
2418   if (len > 0) {
2419     BlockBegin* sux = x->sux_at(0);
2420     int key = x->lo_key();
2421     BlockBegin* default_sux = x->default_sux();
2422     SwitchRange* range = new SwitchRange(key, sux);
2423     for (int i = 0; i < len; i++, key++) {
2424       BlockBegin* new_sux = x->sux_at(i);
2425       if (sux == new_sux) {
2426         // still in same range
2427         range->set_high_key(key);
2428       } else {
2429         // skip tests which explicitly dispatch to the default
2430         if (sux != default_sux) {
2431           res->append(range);
2432         }
2433         range = new SwitchRange(key, new_sux);
2434       }
2435       sux = new_sux;
2436     }
2437     if (res->length() == 0 || res->last() != range)  res->append(range);
2438   }
2439   return res;
2440 }
2441 
2442 
2443 // we expect the keys to be sorted by increasing value
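     // For example, keys {0, 1, 2, 7} with successors {B1, B1, B1, B2} collapse
     // into the two ranges [0..2] -> B1 and [7..7] -> B2; ranges that dispatch
     // to the default successor are dropped.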
2444 SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
2445   SwitchRangeList* res = new SwitchRangeList();
2446   int len = x->length();
2447   if (len > 0) {
2448     BlockBegin* default_sux = x->default_sux();
2449     int key = x->key_at(0);
2450     BlockBegin* sux = x->sux_at(0);
2451     SwitchRange* range = new SwitchRange(key, sux);
2452     for (int i = 1; i < len; i++) {
2453       int new_key = x->key_at(i);
2454       BlockBegin* new_sux = x->sux_at(i);
2455       if (key+1 == new_key && sux == new_sux) {
2456         // still in same range
2457         range->set_high_key(new_key);
2458       } else {
2459         // skip tests which explicitly dispatch to the default
2460         if (range->sux() != default_sux) {
2461           res->append(range);
2462         }
2463         range = new SwitchRange(new_key, new_sux);
2464       }
2465       key = new_key;
2466       sux = new_sux;
2467     }
2468     if (res->length() == 0 || res->last() != range)  res->append(range);
2469   }
2470   return res;
2471 }
2472 
2473 
2474 void LIRGenerator::do_TableSwitch(TableSwitch* x) {
2475   LIRItem tag(x->tag(), this);
2476   tag.load_item();
2477   set_no_result(x);
2478 
2479   if (x->is_safepoint()) {
2480     __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2481   }
2482 
2483   // move values into phi locations
2484   move_to_phi(x->state());
2485 
2486   int lo_key = x->lo_key();
2487   int hi_key = x->hi_key();
2488   int len = x->length();
2489   LIR_Opr value = tag.result();
2490   if (UseTableRanges) {
2491     do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2492   } else {
2493     for (int i = 0; i < len; i++) {
2494       __ cmp(lir_cond_equal, value, i + lo_key);
2495       __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2496     }
2497     __ jump(x->default_sux());
2498   }
2499 }
2500 
2501 
2502 void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
2503   LIRItem tag(x->tag(), this);
2504   tag.load_item();
2505   set_no_result(x);
2506 
2507   if (x->is_safepoint()) {
2508     __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2509   }
2510 
2511   // move values into phi locations
2512   move_to_phi(x->state());
2513 
2514   LIR_Opr value = tag.result();
2515   if (UseTableRanges) {
2516     do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2517   } else {
2518     int len = x->length();
2519     for (int i = 0; i < len; i++) {
2520       __ cmp(lir_cond_equal, value, x->key_at(i));
2521       __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2522     }
2523     __ jump(x->default_sux());
2524   }
2525 }
2526 
2527 
2528 void LIRGenerator::do_Goto(Goto* x) {
2529   set_no_result(x);
2530 
2531   if (block()->next()->as_OsrEntry()) {
2532     // need to free up storage used for OSR entry point
2533     LIR_Opr osrBuffer = block()->next()->operand();
2534     BasicTypeList signature;
2535     signature.append(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); // pass a pointer to osrBuffer
2536     CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2537     __ move(osrBuffer, cc->args()->at(0));
2538     __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
2539                          getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
2540   }
2541 
2542   if (x->is_safepoint()) {
2543     ValueStack* state = x->state_before() ? x->state_before() : x->state();
2544 
2545     // increment backedge counter if needed
2546     CodeEmitInfo* info = state_for(x, state);
2547     increment_backedge_counter(info, x->profiled_bci());
2548     CodeEmitInfo* safepoint_info = state_for(x, state);
2549     __ safepoint(safepoint_poll_register(), safepoint_info);
2550   }
2551 
2552   // Gotos can be folded Ifs; handle this case.
2553   if (x->should_profile()) {
2554     ciMethod* method = x->profiled_method();
2555     assert(method != NULL, "method should be set if branch is profiled");
2556     ciMethodData* md = method->method_data_or_null();
2557     assert(md != NULL, "Sanity");
2558     ciProfileData* data = md->bci_to_data(x->profiled_bci());
2559     assert(data != NULL, "must have profiling data");
2560     int offset;
2561     if (x->direction() == Goto::taken) {
2562       assert(data->is_BranchData(), "need BranchData for two-way branches");
2563       offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
2564     } else if (x->direction() == Goto::not_taken) {
2565       assert(data->is_BranchData(), "need BranchData for two-way branches");
2566       offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
2567     } else {
2568       assert(data->is_JumpData(), "need JumpData for branches");
2569       offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
2570     }
2571     LIR_Opr md_reg = new_register(T_METADATA);
2572     __ metadata2reg(md->constant_encoding(), md_reg);
2573 
2574     increment_counter(new LIR_Address(md_reg, offset,
2575                                       NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
2576   }
2577 
2578   // emit phi-instruction move after safepoint since this simplifies
2579   // describing the state at the safepoint.
2580   move_to_phi(x->state());
2581 
2582   __ jump(x->default_sux());
2583 }
2584 
2585 /**
2586  * Emit profiling code if needed for arguments, parameters, return value types
2587  *
2588  * @param md                    MDO the code will update at runtime
2589  * @param md_base_offset        common offset in the MDO for this profile and subsequent ones
2590  * @param md_offset             offset in the MDO (on top of md_base_offset) for this profile
2591  * @param profiled_k            current profile
2592  * @param obj                   IR node for the object to be profiled
2593  * @param mdp                   register to hold the pointer inside the MDO (md + md_base_offset).
2594  *                              Set once we find an update to make and use for next ones.
2595  * @param not_null              true if we know obj cannot be null
2596  * @param signature_at_call_k   signature at call for obj
2597  * @param callee_signature_k   signature of the callee for obj; the signature at
2598  *                              the call site and the callee's signature differ at method handle calls
2599  * @return                      the only klass we know will ever be seen at this profile point
2600  */
2601 ciKlass* LIRGenerator::profile_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k,
2602                                     Value obj, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
2603                                     ciKlass* callee_signature_k) {
2604   ciKlass* result = NULL;
2605   bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
2606   bool do_update = !TypeEntries::is_type_unknown(profiled_k);
2607   // object known not to be null (or the null bit already set) and the type
2608   // already recorded as unknown: nothing we can do to improve profiling
2609   if (!do_null && !do_update) {
2610     return result;
2611   }
2612 
2613   ciKlass* exact_klass = NULL;
2614   Compilation* comp = Compilation::current();
2615   if (do_update) {
2616     // try to find exact type, using CHA if possible, so that loading
2617     // the klass from the object can be avoided
2618     ciType* type = obj->exact_type();
2619     if (type == NULL) {
2620       type = obj->declared_type();
2621       type = comp->cha_exact_type(type);
2622     }
2623     assert(type == NULL || type->is_klass(), "type should be class");
2624     exact_klass = (type != NULL && type->is_loaded()) ? (ciKlass*)type : NULL;
2625 
2626     do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2627   }
2628 
2629   if (!do_null && !do_update) {
2630     return result;
2631   }
2632 
2633   ciKlass* exact_signature_k = NULL;
2634   if (do_update) {
2635     // Is the type from the signature exact (the only one possible)?
2636     exact_signature_k = signature_at_call_k->exact_klass();
2637     if (exact_signature_k == NULL) {
2638       exact_signature_k = comp->cha_exact_type(signature_at_call_k);
2639     } else {
2640       result = exact_signature_k;
2641       // Known statically. No need to emit any code: prevent
2642       // LIR_Assembler::emit_profile_type() from emitting useless code
2643       profiled_k = ciTypeEntries::with_status(result, profiled_k);
2644     }
2645     // exact_klass and exact_signature_k can both be non-NULL but
2646     // different if exact_klass is loaded after the ciObject for
2647     // exact_signature_k is created.
2648     if (exact_klass == NULL && exact_signature_k != NULL && exact_klass != exact_signature_k) {
2649       // sometimes the type of the signature is better than the best type
2650       // the compiler has
2651       exact_klass = exact_signature_k;
2652     }
2653     if (callee_signature_k != NULL &&
2654         callee_signature_k != signature_at_call_k) {
2655       ciKlass* improved_klass = callee_signature_k->exact_klass();
2656       if (improved_klass == NULL) {
2657         improved_klass = comp->cha_exact_type(callee_signature_k);
2658       }
2659       if (exact_klass == NULL && improved_klass != NULL && exact_klass != improved_klass) {
2660         exact_klass = improved_klass;
2661       }
2662     }
2663     do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2664   }
2665 
2666   if (!do_null && !do_update) {
2667     return result;
2668   }
2669 
2670   if (mdp == LIR_OprFact::illegalOpr) {
2671     mdp = new_register(T_METADATA);
2672     __ metadata2reg(md->constant_encoding(), mdp);
2673     if (md_base_offset != 0) {
2674       LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
2675       mdp = new_pointer_register();
2676       __ leal(LIR_OprFact::address(base_type_address), mdp);
2677     }
2678   }
2679   LIRItem value(obj, this);
2680   value.load_item();
2681   __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
2682                   value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL);
2683   return result;
2684 }
2685 
2686 // profile parameters on entry to the root of the compilation
2687 void LIRGenerator::profile_parameters(Base* x) {
2688   if (compilation()->profile_parameters()) {
2689     CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2690     ciMethodData* md = scope()->method()->method_data_or_null();
2691     assert(md != NULL, "Sanity");
2692 
2693     if (md->parameters_type_data() != NULL) {
2694       ciParametersTypeData* parameters_type_data = md->parameters_type_data();
2695       ciTypeStackSlotEntries* parameters =  parameters_type_data->parameters();
2696       LIR_Opr mdp = LIR_OprFact::illegalOpr;
2697       for (int java_index = 0, i = 0, j = 0; j < parameters_type_data->number_of_parameters(); i++) {
2698         LIR_Opr src = args->at(i);
2699         assert(!src->is_illegal(), "check");
2700         BasicType t = src->type();
2701         if (t == T_OBJECT || t == T_ARRAY) {
2702           intptr_t profiled_k = parameters->type(j);
2703           Local* local = x->state()->local_at(java_index)->as_Local();
2704           ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
2705                                         in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
2706                                         profiled_k, local, mdp, false, local->declared_type()->as_klass(), NULL);
2707           // If the profile is known statically, set it once and for all and do not emit any code
2708           if (exact != NULL) {
2709             md->set_parameter_type(j, exact);
2710           }
2711           j++;
2712         }
2713         java_index += type2size[t];
2714       }
2715     }
2716   }
2717 }
2718 
2719 void LIRGenerator::do_Base(Base* x) {
2720   __ std_entry(LIR_OprFact::illegalOpr);
2721   // Emit moves from physical registers / stack slots to virtual registers
2722   CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2723   IRScope* irScope = compilation()->hir()->top_scope();
2724   int java_index = 0;
2725   for (int i = 0; i < args->length(); i++) {
2726     LIR_Opr src = args->at(i);
2727     assert(!src->is_illegal(), "check");
2728     BasicType t = src->type();
2729 
2730     // Types which are smaller than int are passed as int, so
2731     // correct the type which is passed.
2732     switch (t) {
2733     case T_BYTE:
2734     case T_BOOLEAN:
2735     case T_SHORT:
2736     case T_CHAR:
2737       t = T_INT;
2738       break;
2739     }
2740 
2741     LIR_Opr dest = new_register(t);
2742     __ move(src, dest);
2743 
2744     // Assign new location to Local instruction for this local
2745     Local* local = x->state()->local_at(java_index)->as_Local();
2746     assert(local != NULL, "Locals for incoming arguments must have been created");
2747 #ifndef __SOFTFP__
2748     // The java calling convention passes double as long and float as int.
2749     assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
2750 #endif // __SOFTFP__
2751     local->set_operand(dest);
2752     _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
2753     java_index += type2size[t];
2754   }
2755 
2756   if (compilation()->env()->dtrace_method_probes()) {
2757     BasicTypeList signature;
2758     signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
2759     signature.append(T_METADATA); // Method*
2760     LIR_OprList* args = new LIR_OprList();
2761     args->append(getThreadPointer());
2762     LIR_Opr meth = new_register(T_METADATA);
2763     __ metadata2reg(method()->constant_encoding(), meth);
2764     args->append(meth);
2765     call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
2766   }
2767 
2768   if (method()->is_synchronized()) {
2769     LIR_Opr obj;
2770     if (method()->is_static()) {
2771       obj = new_register(T_OBJECT);
2772       __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
2773     } else {
2774       Local* receiver = x->state()->local_at(0)->as_Local();
2775       assert(receiver != NULL, "must already exist");
2776       obj = receiver->operand();
2777     }
2778     assert(obj->is_valid(), "must be valid");
2779 
2780     if (method()->is_synchronized() && GenerateSynchronizationCode) {
2781       LIR_Opr lock = new_register(T_INT);
2782       __ load_stack_address_monitor(0, lock);
2783 
2784       CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
2785       CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2786 
2787       // receiver is guaranteed non-NULL, so we don't need a CodeEmitInfo
2788       __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
2789     }
2790   }
2791   if (compilation()->age_code()) {
2792     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), NULL, false);
2793     decrement_age(info);
2794   }
2795   // increment invocation counters if needed
2796   if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2797     profile_parameters(x);
2798     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
2799     increment_invocation_counter(info);
2800   }
2801 
2802   // all blocks with a successor must end with an unconditional jump
2803   // to the successor even if they are consecutive
2804   __ jump(x->default_sux());
2805 }
2806 
2807 
2808 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2809   // construct our frame and model the production of the incoming pointer
2810   // to the OSR buffer.
2811   __ osr_entry(LIR_Assembler::osrBufferPointer());
2812   LIR_Opr result = rlock_result(x);
2813   __ move(LIR_Assembler::osrBufferPointer(), result);
2814 }
2815 
2816 
2817 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2818   assert(args->length() == arg_list->length(),
2819          err_msg_res("args=%d, arg_list=%d", args->length(), arg_list->length()));
2820   for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
2821     LIRItem* param = args->at(i);
2822     LIR_Opr loc = arg_list->at(i);
2823     if (loc->is_register()) {
2824       param->load_item_force(loc);
2825     } else {
2826       LIR_Address* addr = loc->as_address_ptr();
2827       param->load_for_store(addr->type());
2828       if (addr->type() == T_OBJECT) {
2829         __ move_wide(param->result(), addr);
2830       } else if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2831         __ unaligned_move(param->result(), addr);
2832       } else {
2833         __ move(param->result(), addr);
2834       }
2836     }
2837   }
2838 
2839   if (x->has_receiver()) {
2840     LIRItem* receiver = args->at(0);
2841     LIR_Opr loc = arg_list->at(0);
2842     if (loc->is_register()) {
2843       receiver->load_item_force(loc);
2844     } else {
2845       assert(loc->is_address(), "just checking");
2846       receiver->load_for_store(T_OBJECT);
2847       __ move_wide(receiver->result(), loc->as_address_ptr());
2848     }
2849   }
2850 }
2851 
2852 
2853 // Visits all arguments, returns appropriate items without loading them
2854 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2855   LIRItemList* argument_items = new LIRItemList();
2856   if (x->has_receiver()) {
2857     LIRItem* receiver = new LIRItem(x->receiver(), this);
2858     argument_items->append(receiver);
2859   }
2860   for (int i = 0; i < x->number_of_arguments(); i++) {
2861     LIRItem* param = new LIRItem(x->argument_at(i), this);
2862     argument_items->append(param);
2863   }
2864   return argument_items;
2865 }
2866 
2867 
2868 // The invoke with receiver has the following phases:
2869 //   a) traverse and load/lock receiver;
2870 //   b) traverse all arguments -> item-array (invoke_visit_argument)
2871 //   c) push receiver on stack
2872 //   d) load each of the items and push on stack
2873 //   e) unlock receiver
2874 //   f) move receiver into receiver-register %o0
2875 //   g) lock result registers and emit call operation
2876 //
2877 // Before issuing a call, we must spill-save all values on stack
2878 // that are in caller-save register. "spill-save" moves those registers
2879 // either in a free callee-save register or spills them if no free
2880 // callee save register is available.
2881 //
2882 // The problem is where to invoke spill-save.
2883 // - if invoked between e) and f), we may lock callee save
2884 //   register in "spill-save" that destroys the receiver register
2885 //   before f) is executed
2886 // - if we rearrange f) to be earlier (by loading %o0) it
2887 //   may destroy a value on the stack that is currently in %o0
2888 //   and is waiting to be spilled
2889 // - if we keep the receiver locked while doing spill-save,
2890 //   we cannot spill it as it is spill-locked
2891 //
2892 void LIRGenerator::do_Invoke(Invoke* x) {
2893   CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
2894 
2895   LIR_OprList* arg_list = cc->args();
2896   LIRItemList* args = invoke_visit_arguments(x);
2897   LIR_Opr receiver = LIR_OprFact::illegalOpr;
2898 
  // set up the result register
2900   LIR_Opr result_register = LIR_OprFact::illegalOpr;
2901   if (x->type() != voidType) {
2902     result_register = result_register_for(x->type());
2903   }
2904 
2905   CodeEmitInfo* info = state_for(x, x->state());
2906 
2907   invoke_load_arguments(x, args, arg_list);
2908 
2909   if (x->has_receiver()) {
2910     args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
2911     receiver = args->at(0)->result();
2912   }
2913 
2914   // emit invoke code
2915   bool optimized = x->target_is_loaded() && x->target_is_final();
2916   assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
2917 
2918   // JSR 292
2919   // Preserve the SP over MethodHandle call sites, if needed.
2920   ciMethod* target = x->target();
2921   bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
2922                                   target->is_method_handle_intrinsic() ||
2923                                   target->is_compiled_lambda_form());
2924   if (is_method_handle_invoke) {
2925     info->set_is_method_handle_invoke(true);
    if (FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
      __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
    }
2929   }
2930 
2931   switch (x->code()) {
2932     case Bytecodes::_invokestatic:
2933       __ call_static(target, result_register,
2934                      SharedRuntime::get_resolve_static_call_stub(),
2935                      arg_list, info);
2936       break;
2937     case Bytecodes::_invokespecial:
2938     case Bytecodes::_invokevirtual:
2939     case Bytecodes::_invokeinterface:
      // for a final target we still produce an inline cache, in order
      // to be able to call in mixed mode
2942       if (x->code() == Bytecodes::_invokespecial || optimized) {
2943         __ call_opt_virtual(target, receiver, result_register,
2944                             SharedRuntime::get_resolve_opt_virtual_call_stub(),
2945                             arg_list, info);
2946       } else if (x->vtable_index() < 0) {
2947         __ call_icvirtual(target, receiver, result_register,
2948                           SharedRuntime::get_resolve_virtual_call_stub(),
2949                           arg_list, info);
2950       } else {
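        // dispatch through the vtable: compute the byte offset of the
        // vtable entry's Method* slot within the receiver's klass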
2951         int entry_offset = InstanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
2952         int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
2953         __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
2954       }
2955       break;
2956     case Bytecodes::_invokedynamic: {
2957       __ call_dynamic(target, receiver, result_register,
2958                       SharedRuntime::get_resolve_static_call_stub(),
2959                       arg_list, info);
2960       break;
2961     }
2962     default:
2963       fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(x->code())));
2964       break;
2965   }
2966 
2967   // JSR 292
2968   // Restore the SP after MethodHandle call sites, if needed.
2969   if (is_method_handle_invoke
2970       && FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
2971     __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
2972   }
2973 
2974   if (x->type()->is_float() || x->type()->is_double()) {
    // Force rounding of results from non-strictfp callees when in a
    // strictfp scope (or when we don't know the strictness of the
    // callee, to be safe).
2978     if (method()->is_strict()) {
2979       if (!x->target_is_loaded() || !x->target_is_strictfp()) {
2980         result_register = round_item(result_register);
2981       }
2982     }
2983   }
2984 
2985   if (result_register->is_valid()) {
2986     LIR_Opr result = rlock_result(x);
2987     __ move(result_register, result);
2988   }
2989 }
2990 
2991 
2992 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
2993   assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem value(x->argument_at(0), this);
2995   LIR_Opr reg = rlock_result(x);
2996   value.load_item();
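  // the raw bit-pattern conversions (e.g. intBitsToFloat) are implemented by
  // spilling the value to a stack slot and reloading it with the result type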
2997   LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
2998   __ move(tmp, reg);
2999 }
3000 
3001 
3002 
// Code for: x->x() {x->cond()} x->y() ? x->tval() : x->fval()
3004 void LIRGenerator::do_IfOp(IfOp* x) {
3005 #ifdef ASSERT
3006   {
3007     ValueTag xtag = x->x()->type()->tag();
3008     ValueTag ttag = x->tval()->type()->tag();
3009     assert(xtag == intTag || xtag == objectTag, "cannot handle others");
3010     assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
3011     assert(ttag == x->fval()->type()->tag(), "cannot handle others");
3012   }
3013 #endif
3014 
3015   LIRItem left(x->x(), this);
3016   LIRItem right(x->y(), this);
3017   left.load_item();
3018   if (can_inline_as_constant(right.value())) {
3019     right.dont_load_item();
3020   } else {
3021     right.load_item();
3022   }
3023 
3024   LIRItem t_val(x->tval(), this);
3025   LIRItem f_val(x->fval(), this);
3026   t_val.dont_load_item();
3027   f_val.dont_load_item();
3028   LIR_Opr reg = rlock_result(x);
3029 
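  // compare the operands and select between the true and false values with a
  // conditional move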
3030   __ cmp(lir_cond(x->cond()), left.result(), right.result());
3031   __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
3032 }
3033 
3034 void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) {
  assert(x->number_of_arguments() == expected_arguments, "wrong type");
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime_leaf(routine, getThreadTemp(),
                       reg, new LIR_OprList());
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
3041 }
3042 
3043 #ifdef TRACE_HAVE_INTRINSICS
3044 void LIRGenerator::do_ThreadIDIntrinsic(Intrinsic* x) {
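  // load the JavaThread's OSThread and read the platform thread id from it;
  // the width of the id is platform-dependent, so a 64-bit id is narrowed
  // to the result with l2i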
  LIR_Opr thread = getThreadPointer();
  LIR_Opr osthread = new_pointer_register();
  __ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread);
  size_t thread_id_size = OSThread::thread_id_size();
  if (thread_id_size == (size_t) BytesPerLong) {
    LIR_Opr id = new_register(T_LONG);
    __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_LONG), id);
    __ convert(Bytecodes::_l2i, id, rlock_result(x));
  } else if (thread_id_size == (size_t) BytesPerInt) {
    __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_INT), rlock_result(x));
  } else {
    ShouldNotReachHere();
  }
3058 }
3059 
3060 void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
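  // load the Klass* out of the java.lang.Class mirror (this load carries the
  // CodeEmitInfo for the implicit null check), set the low tag bit of the
  // klass' trace id word, store it back, and return the id with the low two
  // tag bits masked off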
  CodeEmitInfo* info = state_for(x);
  BasicType klass_pointer_type = NOT_LP64(T_INT) LP64_ONLY(T_LONG);
  assert(info != NULL, "must have info");
  LIRItem arg(x->argument_at(1), this);
  arg.load_item();
  LIR_Opr klass = new_pointer_register();
  __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), klass_pointer_type), klass, info);
  LIR_Opr id = new_register(T_LONG);
  ByteSize offset = TRACE_ID_OFFSET;
  LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
  __ move(trace_id_addr, id);
  __ logical_or(id, LIR_OprFact::longConst(0x01L), id);
  __ store(id, trace_id_addr);
  __ logical_and(id, LIR_OprFact::longConst(~0x3L), id);
  __ move(id, rlock_result(x));
3077 }
3078 #endif
3079 
3080 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
3081   switch (x->id()) {
3082   case vmIntrinsics::_intBitsToFloat      :
3083   case vmIntrinsics::_doubleToRawLongBits :
3084   case vmIntrinsics::_longBitsToDouble    :
3085   case vmIntrinsics::_floatToRawIntBits   : {
3086     do_FPIntrinsics(x);
3087     break;
3088   }
3089 
3090 #ifdef TRACE_HAVE_INTRINSICS
3091   case vmIntrinsics::_threadID: do_ThreadIDIntrinsic(x); break;
3092   case vmIntrinsics::_classID: do_ClassIDIntrinsic(x); break;
3093   case vmIntrinsics::_counterTime:
3094     do_RuntimeCall(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), 0, x);
3095     break;
3096 #endif
3097 
3098   case vmIntrinsics::_currentTimeMillis:
3099     do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), 0, x);
3100     break;
3101 
3102   case vmIntrinsics::_nanoTime:
3103     do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), 0, x);
3104     break;
3105 
3106   case vmIntrinsics::_Object_init:    do_RegisterFinalizer(x); break;
3107   case vmIntrinsics::_isInstance:     do_isInstance(x);    break;
3108   case vmIntrinsics::_getClass:       do_getClass(x);      break;
3109   case vmIntrinsics::_currentThread:  do_currentThread(x); break;
3110 
3111   case vmIntrinsics::_dlog:           // fall through
3112   case vmIntrinsics::_dlog10:         // fall through
3113   case vmIntrinsics::_dabs:           // fall through
3114   case vmIntrinsics::_dsqrt:          // fall through
3115   case vmIntrinsics::_dtan:           // fall through
3116   case vmIntrinsics::_dsin :          // fall through
3117   case vmIntrinsics::_dcos :          // fall through
3118   case vmIntrinsics::_dexp :          // fall through
3119   case vmIntrinsics::_dpow :          do_MathIntrinsic(x); break;
3120   case vmIntrinsics::_arraycopy:      do_ArrayCopy(x);     break;
3121 
3122   // java.nio.Buffer.checkIndex
3123   case vmIntrinsics::_checkIndex:     do_NIOCheckIndex(x); break;
3124 
3125   case vmIntrinsics::_compareAndSwapObject:
3126     do_CompareAndSwap(x, objectType);
3127     break;
3128   case vmIntrinsics::_compareAndSwapInt:
3129     do_CompareAndSwap(x, intType);
3130     break;
3131   case vmIntrinsics::_compareAndSwapLong:
3132     do_CompareAndSwap(x, longType);
3133     break;
3134 
3135   case vmIntrinsics::_loadFence :
3136     if (os::is_MP()) __ membar_acquire();
3137     break;
3138   case vmIntrinsics::_storeFence:
3139     if (os::is_MP()) __ membar_release();
3140     break;
3141   case vmIntrinsics::_fullFence :
3142     if (os::is_MP()) __ membar();
3143     break;
3144 
3145   case vmIntrinsics::_Reference_get:
3146     do_Reference_get(x);
3147     break;
3148 
3149   case vmIntrinsics::_updateCRC32:
3150   case vmIntrinsics::_updateBytesCRC32:
3151   case vmIntrinsics::_updateByteBufferCRC32:
3152     do_update_CRC32(x);
3153     break;
3154 
3155   default: ShouldNotReachHere(); break;
3156   }
3157 }
3158 
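// Record the observed types of a call's arguments in the CallTypeData or
// VirtualCallTypeData entry of the caller's MDO. Where profile_type() can
// prove an argument's exact type statically, the MDO slot is set directly
// at compile time and no profiling code is emitted for it.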
3159 void LIRGenerator::profile_arguments(ProfileCall* x) {
3160   if (compilation()->profile_arguments()) {
3161     int bci = x->bci_of_invoke();
3162     ciMethodData* md = x->method()->method_data_or_null();
3163     ciProfileData* data = md->bci_to_data(bci);
3164     if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) ||
3165         (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) {
3166       ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
3167       int base_offset = md->byte_offset_of_slot(data, extra);
3168       LIR_Opr mdp = LIR_OprFact::illegalOpr;
3169       ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();
3170 
3171       Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3172       int start = 0;
3173       int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
3174       if (x->inlined() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) {
3175         // first argument is not profiled at call (method handle invoke)
3176         assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected");
3177         start = 1;
3178       }
3179       ciSignature* callee_signature = x->callee()->signature();
3180       // method handle call to virtual method
3181       bool has_receiver = x->inlined() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
3182       ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL);
3183 
3184       bool ignored_will_link;
3185       ciSignature* signature_at_call = NULL;
3186       x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3187       ciSignatureStream signature_at_call_stream(signature_at_call);
3188 
3189       // if called through method handle invoke, some arguments may have been popped
3190       for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) {
3191         int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
3192         ciKlass* exact = profile_type(md, base_offset, off,
3193                                       args->type(i), x->profiled_arg_at(i+start), mdp,
3194                                       !x->arg_needs_null_check(i+start),
3195                                       signature_at_call_stream.next_klass(), callee_signature_stream.next_klass());
3196         if (exact != NULL) {
3197           md->set_argument_type(bci, i, exact);
3198         }
3199       }
3200     } else {
3201 #ifdef ASSERT
3202       Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke());
3203       int n = x->nb_profiled_args();
3204       assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() ||
3205                                                   (x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))),
3206              "only at JSR292 bytecodes");
3207 #endif
3208     }
3209   }
3210 }
3211 
3212 // profile parameters on entry to an inlined method
3213 void LIRGenerator::profile_parameters_at_call(ProfileCall* x) {
3214   if (compilation()->profile_parameters() && x->inlined()) {
3215     ciMethodData* md = x->callee()->method_data_or_null();
3216     if (md != NULL) {
3217       ciParametersTypeData* parameters_type_data = md->parameters_type_data();
3218       if (parameters_type_data != NULL) {
        ciTypeStackSlotEntries* parameters = parameters_type_data->parameters();
3220         LIR_Opr mdp = LIR_OprFact::illegalOpr;
3221         bool has_receiver = !x->callee()->is_static();
3222         ciSignature* sig = x->callee()->signature();
3223         ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
3224         int i = 0; // to iterate on the Instructions
3225         Value arg = x->recv();
3226         bool not_null = false;
3227         int bci = x->bci_of_invoke();
3228         Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
        // The first parameter is the receiver, so that's what we start
        // with if it exists. One exception is a method handle call to a
        // virtual method: there the receiver is in the args list.
3232         if (arg == NULL || !Bytecodes::has_receiver(bc)) {
3233           i = 1;
3234           arg = x->profiled_arg_at(0);
3235           not_null = !x->arg_needs_null_check(0);
3236         }
3237         int k = 0; // to iterate on the profile data
3238         for (;;) {
3239           intptr_t profiled_k = parameters->type(k);
3240           ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
3241                                         in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)),
3242                                         profiled_k, arg, mdp, not_null, sig_stream.next_klass(), NULL);
          // If the profile is known statically, set it once and for all and do not emit any code
3244           if (exact != NULL) {
3245             md->set_parameter_type(k, exact);
3246           }
3247           k++;
3248           if (k >= parameters_type_data->number_of_parameters()) {
3249 #ifdef ASSERT
3250             int extra = 0;
3251             if (MethodData::profile_arguments() && TypeProfileParmsLimit != -1 &&
3252                 x->nb_profiled_args() >= TypeProfileParmsLimit &&
3253                 x->recv() != NULL && Bytecodes::has_receiver(bc)) {
3254               extra += 1;
3255             }
3256             assert(i == x->nb_profiled_args() - extra || (TypeProfileParmsLimit != -1 && TypeProfileArgsLimit > TypeProfileParmsLimit), "unused parameters?");
3257 #endif
3258             break;
3259           }
3260           arg = x->profiled_arg_at(i);
3261           not_null = !x->arg_needs_null_check(i);
3262           i++;
3263         }
3264       }
3265     }
3266   }
3267 }
3268 
3269 void LIRGenerator::do_ProfileCall(ProfileCall* x) {
3270   // Need recv in a temporary register so it interferes with the other temporaries
3271   LIR_Opr recv = LIR_OprFact::illegalOpr;
3272   LIR_Opr mdo = new_register(T_OBJECT);
3273   // tmp is used to hold the counters on SPARC
3274   LIR_Opr tmp = new_pointer_register();
3275 
3276   if (x->nb_profiled_args() > 0) {
3277     profile_arguments(x);
3278   }
3279 
3280   // profile parameters on inlined method entry including receiver
3281   if (x->recv() != NULL || x->nb_profiled_args() > 0) {
3282     profile_parameters_at_call(x);
3283   }
3284 
3285   if (x->recv() != NULL) {
3286     LIRItem value(x->recv(), this);
3287     value.load_item();
3288     recv = new_register(T_OBJECT);
3289     __ move(value.result(), recv);
3290   }
3291   __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3292 }
3293 
3294 void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
3295   int bci = x->bci_of_invoke();
3296   ciMethodData* md = x->method()->method_data_or_null();
3297   ciProfileData* data = md->bci_to_data(bci);
3298   assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
3299   ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
3300   LIR_Opr mdp = LIR_OprFact::illegalOpr;
3301 
3302   bool ignored_will_link;
3303   ciSignature* signature_at_call = NULL;
3304   x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3305 
3306   // The offset within the MDO of the entry to update may be too large
3307   // to be used in load/store instructions on some platforms. So have
3308   // profile_type() compute the address of the profile in a register.
3309   ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
3310                                 ret->type(), x->ret(), mdp,
3311                                 !x->needs_null_check(),
3312                                 signature_at_call->return_type()->as_klass(),
3313                                 x->callee()->signature()->return_type()->as_klass());
3314   if (exact != NULL) {
3315     md->set_return_type(bci, exact);
3316   }
3317 }
3318 
3319 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
  // We can safely ignore accessors here, since C2 will inline them anyway;
  // accessors are also always mature.
3322   if (!x->inlinee()->is_accessor()) {
3323     CodeEmitInfo* info = state_for(x, x->state(), true);
3324     // Notify the runtime very infrequently only to take care of counter overflows
3325     int freq_log = Tier23InlineeNotifyFreqLog;
3326     double scale;
3327     if (_method->has_option_value("CompileThresholdScaling", scale)) {
3328       freq_log = Arguments::scaled_freq_log(freq_log, scale);
3329     }
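    // right_n_bits(freq_log) yields the notification mask 2^freq_log - 1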
3330     increment_event_counter_impl(info, x->inlinee(), right_n_bits(freq_log), InvocationEntryBci, false, true);
3331   }
3332 }
3333 
3334 void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) {
3335   int freq_log;
3336   int level = compilation()->env()->comp_level();
3337   if (level == CompLevel_limited_profile) {
3338     freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
3339   } else if (level == CompLevel_full_profile) {
3340     freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
3341   } else {
3342     ShouldNotReachHere();
3343   }
3344   // Increment the appropriate invocation/backedge counter and notify the runtime.
3345   double scale;
3346   if (_method->has_option_value("CompileThresholdScaling", scale)) {
3347     freq_log = Arguments::scaled_freq_log(freq_log, scale);
3348   }
3349   increment_event_counter_impl(info, info->scope()->method(), right_n_bits(freq_log), bci, backedge, true);
3350 }
3351 
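// Decrement the nmethod age counter in the method's MethodCounters on entry;
// once the counter drops to zero (or below) the DeoptimizeStub is taken with
// Reason_tenured and the nmethod is made not entrant.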
3352 void LIRGenerator::decrement_age(CodeEmitInfo* info) {
3353   ciMethod* method = info->scope()->method();
3354   MethodCounters* mc_adr = method->ensure_method_counters();
3355   if (mc_adr != NULL) {
3356     LIR_Opr mc = new_pointer_register();
3357     __ move(LIR_OprFact::intptrConst(mc_adr), mc);
3358     int offset = in_bytes(MethodCounters::nmethod_age_offset());
3359     LIR_Address* counter = new LIR_Address(mc, offset, T_INT);
3360     LIR_Opr result = new_register(T_INT);
3361     __ load(counter, result);
3362     __ sub(result, LIR_OprFact::intConst(1), result);
3363     __ store(result, counter);
3364     // DeoptimizeStub will reexecute from the current state in code info.
3365     CodeStub* deopt = new DeoptimizeStub(info, Deoptimization::Reason_tenured,
3366                                          Deoptimization::Action_make_not_entrant);
3367     __ cmp(lir_cond_lessEqual, result, LIR_OprFact::intConst(0));
3368     __ branch(lir_cond_lessEqual, T_INT, deopt);
3369   }
3370 }
3371 
3372 
3373 void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
3374                                                 ciMethod *method, int frequency,
3375                                                 int bci, bool backedge, bool notify) {
  assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^n - 1 or 0");
3377   int level = _compilation->env()->comp_level();
3378   assert(level > CompLevel_simple, "Shouldn't be here");
3379 
3380   int offset = -1;
3381   LIR_Opr counter_holder;
3382   if (level == CompLevel_limited_profile) {
3383     MethodCounters* counters_adr = method->ensure_method_counters();
3384     if (counters_adr == NULL) {
3385       bailout("method counters allocation failed");
3386       return;
3387     }
3388     counter_holder = new_pointer_register();
3389     __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
3390     offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
3391                                  MethodCounters::invocation_counter_offset());
3392   } else if (level == CompLevel_full_profile) {
3393     counter_holder = new_register(T_METADATA);
3394     offset = in_bytes(backedge ? MethodData::backedge_counter_offset() :
3395                                  MethodData::invocation_counter_offset());
3396     ciMethodData* md = method->method_data_or_null();
3397     assert(md != NULL, "Sanity");
3398     __ metadata2reg(md->constant_encoding(), counter_holder);
3399   } else {
3400     ShouldNotReachHere();
3401   }
3402   LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
3403   LIR_Opr result = new_register(T_INT);
3404   __ load(counter, result);
3405   __ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result);
3406   __ store(result, counter);
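  // the mask test notifies the runtime once the low count bits wrap: assuming
  // count_increment == 1 << count_shift, (counter & (frequency << count_shift))
  // is zero once every frequency + 1 increments, e.g. every 1024th event for
  // freq_log = 10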
3407   if (notify) {
3408     LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT);
3409     LIR_Opr meth = new_register(T_METADATA);
3410     __ metadata2reg(method->constant_encoding(), meth);
3411     __ logical_and(result, mask, result);
3412     __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
    // The bci in the CodeEmitInfo may point at the cmp; for ifs we want the bci of the if itself
3414     CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
3415     __ branch(lir_cond_equal, T_INT, overflow);
3416     __ branch_destination(overflow->continuation());
3417   }
3418 }
3419 
3420 void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
3421   LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
3422   BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());
3423 
3424   if (x->pass_thread()) {
3425     signature->append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
3426     args->append(getThreadPointer());
3427   }
3428 
3429   for (int i = 0; i < x->number_of_arguments(); i++) {
3430     Value a = x->argument_at(i);
3431     LIRItem* item = new LIRItem(a, this);
3432     item->load_item();
3433     args->append(item->result());
3434     signature->append(as_BasicType(a->type()));
3435   }
3436 
3437   LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL);
3438   if (x->type() == voidType) {
3439     set_no_result(x);
3440   } else {
3441     __ move(result, rlock_result(x));
3442   }
3443 }
3444 
3445 #ifdef ASSERT
3446 void LIRGenerator::do_Assert(Assert *x) {
3447   ValueTag tag = x->x()->type()->tag();
3448   If::Condition cond = x->cond();
3449 
3450   LIRItem xitem(x->x(), this);
3451   LIRItem yitem(x->y(), this);
3452   LIRItem* xin = &xitem;
3453   LIRItem* yin = &yitem;
3454 
3455   assert(tag == intTag, "Only integer assertions are valid!");
3456 
3457   xin->load_item();
3458   yin->dont_load_item();
3459 
3460   set_no_result(x);
3461 
3462   LIR_Opr left = xin->result();
3463   LIR_Opr right = yin->result();
3464 
  __ lir_assert(lir_cond(cond), left, right, x->message(), true);
3466 }
3467 #endif
3468 
3469 void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
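  // emit a jump to the PredicateFailedStub (which deoptimizes) where the
  // predicate trips: unconditionally when x is null (or under
  // StressRangeCheckElimination), resolved at compile time when both operands
  // are int constants, and as a compare-and-branch otherwise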
3472   Instruction *a = x->x();
3473   Instruction *b = x->y();
3474   if (!a || StressRangeCheckElimination) {
3475     assert(!b || StressRangeCheckElimination, "B must also be null");
3476 
3477     CodeEmitInfo *info = state_for(x, x->state());
3478     CodeStub* stub = new PredicateFailedStub(info);
3479 
3480     __ jump(stub);
3481   } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
3482     int a_int = a->type()->as_IntConstant()->value();
3483     int b_int = b->type()->as_IntConstant()->value();
3484 
3485     bool ok = false;
3486 
    switch (x->cond()) {
3488       case Instruction::eql: ok = (a_int == b_int); break;
3489       case Instruction::neq: ok = (a_int != b_int); break;
3490       case Instruction::lss: ok = (a_int < b_int); break;
3491       case Instruction::leq: ok = (a_int <= b_int); break;
3492       case Instruction::gtr: ok = (a_int > b_int); break;
3493       case Instruction::geq: ok = (a_int >= b_int); break;
3494       case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
3495       case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
3496       default: ShouldNotReachHere();
3497     }
3498 
3499     if (ok) {
3500 
3501       CodeEmitInfo *info = state_for(x, x->state());
3502       CodeStub* stub = new PredicateFailedStub(info);
3503 
3504       __ jump(stub);
3505     }
3506   } else {
3507 
3508     ValueTag tag = x->x()->type()->tag();
3509     If::Condition cond = x->cond();
3510     LIRItem xitem(x->x(), this);
3511     LIRItem yitem(x->y(), this);
3512     LIRItem* xin = &xitem;
3513     LIRItem* yin = &yitem;
3514 
3515     assert(tag == intTag, "Only integer deoptimizations are valid!");
3516 
3517     xin->load_item();
3518     yin->dont_load_item();
3519     set_no_result(x);
3520 
3521     LIR_Opr left = xin->result();
3522     LIR_Opr right = yin->result();
3523 
3524     CodeEmitInfo *info = state_for(x, x->state());
3525     CodeStub* stub = new PredicateFailedStub(info);
3526 
3527     __ cmp(lir_cond(cond), left, right);
3528     __ branch(lir_cond(cond), right->type(), stub);
3529   }
3530 }
3531 
3532 
3533 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
3534   LIRItemList args(1);
3535   LIRItem value(arg1, this);
3536   args.append(&value);
3537   BasicTypeList signature;
3538   signature.append(as_BasicType(arg1->type()));
3539 
3540   return call_runtime(&signature, &args, entry, result_type, info);
3541 }
3542 
3543 
3544 LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
3545   LIRItemList args(2);
3546   LIRItem value1(arg1, this);
3547   LIRItem value2(arg2, this);
3548   args.append(&value1);
3549   args.append(&value2);
3550   BasicTypeList signature;
3551   signature.append(as_BasicType(arg1->type()));
3552   signature.append(as_BasicType(arg2->type()));
3553 
3554   return call_runtime(&signature, &args, entry, result_type, info);
3555 }
3556 
3557 
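// call_runtime where the arguments are already-loaded LIR_Oprs; they are
// moved into the locations required by the C calling convention (the
// LIRItemList overload below loads each item into its location instead)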
3558 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
3559                                    address entry, ValueType* result_type, CodeEmitInfo* info) {
3560   // get a result register
3561   LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3562   LIR_Opr result = LIR_OprFact::illegalOpr;
3563   if (result_type->tag() != voidTag) {
3564     result = new_register(result_type);
3565     phys_reg = result_register_for(result_type);
3566   }
3567 
3568   // move the arguments into the correct location
3569   CallingConvention* cc = frame_map()->c_calling_convention(signature);
3570   assert(cc->length() == args->length(), "argument mismatch");
3571   for (int i = 0; i < args->length(); i++) {
3572     LIR_Opr arg = args->at(i);
3573     LIR_Opr loc = cc->at(i);
3574     if (loc->is_register()) {
3575       __ move(arg, loc);
3576     } else {
3577       LIR_Address* addr = loc->as_address_ptr();
3583       if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3584         __ unaligned_move(arg, addr);
3585       } else {
3586         __ move(arg, addr);
3587       }
3588     }
3589   }
3590 
3591   if (info) {
3592     __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3593   } else {
3594     __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3595   }
3596   if (result->is_valid()) {
3597     __ move(phys_reg, result);
3598   }
3599   return result;
3600 }
3601 
3602 
3603 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
3604                                    address entry, ValueType* result_type, CodeEmitInfo* info) {
3605   // get a result register
3606   LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3607   LIR_Opr result = LIR_OprFact::illegalOpr;
3608   if (result_type->tag() != voidTag) {
3609     result = new_register(result_type);
3610     phys_reg = result_register_for(result_type);
3611   }
3612 
3613   // move the arguments into the correct location
3614   CallingConvention* cc = frame_map()->c_calling_convention(signature);
3615 
3616   assert(cc->length() == args->length(), "argument mismatch");
3617   for (int i = 0; i < args->length(); i++) {
3618     LIRItem* arg = args->at(i);
3619     LIR_Opr loc = cc->at(i);
3620     if (loc->is_register()) {
3621       arg->load_item_force(loc);
3622     } else {
3623       LIR_Address* addr = loc->as_address_ptr();
3624       arg->load_for_store(addr->type());
3625       if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3626         __ unaligned_move(arg->result(), addr);
3627       } else {
3628         __ move(arg->result(), addr);
3629       }
3630     }
3631   }
3632 
3633   if (info) {
3634     __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3635   } else {
3636     __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3637   }
3638   if (result->is_valid()) {
3639     __ move(phys_reg, result);
3640   }
3641   return result;
3642 }
3643 
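// memory barriers are only needed on multiprocessors, so no instructions are
// emitted when os::is_MP() is false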
3644 void LIRGenerator::do_MemBar(MemBar* x) {
3645   if (os::is_MP()) {
3646     LIR_Code code = x->code();
    switch (code) {
3648       case lir_membar_acquire   : __ membar_acquire(); break;
3649       case lir_membar_release   : __ membar_release(); break;
3650       case lir_membar           : __ membar(); break;
3651       case lir_membar_loadload  : __ membar_loadload(); break;
3652       case lir_membar_storestore: __ membar_storestore(); break;
3653       case lir_membar_loadstore : __ membar_loadstore(); break;
3654       case lir_membar_storeload : __ membar_storeload(); break;
3655       default                   : ShouldNotReachHere(); break;
3656     }
3657   }
3658 }