1 /*
   2  * Copyright 2005-2008 Sun Microsystems, Inc.  All Rights Reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  20  * CA 95054 USA or visit www.sun.com if you need additional information or
  21  * have any questions.
  22  *
  23  */
  24 
  25 # include "incls/_precompiled.incl"
  26 # include "incls/_c1_LIRGenerator_x86.cpp.incl"
  27 
  28 #ifdef ASSERT
  29 #define __ gen()->lir(__FILE__, __LINE__)->
  30 #else
  31 #define __ gen()->lir()->
  32 #endif
  33 
  34 // Item will be loaded into a byte register; Intel only
  35 void LIRItem::load_byte_item() {
  36   load_item();
  37   LIR_Opr res = result();
  38 
  39   if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
  40     // make sure that it is a byte register
  41     assert(!value()->type()->is_float() && !value()->type()->is_double(),
  42            "can't load floats in byte register");
  43     LIR_Opr reg = _gen->rlock_byte(T_BYTE);
  44     __ move(res, reg);
  45 
  46     _result = reg;
  47   }
  48 }
  49 
  50 
  51 void LIRItem::load_nonconstant() {
  52   LIR_Opr r = value()->operand();
  53   if (r->is_constant()) {
  54     _result = r;
  55   } else {
  56     load_item();
  57   }
  58 }
  59 
  60 //--------------------------------------------------------------
  61 //               LIRGenerator
  62 //--------------------------------------------------------------
  63 
  64 
// Fixed platform registers used by the x86 backend.
LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::rax_oop_opr; }  // exception oop is passed in rax
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::rdx_opr; }      // exception pc is passed in rdx
LIR_Opr LIRGenerator::divInOpr()        { return FrameMap::rax_opr; }      // idiv dividend must be in rax
LIR_Opr LIRGenerator::divOutOpr()       { return FrameMap::rax_opr; }      // idiv quotient appears in rax
LIR_Opr LIRGenerator::remOutOpr()       { return FrameMap::rdx_opr; }      // idiv remainder appears in rdx
LIR_Opr LIRGenerator::shiftCountOpr()   { return FrameMap::rcx_opr; }      // variable shift count must be in rcx
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::rax_opr; }      // scratch register for monitor enter/exit
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }  // no temp needed to reach the thread on x86
  73 
  74 
  75 LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  76   LIR_Opr opr;
  77   switch (type->tag()) {
  78     case intTag:     opr = FrameMap::rax_opr;          break;
  79     case objectTag:  opr = FrameMap::rax_oop_opr;      break;
  80     case longTag:    opr = FrameMap::long0_opr;        break;
  81     case floatTag:   opr = UseSSE >= 1 ? FrameMap::xmm0_float_opr  : FrameMap::fpu0_float_opr;  break;
  82     case doubleTag:  opr = UseSSE >= 2 ? FrameMap::xmm0_double_opr : FrameMap::fpu0_double_opr;  break;
  83 
  84     case addressTag:
  85     default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  86   }
  87 
  88   assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  89   return opr;
  90 }
  91 
  92 
  93 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  94   LIR_Opr reg = new_register(T_INT);
  95   set_vreg_flag(reg, LIRGenerator::byte_reg);
  96   return reg;
  97 }
  98 
  99 
 100 //--------- loading items into registers --------------------------------
 101 
 102 
 103 // i486 instructions can inline constants
 104 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
 105   if (type == T_SHORT || type == T_CHAR) {
 106     // there is no immediate move of word values in asembler_i486.?pp
 107     return false;
 108   }
 109   Constant* c = v->as_Constant();
 110   if (c && c->state() == NULL) {
 111     // constants of any type can be stored directly, except for
 112     // unloaded object constants.
 113     return true;
 114   }
 115   return false;
 116 }
 117 
 118 
 119 bool LIRGenerator::can_inline_as_constant(Value v) const {
 120   if (v->type()->tag() == longTag) return false;
 121   return v->type()->tag() != objectTag ||
 122     (v->type()->is_constant() && v->type()->as_ObjectType()->constant_value()->is_null_object());
 123 }
 124 
 125 
 126 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
 127   if (c->type() == T_LONG) return false;
 128   return c->type() != T_OBJECT || c->as_jobject() == NULL;
 129 }
 130 
 131 
// No register is needed for safepoint polling on x86.
LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}
 135 
 136 
 137 LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
 138                                             int shift, int disp, BasicType type) {
 139   assert(base->is_register(), "must be");
 140   if (index->is_constant()) {
 141     return new LIR_Address(base,
 142                            (index->as_constant_ptr()->as_jint() << shift) + disp,
 143                            type);
 144   } else {
 145     return new LIR_Address(base, index, (LIR_Address::Scale)shift, disp, type);
 146   }
 147 }
 148 
 149 
// Build the address of an array element: array base + header offset +
// index scaled by the element size.  A constant index is folded into the
// displacement.  When a precise card mark is needed, the full address is
// materialized into a register so it can be reused for the card mark.
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type, bool needs_card_mark) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    int elem_size = type2aelembytes(type);
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + index_opr->as_jint() * elem_size, type);
  } else {
#ifdef _LP64
    // 64-bit addressing requires a 64-bit index register; widen an int index.
    if (index_opr->type() == T_INT) {
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
#endif // _LP64
    addr =  new LIR_Address(array_opr,
                            index_opr,
                            LIR_Address::scale(type),
                            offset_in_bytes, type);
  }
  if (needs_card_mark) {
    // This store will need a precise card mark, so go ahead and
    // compute the full address instead of computing once for the
    // store and again for the card mark.
    LIR_Opr tmp = new_pointer_register();
    __ leal(LIR_OprFact::address(addr), tmp);
    return new LIR_Address(tmp, 0, type);
  } else {
    return addr;
  }
}
 183 
 184 
 185 void LIRGenerator::increment_counter(address counter, int step) {
 186   LIR_Opr pointer = new_pointer_register();
 187   __ move(LIR_OprFact::intptrConst(counter), pointer);
 188   LIR_Address* addr = new LIR_Address(pointer, 0, T_INT);
 189   increment_counter(addr, step);
 190 }
 191 
 192 
// Add 'step' to the int at 'addr' in place; the address is cast to an
// operand so the same memory location serves as source and destination.
void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  __ add((LIR_Opr)addr, LIR_OprFact::intConst(step), (LIR_Opr)addr);
}
 196 
 197 
// Compare the int at [base + disp] against the constant 'c'.
void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  __ cmp_mem_int(condition, base, disp, c, info);
}
 201 
 202 
// Compare 'reg' against memory at [base + disp] (constant displacement).
void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  __ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info);
}
 206 
 207 
// Compare 'reg' against memory at [base + disp] (displacement in a register).
void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, LIR_Opr disp, BasicType type, CodeEmitInfo* info) {
  __ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info);
}
 211 
 212 
 213 bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
 214   if (tmp->is_valid()) {
 215     if (is_power_of_2(c + 1)) {
 216       __ move(left, tmp);
 217       __ shift_left(left, log2_intptr(c + 1), left);
 218       __ sub(left, tmp, result);
 219       return true;
 220     } else if (is_power_of_2(c - 1)) {
 221       __ move(left, tmp);
 222       __ shift_left(left, log2_intptr(c - 1), left);
 223       __ add(left, tmp, result);
 224       return true;
 225     }
 226   }
 227   return false;
 228 }
 229 
 230 
 231 void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
 232   BasicType type = item->type();
 233   __ store(item, new LIR_Address(FrameMap::rsp_opr, in_bytes(offset_from_sp), type));
 234 }
 235 
 236 //----------------------------------------------------------------------
 237 //             visitor functions
 238 //----------------------------------------------------------------------
 239 
 240 
// Generate LIR for an array element store (*astore bytecodes):
// loads the operands, emits range/null checks, a dynamic type check for
// object stores, and GC barriers around the store when storing oops.
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_root(),"");
  bool needs_range_check = true;
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  // Storing a known-null constant never needs a store (type) check.
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length) {
    needs_range_check = x->compute_needs_range_check();
    if (needs_range_check) {
      length.set_instruction(x->length());
      length.load_item();
    }
  }
  if (needs_store_check) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // the CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  // emit array address setup early so it schedules better
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      // length is already loaded; compare index against it directly
      // (unsigned compare also catches a negative index)
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // range_check also does the null check
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    // dynamic type check: value must be assignable to the array's element type
    LIR_Opr tmp1 = new_register(objectType);
    LIR_Opr tmp2 = new_register(objectType);
    LIR_Opr tmp3 = new_register(objectType);

    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info);
  }

  if (obj_store) {
    // Needs GC write barriers.
    pre_barrier(LIR_OprFact::address(array_addr), false, NULL);
    __ move(value.result(), array_addr, null_check_info);
    // Seems to be a precise
    post_barrier(LIR_OprFact::address(array_addr), value.result());
  } else {
    __ move(value.result(), array_addr, null_check_info);
  }
}
 314 
 315 
// Generate LIR for monitorenter: loads the object, allocates a slot
// operand for the monitor, and emits the lock sequence.
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_root(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for biased locking on x86
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_register(T_INT);
  }

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    // state before the lock is taken, for a possible NullPointerException
    info_for_exception = state_for(x, x->lock_stack_before());
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                        x->monitor_no(), info_for_exception, info);
}
 341 
 342 
// Generate LIR for monitorexit: the object itself is not reloaded
// (dont_load_item); the unlock sequence works from the monitor slot,
// using obj_temp as a scratch register.
void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_root(),"");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), x->monitor_no());
}
 354 
 355 
 356 // _ineg, _lneg, _fneg, _dneg
 357 void LIRGenerator::do_NegateOp(NegateOp* x) {
 358   LIRItem value(x->x(), this);
 359   value.set_destroys_register();
 360   value.load_item();
 361   LIR_Opr reg = rlock(x);
 362   __ negate(value.result(), reg);
 363 
 364   set_result(x, round_item(reg));
 365 }
 366 
 367 
 368 // for  _fadd, _fmul, _fsub, _fdiv, _frem
 369 //      _dadd, _dmul, _dsub, _ddiv, _drem
 370 void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
 371   LIRItem left(x->x(),  this);
 372   LIRItem right(x->y(), this);
 373   LIRItem* left_arg  = &left;
 374   LIRItem* right_arg = &right;
 375   assert(!left.is_stack() || !right.is_stack(), "can't both be memory operands");
 376   bool must_load_both = (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem);
 377   if (left.is_register() || x->x()->type()->is_constant() || must_load_both) {
 378     left.load_item();
 379   } else {
 380     left.dont_load_item();
 381   }
 382 
 383   // do not load right operand if it is a constant.  only 0 and 1 are
 384   // loaded because there are special instructions for loading them
 385   // without memory access (not needed for SSE2 instructions)
 386   bool must_load_right = false;
 387   if (right.is_constant()) {
 388     LIR_Const* c = right.result()->as_constant_ptr();
 389     assert(c != NULL, "invalid constant");
 390     assert(c->type() == T_FLOAT || c->type() == T_DOUBLE, "invalid type");
 391 
 392     if (c->type() == T_FLOAT) {
 393       must_load_right = UseSSE < 1 && (c->is_one_float() || c->is_zero_float());
 394     } else {
 395       must_load_right = UseSSE < 2 && (c->is_one_double() || c->is_zero_double());
 396     }
 397   }
 398 
 399   if (must_load_both) {
 400     // frem and drem destroy also right operand, so move it to a new register
 401     right.set_destroys_register();
 402     right.load_item();
 403   } else if (right.is_register() || must_load_right) {
 404     right.load_item();
 405   } else {
 406     right.dont_load_item();
 407   }
 408   LIR_Opr reg = rlock(x);
 409   LIR_Opr tmp = LIR_OprFact::illegalOpr;
 410   if (x->is_strictfp() && (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv)) {
 411     tmp = new_register(T_DOUBLE);
 412   }
 413 
 414   if ((UseSSE >= 1 && x->op() == Bytecodes::_frem) || (UseSSE >= 2 && x->op() == Bytecodes::_drem)) {
 415     // special handling for frem and drem: no SSE instruction, so must use FPU with temporary fpu stack slots
 416     LIR_Opr fpu0, fpu1;
 417     if (x->op() == Bytecodes::_frem) {
 418       fpu0 = LIR_OprFact::single_fpu(0);
 419       fpu1 = LIR_OprFact::single_fpu(1);
 420     } else {
 421       fpu0 = LIR_OprFact::double_fpu(0);
 422       fpu1 = LIR_OprFact::double_fpu(1);
 423     }
 424     __ move(right.result(), fpu1); // order of left and right operand is important!
 425     __ move(left.result(), fpu0);
 426     __ rem (fpu0, fpu1, fpu0);
 427     __ move(fpu0, reg);
 428 
 429   } else {
 430     arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), x->is_strictfp(), tmp);
 431   }
 432 
 433   set_result(x, round_item(reg));
 434 }
 435 
 436 
 437 // for  _ladd, _lmul, _lsub, _ldiv, _lrem
 438 void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
 439   if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem ) {
 440     // long division is implemented as a direct call into the runtime
 441     LIRItem left(x->x(), this);
 442     LIRItem right(x->y(), this);
 443 
 444     // the check for division by zero destroys the right operand
 445     right.set_destroys_register();
 446 
 447     BasicTypeList signature(2);
 448     signature.append(T_LONG);
 449     signature.append(T_LONG);
 450     CallingConvention* cc = frame_map()->c_calling_convention(&signature);
 451 
 452     // check for division by zero (destroys registers of right operand!)
 453     CodeEmitInfo* info = state_for(x);
 454 
 455     const LIR_Opr result_reg = result_register_for(x->type());
 456     left.load_item_force(cc->at(1));
 457     right.load_item();
 458 
 459     __ move(right.result(), cc->at(0));
 460 
 461     __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
 462     __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));
 463 
 464     address entry;
 465     switch (x->op()) {
 466     case Bytecodes::_lrem:
 467       entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
 468       break; // check if dividend is 0 is done elsewhere
 469     case Bytecodes::_ldiv:
 470       entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
 471       break; // check if dividend is 0 is done elsewhere
 472     case Bytecodes::_lmul:
 473       entry = CAST_FROM_FN_PTR(address, SharedRuntime::lmul);
 474       break;
 475     default:
 476       ShouldNotReachHere();
 477     }
 478 
 479     LIR_Opr result = rlock_result(x);
 480     __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
 481     __ move(result_reg, result);
 482   } else if (x->op() == Bytecodes::_lmul) {
 483     // missing test if instr is commutative and if we should swap
 484     LIRItem left(x->x(), this);
 485     LIRItem right(x->y(), this);
 486 
 487     // right register is destroyed by the long mul, so it must be
 488     // copied to a new register.
 489     right.set_destroys_register();
 490 
 491     left.load_item();
 492     right.load_item();
 493 
 494     LIR_Opr reg = FrameMap::long0_opr;
 495     arithmetic_op_long(x->op(), reg, left.result(), right.result(), NULL);
 496     LIR_Opr result = rlock_result(x);
 497     __ move(reg, result);
 498   } else {
 499     // missing test if instr is commutative and if we should swap
 500     LIRItem left(x->x(), this);
 501     LIRItem right(x->y(), this);
 502 
 503     left.load_item();
 504     // don't load constants to save register
 505     right.load_nonconstant();
 506     rlock_result(x);
 507     arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
 508   }
 509 }
 510 
 511 
 512 
 513 // for: _iadd, _imul, _isub, _idiv, _irem
 514 void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
 515   if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {
 516     // The requirements for division and modulo
 517     // input : rax,: dividend                         min_int
 518     //         reg: divisor   (may not be rax,/rdx)   -1
 519     //
 520     // output: rax,: quotient  (= rax, idiv reg)       min_int
 521     //         rdx: remainder (= rax, irem reg)       0
 522 
 523     // rax, and rdx will be destroyed
 524 
 525     // Note: does this invalidate the spec ???
 526     LIRItem right(x->y(), this);
 527     LIRItem left(x->x() , this);   // visit left second, so that the is_register test is valid
 528 
 529     // call state_for before load_item_force because state_for may
 530     // force the evaluation of other instructions that are needed for
 531     // correct debug info.  Otherwise the live range of the fix
 532     // register might be too long.
 533     CodeEmitInfo* info = state_for(x);
 534 
 535     left.load_item_force(divInOpr());
 536 
 537     right.load_item();
 538 
 539     LIR_Opr result = rlock_result(x);
 540     LIR_Opr result_reg;
 541     if (x->op() == Bytecodes::_idiv) {
 542       result_reg = divOutOpr();
 543     } else {
 544       result_reg = remOutOpr();
 545     }
 546 
 547     if (!ImplicitDiv0Checks) {
 548       __ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0));
 549       __ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
 550     }
 551     LIR_Opr tmp = FrameMap::rdx_opr; // idiv and irem use rdx in their implementation
 552     if (x->op() == Bytecodes::_irem) {
 553       __ irem(left.result(), right.result(), result_reg, tmp, info);
 554     } else if (x->op() == Bytecodes::_idiv) {
 555       __ idiv(left.result(), right.result(), result_reg, tmp, info);
 556     } else {
 557       ShouldNotReachHere();
 558     }
 559 
 560     __ move(result_reg, result);
 561   } else {
 562     // missing test if instr is commutative and if we should swap
 563     LIRItem left(x->x(),  this);
 564     LIRItem right(x->y(), this);
 565     LIRItem* left_arg = &left;
 566     LIRItem* right_arg = &right;
 567     if (x->is_commutative() && left.is_stack() && right.is_register()) {
 568       // swap them if left is real stack (or cached) and right is real register(not cached)
 569       left_arg = &right;
 570       right_arg = &left;
 571     }
 572 
 573     left_arg->load_item();
 574 
 575     // do not need to load right, as we can handle stack and constants
 576     if (x->op() == Bytecodes::_imul ) {
 577       // check if we can use shift instead
 578       bool use_constant = false;
 579       bool use_tmp = false;
 580       if (right_arg->is_constant()) {
 581         int iconst = right_arg->get_jint_constant();
 582         if (iconst > 0) {
 583           if (is_power_of_2(iconst)) {
 584             use_constant = true;
 585           } else if (is_power_of_2(iconst - 1) || is_power_of_2(iconst + 1)) {
 586             use_constant = true;
 587             use_tmp = true;
 588           }
 589         }
 590       }
 591       if (use_constant) {
 592         right_arg->dont_load_item();
 593       } else {
 594         right_arg->load_item();
 595       }
 596       LIR_Opr tmp = LIR_OprFact::illegalOpr;
 597       if (use_tmp) {
 598         tmp = new_register(T_INT);
 599       }
 600       rlock_result(x);
 601 
 602       arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
 603     } else {
 604       right_arg->dont_load_item();
 605       rlock_result(x);
 606       LIR_Opr tmp = LIR_OprFact::illegalOpr;
 607       arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
 608     }
 609   }
 610 }
 611 
 612 
 613 void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
 614   // when an operand with use count 1 is the left operand, then it is
 615   // likely that no move for 2-operand-LIR-form is necessary
 616   if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
 617     x->swap_operands();
 618   }
 619 
 620   ValueTag tag = x->type()->tag();
 621   assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
 622   switch (tag) {
 623     case floatTag:
 624     case doubleTag:  do_ArithmeticOp_FPU(x);  return;
 625     case longTag:    do_ArithmeticOp_Long(x); return;
 626     case intTag:     do_ArithmeticOp_Int(x);  return;
 627   }
 628   ShouldNotReachHere();
 629 }
 630 
 631 
 632 // _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
 633 void LIRGenerator::do_ShiftOp(ShiftOp* x) {
 634   // count must always be in rcx
 635   LIRItem value(x->x(), this);
 636   LIRItem count(x->y(), this);
 637 
 638   ValueTag elemType = x->type()->tag();
 639   bool must_load_count = !count.is_constant() || elemType == longTag;
 640   if (must_load_count) {
 641     // count for long must be in register
 642     count.load_item_force(shiftCountOpr());
 643   } else {
 644     count.dont_load_item();
 645   }
 646   value.load_item();
 647   LIR_Opr reg = rlock_result(x);
 648 
 649   shift_op(x->op(), reg, value.result(), count.result(), LIR_OprFact::illegalOpr);
 650 }
 651 
 652 
 653 // _iand, _land, _ior, _lor, _ixor, _lxor
 654 void LIRGenerator::do_LogicOp(LogicOp* x) {
 655   // when an operand with use count 1 is the left operand, then it is
 656   // likely that no move for 2-operand-LIR-form is necessary
 657   if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
 658     x->swap_operands();
 659   }
 660 
 661   LIRItem left(x->x(), this);
 662   LIRItem right(x->y(), this);
 663 
 664   left.load_item();
 665   right.load_nonconstant();
 666   LIR_Opr reg = rlock_result(x);
 667 
 668   logic_op(x->op(), reg, left.result(), right.result());
 669 }
 670 
 671 
 672 
 673 // _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
 674 void LIRGenerator::do_CompareOp(CompareOp* x) {
 675   LIRItem left(x->x(), this);
 676   LIRItem right(x->y(), this);
 677   ValueTag tag = x->x()->type()->tag();
 678   if (tag == longTag) {
 679     left.set_destroys_register();
 680   }
 681   left.load_item();
 682   right.load_item();
 683   LIR_Opr reg = rlock_result(x);
 684 
 685   if (x->x()->type()->is_float_kind()) {
 686     Bytecodes::Code code = x->op();
 687     __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
 688   } else if (x->x()->type()->tag() == longTag) {
 689     __ lcmp2int(left.result(), right.result(), reg);
 690   } else {
 691     Unimplemented();
 692   }
 693 }
 694 
 695 
// Intrinsic for sun.misc.AtomicLongCSImpl.attemptUpdate: 64-bit
// compare-and-swap on the object's value field, returning a boolean.
void LIRGenerator::do_AttemptUpdate(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  LIRItem obj       (x->argument_at(0), this);  // AtomicLong object
  LIRItem cmp_value (x->argument_at(1), this);  // value to compare with field
  LIRItem new_value (x->argument_at(2), this);  // replace field with new_value if it matches cmp_value

  // compare value must be in rdx,eax (hi,lo); may be destroyed by cmpxchg8 instruction
  cmp_value.load_item_force(FrameMap::long0_opr);

  // new value must be in rcx,ebx (hi,lo)
  new_value.load_item_force(FrameMap::long1_opr);

  // object pointer register is overwritten with field address
  obj.load_item();

  // generate compare-and-swap; produces zero condition if swap occurs
  int value_offset = sun_misc_AtomicLongCSImpl::value_offset();
  LIR_Opr addr = obj.result();
  __ add(addr, LIR_OprFact::intConst(value_offset), addr);
  LIR_Opr t1 = LIR_OprFact::illegalOpr;  // no temp needed
  LIR_Opr t2 = LIR_OprFact::illegalOpr;  // no temp needed
  __ cas_long(addr, cmp_value.result(), new_value.result(), t1, t2);

  // generate conditional move of boolean result
  LIR_Opr result = rlock_result(x);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result);
}
 723 
 724 
// Intrinsic for Unsafe compareAndSwap{Object,Int,Long}: CAS the field at
// obj+offset against 'cmp', storing 'val' on success, returning a
// boolean, with GC write barriers around object stores.
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp

  assert(obj.type()->tag() == objectTag, "invalid type");

  // In 64bit the type can be long, sparc doesn't have this assert
  // assert(offset.type()->tag() == intTag, "invalid type");

  assert(cmp.type()->tag() == type->tag(), "invalid type");
  assert(val.type()->tag() == type->tag(), "invalid type");

  // get address of field
  obj.load_item();
  offset.load_nonconstant();

  // cmpxchg requires the compare value in rax (or the rdx:rax pair for
  // 32-bit longs); long values also fix the new value's register pair
  if (type == objectType) {
    cmp.load_item_force(FrameMap::rax_oop_opr);
    val.load_item();
  } else if (type == intType) {
    cmp.load_item_force(FrameMap::rax_opr);
    val.load_item();
  } else if (type == longType) {
    cmp.load_item_force(FrameMap::long0_opr);
    val.load_item_force(FrameMap::long1_opr);
  } else {
    ShouldNotReachHere();
  }

  // compute the field address in a fresh pointer register
  LIR_Opr addr = new_pointer_register();
  __ move(obj.result(), addr);
  __ add(addr, offset.result(), addr);

  if (type == objectType) {  // Write-barrier needed for Object fields.
    // Do the pre-write barrier, if any.
    pre_barrier(addr, false, NULL);
  }

  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  if (type == objectType)
    __ cas_obj(addr, cmp.result(), val.result(), ill, ill);
  else if (type == intType)
    __ cas_int(addr, cmp.result(), val.result(), ill, ill);
  else if (type == longType)
    __ cas_long(addr, cmp.result(), val.result(), ill, ill);
  else {
    ShouldNotReachHere();
  }

  // generate conditional move of boolean result
  LIR_Opr result = rlock_result(x);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result);
  if (type == objectType) {   // Write-barrier needed for Object fields.
    // Seems to be precise
    post_barrier(addr, val.result());
  }
}
 785 
 786 
// Math intrinsics (abs, sqrt, sin, cos, tan, log, log10).  With SSE2,
// the transcendental functions still have no SSE instruction, so the
// computation is routed through the x87 FPU stack instead.
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem value(x->argument_at(0), this);

  bool use_fpu = false;
  if (UseSSE >= 2) {
    // intentional fall-through: all the listed ids set use_fpu
    switch(x->id()) {
      case vmIntrinsics::_dsin:
      case vmIntrinsics::_dcos:
      case vmIntrinsics::_dtan:
      case vmIntrinsics::_dlog:
      case vmIntrinsics::_dlog10:
        use_fpu = true;
    }
  } else {
    value.set_destroys_register();
  }

  value.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_result = rlock_result(x);

  // sin and cos need two free fpu stack slots, so register two temporary operands
  LIR_Opr tmp1 = FrameMap::caller_save_fpu_reg_at(0);
  LIR_Opr tmp2 = FrameMap::caller_save_fpu_reg_at(1);

  if (use_fpu) {
    // route the SSE input through fpu0; the temps shift up by one slot
    LIR_Opr tmp = FrameMap::fpu0_double_opr;
    __ move(calc_input, tmp);

    calc_input = tmp;
    calc_result = tmp;
    tmp1 = FrameMap::caller_save_fpu_reg_at(1);
    tmp2 = FrameMap::caller_save_fpu_reg_at(2);
  }

  switch(x->id()) {
    case vmIntrinsics::_dabs:   __ abs  (calc_input, calc_result, LIR_OprFact::illegalOpr); break;
    case vmIntrinsics::_dsqrt:  __ sqrt (calc_input, calc_result, LIR_OprFact::illegalOpr); break;
    case vmIntrinsics::_dsin:   __ sin  (calc_input, calc_result, tmp1, tmp2);              break;
    case vmIntrinsics::_dcos:   __ cos  (calc_input, calc_result, tmp1, tmp2);              break;
    case vmIntrinsics::_dtan:   __ tan  (calc_input, calc_result, tmp1, tmp2);              break;
    case vmIntrinsics::_dlog:   __ log  (calc_input, calc_result, LIR_OprFact::illegalOpr); break;
    case vmIntrinsics::_dlog10: __ log10(calc_input, calc_result, LIR_OprFact::illegalOpr); break;
    default:                    ShouldNotReachHere();
  }

  if (use_fpu) {
    // move the FPU-stack result back into the operation's real result operand
    __ move(calc_result, x->operand());
  }
}
 839 
 840 
void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  // Inlined System.arraycopy: the five arguments are src, srcPos, dst,
  // dstPos and length.
  assert(x->number_of_arguments() == 5, "wrong type");
  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

#ifndef _LP64
  src.load_item_force     (FrameMap::rcx_oop_opr);
  src_pos.load_item_force (FrameMap::rdx_opr);
  dst.load_item_force     (FrameMap::rax_oop_opr);
  dst_pos.load_item_force (FrameMap::rbx_opr);
  length.load_item_force  (FrameMap::rdi_opr);
  LIR_Opr tmp =           (FrameMap::rsi_opr);
#else

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  LIR_Opr tmp =           FrameMap::as_opr(j_rarg5);
#endif // LP64

  // arraycopy itself produces no value
  set_no_result(x);

  // Classify the copy into flags and, when statically known, the
  // expected array klass (drives which checks the stub can elide).
  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  CodeEmitInfo* info = state_for(x, x->state()); // we may want to have stack (deoptimization?)
  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}
 888 
 889 
 890 // _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
 891 // _i2b, _i2c, _i2s
 892 LIR_Opr fixed_register_for(BasicType type) {
 893   switch (type) {
 894     case T_FLOAT:  return FrameMap::fpu0_float_opr;
 895     case T_DOUBLE: return FrameMap::fpu0_double_opr;
 896     case T_INT:    return FrameMap::rax_opr;
 897     case T_LONG:   return FrameMap::long0_opr;
 898     default:       ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
 899   }
 900 }
 901 
 902 void LIRGenerator::do_Convert(Convert* x) {
 903   // flags that vary for the different operations and different SSE-settings
 904   bool fixed_input, fixed_result, round_result, needs_stub;
 905 
 906   switch (x->op()) {
 907     case Bytecodes::_i2l: // fall through
 908     case Bytecodes::_l2i: // fall through
 909     case Bytecodes::_i2b: // fall through
 910     case Bytecodes::_i2c: // fall through
 911     case Bytecodes::_i2s: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = false; break;
 912 
 913     case Bytecodes::_f2d: fixed_input = UseSSE == 1; fixed_result = false;       round_result = false;      needs_stub = false; break;
 914     case Bytecodes::_d2f: fixed_input = false;       fixed_result = UseSSE == 1; round_result = UseSSE < 1; needs_stub = false; break;
 915     case Bytecodes::_i2f: fixed_input = false;       fixed_result = false;       round_result = UseSSE < 1; needs_stub = false; break;
 916     case Bytecodes::_i2d: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = false; break;
 917     case Bytecodes::_f2i: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = true;  break;
 918     case Bytecodes::_d2i: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = true;  break;
 919     case Bytecodes::_l2f: fixed_input = false;       fixed_result = UseSSE >= 1; round_result = UseSSE < 1; needs_stub = false; break;
 920     case Bytecodes::_l2d: fixed_input = false;       fixed_result = UseSSE >= 2; round_result = UseSSE < 2; needs_stub = false; break;
 921     case Bytecodes::_f2l: fixed_input = true;        fixed_result = true;        round_result = false;      needs_stub = false; break;
 922     case Bytecodes::_d2l: fixed_input = true;        fixed_result = true;        round_result = false;      needs_stub = false; break;
 923     default: ShouldNotReachHere();
 924   }
 925 
 926   LIRItem value(x->value(), this);
 927   value.load_item();
 928   LIR_Opr input = value.result();
 929   LIR_Opr result = rlock(x);
 930 
 931   // arguments of lir_convert
 932   LIR_Opr conv_input = input;
 933   LIR_Opr conv_result = result;
 934   ConversionStub* stub = NULL;
 935 
 936   if (fixed_input) {
 937     conv_input = fixed_register_for(input->type());
 938     __ move(input, conv_input);
 939   }
 940 
 941   assert(fixed_result == false || round_result == false, "cannot set both");
 942   if (fixed_result) {
 943     conv_result = fixed_register_for(result->type());
 944   } else if (round_result) {
 945     result = new_register(result->type());
 946     set_vreg_flag(result, must_start_in_memory);
 947   }
 948 
 949   if (needs_stub) {
 950     stub = new ConversionStub(x->op(), conv_input, conv_result);
 951   }
 952 
 953   __ convert(x->op(), conv_input, conv_result, stub);
 954 
 955   if (result != conv_result) {
 956     __ move(conv_result, result);
 957   }
 958 
 959   assert(result->is_virtual(), "result must be virtual register");
 960   set_result(x, result);
 961 }
 962 
 963 
 964 void LIRGenerator::do_NewInstance(NewInstance* x) {
 965   if (PrintNotLoaded && !x->klass()->is_loaded()) {
 966     tty->print_cr("   ###class not loaded at new bci %d", x->bci());
 967   }
 968   CodeEmitInfo* info = state_for(x, x->state());
 969   LIR_Opr reg = result_register_for(x->type());
 970   LIR_Opr klass_reg = new_register(objectType);
 971   new_instance(reg, x->klass(),
 972                        FrameMap::rcx_oop_opr,
 973                        FrameMap::rdi_oop_opr,
 974                        FrameMap::rsi_oop_opr,
 975                        LIR_OprFact::illegalOpr,
 976                        FrameMap::rdx_oop_opr, info);
 977   LIR_Opr result = rlock_result(x);
 978   __ move(reg, result);
 979 }
 980 
 981 
 982 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
 983   CodeEmitInfo* info = state_for(x, x->state());
 984 
 985   LIRItem length(x->length(), this);
 986   length.load_item_force(FrameMap::rbx_opr);
 987 
 988   LIR_Opr reg = result_register_for(x->type());
 989   LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
 990   LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
 991   LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
 992   LIR_Opr tmp4 = reg;
 993   LIR_Opr klass_reg = FrameMap::rdx_oop_opr;
 994   LIR_Opr len = length.result();
 995   BasicType elem_type = x->elt_type();
 996 
 997   __ oop2reg(ciTypeArrayKlass::make(elem_type)->encoding(), klass_reg);
 998 
 999   CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
1000   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
1001 
1002   LIR_Opr result = rlock_result(x);
1003   __ move(reg, result);
1004 }
1005 
1006 
void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info =  state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  // The allocation result comes back in the fixed result register;
  // rcx/rsi/rdi are scratch registers for the inline allocation path
  // (the result register doubles as the fourth temp) and rdx holds the
  // array klass.
  const LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_oop_opr;

  // The length is required in the fixed register rbx.
  length.load_item_force(FrameMap::rbx_opr);
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciObject* obj = (ciObject*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    // the array klass could not be created (out of memory) -- abandon
    // the compilation of this method
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  // load the klass, going through a patching stub if it is not loaded yet
  jobject2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
1039 
1040 
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  // Wrap every dimension-size value in a LIRItem (iterating backwards
  // just fills the pre-sized list).
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(dims->length(), NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers.  This is handled transparently in other
    // places by the CodeEmitInfo cloning logic but is handled
    // specially here because a stub isn't being used.
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  // Pass the dimension sizes to the runtime as consecutive on-stack
  // slots; 'varargs' below points at the first one.
  // NOTE(review): the slots are 4 bytes apart -- confirm this matches
  // the layout new_multi_array expects on LP64 as well.
  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_nonconstant();

    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }

  // Load the array klass (patched if not yet loaded).
  LIR_Opr reg = result_register_for(x->type());
  jobject2reg_with_patching(reg, x->klass(), patching_info);

  // Runtime call: new_multi_array(klass, rank, &dimension_sizes).
  LIR_Opr rank = FrameMap::rbx_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::rcx_opr;
  __ move(FrameMap::rsp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(reg);
  args->append(rank);
  args->append(varargs);
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
1089 
1090 
void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now -- no per-block setup is needed on x86
}
1094 
1095 
void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception = state_for(x, x->state()->copy_locks());

  // Pick the exception stub: an incompatible-class-change check throws
  // IncompatibleClassChangeError (no failing object reported), a normal
  // checkcast throws ClassCastException with the failing object.
  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  // the third temp register is only allocated when the klass still needs
  // patching; otherwise it is passed as illegal
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType),
               !x->klass()->is_loaded() ? new_register(objectType) : LIR_OprFact::illegalOpr,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}
1124 
1125 
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp = new_register(objectType);
  // instanceof never throws, so unlike checkcast no exception info or
  // third temp register is needed (it is passed as illegal)
  __ instanceof(reg, obj.result(), x->klass(),
                tmp, new_register(objectType), LIR_OprFact::illegalOpr,
                x->direct_compare(), patching_info);
}
1142 
1143 
1144 void LIRGenerator::do_If(If* x) {
1145   assert(x->number_of_sux() == 2, "inconsistency");
1146   ValueTag tag = x->x()->type()->tag();
1147   bool is_safepoint = x->is_safepoint();
1148 
1149   If::Condition cond = x->cond();
1150 
1151   LIRItem xitem(x->x(), this);
1152   LIRItem yitem(x->y(), this);
1153   LIRItem* xin = &xitem;
1154   LIRItem* yin = &yitem;
1155 
1156   if (tag == longTag) {
1157     // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
1158     // mirror for other conditions
1159     if (cond == If::gtr || cond == If::leq) {
1160       cond = Instruction::mirror(cond);
1161       xin = &yitem;
1162       yin = &xitem;
1163     }
1164     xin->set_destroys_register();
1165   }
1166   xin->load_item();
1167   if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
1168     // inline long zero
1169     yin->dont_load_item();
1170   } else if (tag == longTag || tag == floatTag || tag == doubleTag) {
1171     // longs cannot handle constants at right side
1172     yin->load_item();
1173   } else {
1174     yin->dont_load_item();
1175   }
1176 
1177   // add safepoint before generating condition code so it can be recomputed
1178   if (x->is_safepoint()) {
1179     // increment backedge counter if needed
1180     increment_backedge_counter(state_for(x, x->state_before()));
1181 
1182     __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
1183   }
1184   set_no_result(x);
1185 
1186   LIR_Opr left = xin->result();
1187   LIR_Opr right = yin->result();
1188   __ cmp(lir_cond(cond), left, right);
1189   profile_branch(x, cond);
1190   move_to_phi(x->state());
1191   if (x->x()->type()->is_float_kind()) {
1192     __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
1193   } else {
1194     __ branch(lir_cond(cond), right->type(), x->tsux());
1195   }
1196   assert(x->default_sux() == x->fsux(), "wrong destination above");
1197   __ jump(x->default_sux());
1198 }
1199 
1200 
// Returns an operand holding the current JavaThread pointer.
LIR_Opr LIRGenerator::getThreadPointer() {
#ifdef _LP64
  // on x86_64 the thread is kept in the dedicated register r15_thread
  return FrameMap::as_pointer_opr(r15_thread);
#else
  // on 32-bit x86 there is no thread register, so materialize it
  LIR_Opr result = new_register(T_INT);
  __ get_thread(result);
  return result;
#endif // _LP64
}
1210 
1211 void LIRGenerator::trace_block_entry(BlockBegin* block) {
1212   store_stack_parameter(LIR_OprFact::intConst(block->block_id()), in_ByteSize(0));
1213   LIR_OprList* args = new LIR_OprList();
1214   address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
1215   __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
1216 }
1217 
1218 
// Stores a volatile field; 64-bit longs are written with a single FP move
// so the store is atomic.
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  if (address->type() == T_LONG) {
    // retype the address as T_DOUBLE so the store is performed as one
    // 64-bit FP move
    address = new LIR_Address(address->base(),
                              address->index(), address->scale(),
                              address->disp(), T_DOUBLE);
    // Transfer the value atomically by using FP moves.  This means
    // the value has to be moved between CPU and FPU registers.  It
    // always has to be moved through spill slot since there's no
    // quick way to pack the value into an SSE register.
    LIR_Opr temp_double = new_register(T_DOUBLE);
    LIR_Opr spill = new_register(T_LONG);
    set_vreg_flag(spill, must_start_in_memory);
    __ move(value, spill);
    __ volatile_move(spill, temp_double, T_LONG);
    __ volatile_move(temp_double, LIR_OprFact::address(address), T_LONG, info);
  } else {
    // all other field types can be stored directly
    __ store(value, address, info);
  }
}
1239 
1240 
1241 
// Loads a volatile field; 64-bit longs are read with a single FP move so
// the load is atomic (mirror of volatile_field_store above).
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  if (address->type() == T_LONG) {
    // retype the address as T_DOUBLE so the load is performed as one
    // 64-bit FP move
    address = new LIR_Address(address->base(),
                              address->index(), address->scale(),
                              address->disp(), T_DOUBLE);
    // Transfer the value atomically by using FP moves.  This means
    // the value has to be moved between CPU and FPU registers.  In
    // SSE0 and SSE1 mode it has to be moved through spill slot but in
    // SSE2+ mode it can be moved directly.
    LIR_Opr temp_double = new_register(T_DOUBLE);
    __ volatile_move(LIR_OprFact::address(address), temp_double, T_LONG, info);
    __ volatile_move(temp_double, result, T_LONG);
    if (UseSSE < 2) {
      // below SSE2 the value must go through a spill slot; no spill slot
      // is needed in SSE2 mode because a direct xmm->cpu register move
      // is possible there
      set_vreg_flag(result, must_start_in_memory);
    }
  } else {
    // all other field types can be loaded directly
    __ load(address, result, info);
  }
}
1263 
// Unsafe load from src+offset into dst.  Volatile longs are read with a
// 64-bit FP move routed through a spill slot (cf. volatile_field_load).
void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
                                     BasicType type, bool is_volatile) {
  if (is_volatile && type == T_LONG) {
    // address typed as T_DOUBLE so the load is one atomic 64-bit FP move
    LIR_Address* addr = new LIR_Address(src, offset, T_DOUBLE);
    LIR_Opr tmp = new_register(T_DOUBLE);
    __ load(addr, tmp);
    // move FP value to the long destination via an in-memory spill slot
    LIR_Opr spill = new_register(T_LONG);
    set_vreg_flag(spill, must_start_in_memory);
    __ move(tmp, spill);
    __ move(spill, dst);
  } else {
    // non-volatile or non-long: a plain load suffices
    LIR_Address* addr = new LIR_Address(src, offset, type);
    __ load(addr, dst);
  }
}
1279 
1280 
// Unsafe store of data to src+offset.  Volatile longs are written with a
// 64-bit FP move routed through a spill slot (cf. volatile_field_store);
// object stores get the GC write barriers.
void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
                                     BasicType type, bool is_volatile) {
  if (is_volatile && type == T_LONG) {
    // address typed as T_DOUBLE so the store is one atomic 64-bit FP move
    LIR_Address* addr = new LIR_Address(src, offset, T_DOUBLE);
    LIR_Opr tmp = new_register(T_DOUBLE);
    // NOTE(review): the spill vreg is created as T_DOUBLE here whereas
    // volatile_field_store uses T_LONG -- presumably an equivalent
    // 64-bit stack slot; confirm.
    LIR_Opr spill = new_register(T_DOUBLE);
    set_vreg_flag(spill, must_start_in_memory);
    __ move(data, spill);
    __ move(spill, tmp);
    __ move(tmp, addr);
  } else {
    LIR_Address* addr = new LIR_Address(src, offset, type);
    bool is_obj = (type == T_ARRAY || type == T_OBJECT);
    if (is_obj) {
      // Do the pre-write barrier, if any.
      pre_barrier(LIR_OprFact::address(addr), false, NULL);
      __ move(data, addr);
      assert(src->is_register(), "must be register");
      // Seems to be a precise address
      post_barrier(LIR_OprFact::address(addr), data);
    } else {
      // primitive store needs no barriers
      __ move(data, addr);
    }
  }
}