
src/cpu/x86/vm/c1_LIRGenerator_x86.cpp

rev 12906 : [mq]: gc_interface


  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Compilation.hpp"
  27 #include "c1/c1_FrameMap.hpp"
  28 #include "c1/c1_Instruction.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_LIRGenerator.hpp"
  31 #include "c1/c1_Runtime1.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArray.hpp"
  34 #include "ci/ciObjArrayKlass.hpp"
  35 #include "ci/ciTypeArrayKlass.hpp"
  36 #include "runtime/sharedRuntime.hpp"
  37 #include "runtime/stubRoutines.hpp"
  38 #include "vmreg_x86.inline.hpp"
  39 
  40 #ifdef ASSERT
  41 #define __ gen()->lir(__FILE__, __LINE__)->
  42 #else
  43 #define __ gen()->lir()->
  44 #endif
  45 
  46 // Item will be loaded into a byte register; Intel only
  47 void LIRItem::load_byte_item() {
  48   load_item();
  49   LIR_Opr res = result();
  50 
  51   if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
  52     // make sure that it is a byte register
  53     assert(!value()->type()->is_float() && !value()->type()->is_double(),
  54            "can't load floats in byte register");
  55     LIR_Opr reg = _gen->rlock_byte(T_BYTE);


 134   return v->type()->tag() != objectTag ||
 135     (v->type()->is_constant() && v->type()->as_ObjectType()->constant_value()->is_null_object());
 136 }
 137 
 138 
 139 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
 140   if (c->type() == T_LONG) return false;
 141   return c->type() != T_OBJECT || c->as_jobject() == NULL;
 142 }
 143 
 144 
 145 LIR_Opr LIRGenerator::safepoint_poll_register() {
 146   return LIR_OprFact::illegalOpr;
 147 }
 148 
 149 
 150 LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
 151                                             int shift, int disp, BasicType type) {
 152   assert(base->is_register(), "must be");
 153   if (index->is_constant()) {
 154     return new LIR_Address(base,
 155                            ((intx)(index->as_constant_ptr()->as_jint()) << shift) + disp,
 156                            type);
 157   } else {
 158     return new LIR_Address(base, index, (LIR_Address::Scale)shift, disp, type);
 159   }
 160 }
 161 
 162 
 163 LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
 164                                               BasicType type, bool needs_card_mark) {
 165   int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
 166 
 167   LIR_Address* addr;
 168   if (index_opr->is_constant()) {
 169     int elem_size = type2aelembytes(type);
 170     addr = new LIR_Address(array_opr,
 171                            offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
 172   } else {
 173 #ifdef _LP64
 174     if (index_opr->type() == T_INT) {
 175       LIR_Opr tmp = new_register(T_LONG);
 176       __ convert(Bytecodes::_i2l, index_opr, tmp);


 240       __ move(left, tmp);
 241       __ shift_left(left, log2_intptr(c + 1), left);
 242       __ sub(left, tmp, result);
 243       return true;
 244     } else if (is_power_of_2(c - 1)) {
 245       __ move(left, tmp);
 246       __ shift_left(left, log2_intptr(c - 1), left);
 247       __ add(left, tmp, result);
 248       return true;
 249     }
 250   }
 251   return false;
 252 }
 253 
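The two branches above strength-reduce a multiply by a constant c when c + 1 or c - 1 is a power of two, using x * (2^n - 1) = (x << n) - x and x * (2^n + 1) = (x << n) + x. A minimal stand-alone sketch of the same identity (strength_reduce_mul, is_pow2 and log2_exact are illustrative helpers, not part of this file):

    #include <stdint.h>

    // Sketch only: reduce x * c to a shift and an add/sub when possible.
    static int is_pow2(int64_t v)    { return v > 0 && (v & (v - 1)) == 0; }
    static int log2_exact(int64_t v) { int n = 0; while (v > 1) { v >>= 1; n++; } return n; }

    static int64_t strength_reduce_mul(int64_t x, int64_t c) {
      if (is_pow2(c + 1)) return (x << log2_exact(c + 1)) - x;  // c == 2^n - 1
      if (is_pow2(c - 1)) return (x << log2_exact(c - 1)) + x;  // c == 2^n + 1
      return x * c;                                             // no reduction applies
    }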
 254 
 255 void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
 256   BasicType type = item->type();
 257   __ store(item, new LIR_Address(FrameMap::rsp_opr, in_bytes(offset_from_sp), type));
 258 }
 259 
 260 //----------------------------------------------------------------------
 261 //             visitor functions
 262 //----------------------------------------------------------------------
 263 
 264 
 265 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
 266   assert(x->is_pinned(),"");
 267   bool needs_range_check = x->compute_needs_range_check();
 268   bool use_length = x->length() != NULL;
 269   bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
 270   bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
 271                                          !get_jobject_constant(x->value())->is_null_object() ||
 272                                          x->should_profile());
 273 
 274   LIRItem array(x->array(), this);
 275   LIRItem index(x->index(), this);
 276   LIRItem value(x->value(), this);
 277   LIRItem length(this);
 278 
 279   array.load_item();
 280   index.load_nonconstant();
 281 
 282   if (use_length && needs_range_check) {
 283     length.set_instruction(x->length());
 284     length.load_item();
 285 
 286   }
 287   if (needs_store_check || x->check_boolean()) {
 288     value.load_item();
 289   } else {
 290     value.load_for_store(x->elt_type());
 291   }
 292 
 293   set_no_result(x);
 294 
 295   // the CodeEmitInfo must be duplicated for each different
 296   // LIR-instruction because spilling can occur anywhere between two
 297   // instructions and so the debug information must be different
 298   CodeEmitInfo* range_check_info = state_for(x);
 299   CodeEmitInfo* null_check_info = NULL;
 300   if (x->needs_null_check()) {
 301     null_check_info = new CodeEmitInfo(range_check_info);
 302   }
 303 
 304   // emit array address setup early so it schedules better
 305   LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
 306 
 307   if (GenerateRangeChecks && needs_range_check) {
 308     if (use_length) {
 309       __ cmp(lir_cond_belowEqual, length.result(), index.result());
 310       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
 311     } else {
 312       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
 313       // range_check also does the null check
 314       null_check_info = NULL;
 315     }
 316   }
 317 
 318   if (GenerateArrayStoreCheck && needs_store_check) {
 319     LIR_Opr tmp1 = new_register(objectType);
 320     LIR_Opr tmp2 = new_register(objectType);
 321     LIR_Opr tmp3 = new_register(objectType);
 322 
 323     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
 324     __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
 325   }
 326 
 327   if (obj_store) {
 328     // Needs GC write barriers.
 329     pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
 330                 true /* do_load */, false /* patch */, NULL);
 331     __ move(value.result(), array_addr, null_check_info);
 332     // Seems to be a precise address
 333     post_barrier(LIR_OprFact::address(array_addr), value.result());
 334   } else {
 335     LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
 336     __ move(result, array_addr, null_check_info);
 337   }
 338 }
 339 
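A note on the non-oop branch above: when x->check_boolean() is set, maybe_mask_boolean narrows the stored value so that only 0 or 1 ever reaches a boolean array slot. Assuming that masking is a bitwise AND with 1 (a sketch, not the HotSpot helper itself):

    #include <stdint.h>

    // Sketch: mask an arbitrary byte down to a legal Java boolean value.
    static int8_t mask_boolean_store(int8_t value) {
      return value & 1;
    }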
 340 
 341 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 342   assert(x->is_pinned(),"");
 343   LIRItem obj(x->obj(), this);
 344   obj.load_item();
 345 
 346   set_no_result(x);
 347 
 348   // "lock" stores the address of the monitor stack slot, so this is not an oop
 349   LIR_Opr lock = new_register(T_INT);
 350   // Need a scratch register for biased locking on x86
 351   LIR_Opr scratch = LIR_OprFact::illegalOpr;
 352   if (UseBiasedLocking) {
 353     scratch = new_register(T_INT);
 354   }
 355 
 356   CodeEmitInfo* info_for_exception = NULL;
 357   if (x->needs_null_check()) {
 358     info_for_exception = state_for(x);
 359   }


 700   LIRItem left(x->x(), this);
 701   LIRItem right(x->y(), this);
 702   ValueTag tag = x->x()->type()->tag();
 703   if (tag == longTag) {
 704     left.set_destroys_register();
 705   }
 706   left.load_item();
 707   right.load_item();
 708   LIR_Opr reg = rlock_result(x);
 709 
 710   if (x->x()->type()->is_float_kind()) {
 711     Bytecodes::Code code = x->op();
 712     __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
 713   } else if (x->x()->type()->tag() == longTag) {
 714     __ lcmp2int(left.result(), right.result(), reg);
 715   } else {
 716     Unimplemented();
 717   }
 718 }
 719 
 720 
 721 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
 722   assert(x->number_of_arguments() == 4, "wrong type");
 723   LIRItem obj   (x->argument_at(0), this);  // object
 724   LIRItem offset(x->argument_at(1), this);  // offset of field
 725   LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
 726   LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
 727 
 728   assert(obj.type()->tag() == objectTag, "invalid type");
 729 
 730   // In 64-bit builds the offset type can be long; the SPARC version does not have this assert:
 731   // assert(offset.type()->tag() == intTag, "invalid type");
 732 
 733   assert(cmp.type()->tag() == type->tag(), "invalid type");
 734   assert(val.type()->tag() == type->tag(), "invalid type");
 735 
 736   // get address of field
 737   obj.load_item();
 738   offset.load_nonconstant();
 739 
 740   LIR_Opr addr = new_pointer_register();
 741   LIR_Address* a;
 742   if (offset.result()->is_constant()) {
 743 #ifdef _LP64
 744     jlong c = offset.result()->as_jlong();
 745     if ((jlong)((jint)c) == c) {
 746       a = new LIR_Address(obj.result(),
 747                           (jint)c,
 748                           as_BasicType(type));
 749     } else {
 750       LIR_Opr tmp = new_register(T_LONG);
 751       __ move(offset.result(), tmp);
 752       a = new LIR_Address(obj.result(),
 753                           tmp,
 754                           as_BasicType(type));
 755     }
 756 #else
 757     a = new LIR_Address(obj.result(),
 758                         offset.result()->as_jint(),
 759                         as_BasicType(type));
 760 #endif
 761   } else {
 762     a = new LIR_Address(obj.result(),
 763                         offset.result(),
 764                         0,
 765                         as_BasicType(type));
 766   }
 767   __ leal(LIR_OprFact::address(a), addr);
 768 
 769   if (type == objectType) {  // Write-barrier needed for Object fields.
 770     // Do the pre-write barrier, if any.
 771     pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
 772                 true /* do_load */, false /* patch */, NULL);
 773   }
 774 
 775   if (type == objectType) {
 776     cmp.load_item_force(FrameMap::rax_oop_opr);
 777     val.load_item();
 778   } else if (type == intType) {
 779     cmp.load_item_force(FrameMap::rax_opr);
 780     val.load_item();
 781   } else if (type == longType) {
 782     cmp.load_item_force(FrameMap::long0_opr);
 783     val.load_item_force(FrameMap::long1_opr);
 784   } else {
 785     ShouldNotReachHere();
 786   }
 787 
 788   LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
 789   if (type == objectType)
 790     __ cas_obj(addr, cmp.result(), val.result(), ill, ill);
 791   else if (type == intType)
 792     __ cas_int(addr, cmp.result(), val.result(), ill, ill);
 793   else if (type == longType)
 794     __ cas_long(addr, cmp.result(), val.result(), ill, ill);
 795   else {
 796     ShouldNotReachHere();
 797   }
 798 
 799   // generate conditional move of boolean result
 800   LIR_Opr result = rlock_result(x);
 801   __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
 802            result, as_BasicType(type));
 803   if (type == objectType) {   // Write-barrier needed for Object fields.
 804     // Seems to be precise
 805     post_barrier(addr, val.result());
 806   }
 807 }
 808 
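For reference, the cmove at the end of do_CompareAndSwap materializes the success flag of the cas_* operation as an integer 1/0 result. The overall semantics being lowered, expressed as a standard-library sketch rather than the generated LIR:

    #include <atomic>

    // Sketch: atomic compare-and-swap yielding 1 on success and 0 on failure,
    // mirroring the cas_obj/cas_int/cas_long + cmove sequence above.
    template <typename T>
    int cas_with_int_result(std::atomic<T>& field, T expected, T new_value) {
      return field.compare_exchange_strong(expected, new_value) ? 1 : 0;
    }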
 809 void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
 810   assert(x->number_of_arguments() == 3, "wrong type");
 811   assert(UseFMA, "Needs FMA instruction support.");
 812   LIRItem value(x->argument_at(0), this);
 813   LIRItem value1(x->argument_at(1), this);
 814   LIRItem value2(x->argument_at(2), this);
 815 
 816   value2.set_destroys_register();
 817 
 818   value.load_item();
 819   value1.load_item();
 820   value2.load_item();
 821 
 822   LIR_Opr calc_input = value.result();
 823   LIR_Opr calc_input1 = value1.result();
 824   LIR_Opr calc_input2 = value2.result();
 825   LIR_Opr calc_result = rlock_result(x);
 826 


1550                                         CodeEmitInfo* info) {
1551   if (address->type() == T_LONG) {
1552     address = new LIR_Address(address->base(),
1553                               address->index(), address->scale(),
1554                               address->disp(), T_DOUBLE);
1555     // Transfer the value atomically by using FP moves.  This means
1556     // the value has to be moved between CPU and FPU registers.  It
1557     // always has to be moved through spill slot since there's no
1558     // quick way to pack the value into an SSE register.
1559     LIR_Opr temp_double = new_register(T_DOUBLE);
1560     LIR_Opr spill = new_register(T_LONG);
1561     set_vreg_flag(spill, must_start_in_memory);
1562     __ move(value, spill);
1563     __ volatile_move(spill, temp_double, T_LONG);
1564     __ volatile_move(temp_double, LIR_OprFact::address(address), T_LONG, info);
1565   } else {
1566     __ store(value, address, info);
1567   }
1568 }
1569 
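Background for the T_LONG path above: on 32-bit x86 a plain 64-bit store is split into two 32-bit moves and is therefore not atomic, so the value is routed through an FPU/XMM register to get a single 8-byte access. A minimal sketch of the same idea with SSE2 intrinsics (assumes SSE2 is available; illustrative only, not the LIR lowering itself):

    #include <emmintrin.h>
    #include <stdint.h>

    // Sketch: perform a 64-bit store as one 8-byte access by bouncing the
    // value through an XMM register.
    static void store64_single_access(volatile int64_t* dst, int64_t value) {
      __m128i v = _mm_loadl_epi64((const __m128i*)&value);  // load low 64 bits
      _mm_storel_epi64((__m128i*)dst, v);                    // single 8-byte store
    }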
1570 
1571 
1572 void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
1573                                        CodeEmitInfo* info) {
1574   if (address->type() == T_LONG) {
1575     address = new LIR_Address(address->base(),
1576                               address->index(), address->scale(),
1577                               address->disp(), T_DOUBLE);
1578     // Transfer the value atomically by using FP moves.  This means
1579     // the value has to be moved between CPU and FPU registers.  In
1580     // SSE0 and SSE1 mode it has to be moved through spill slot but in
1581     // SSE2+ mode it can be moved directly.
1582     LIR_Opr temp_double = new_register(T_DOUBLE);
1583     __ volatile_move(LIR_OprFact::address(address), temp_double, T_LONG, info);
1584     __ volatile_move(temp_double, result, T_LONG);
1585     if (UseSSE < 2) {
1586       // below SSE2 the value must go through a spill slot; in SSE2+ mode an xmm->cpu register move is possible
1587       set_vreg_flag(result, must_start_in_memory);
1588     }
1589   } else {
1590     __ load(address, result, info);
1591   }
1592 }
1593 
1594 void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
1595                                      BasicType type, bool is_volatile) {
1596   if (is_volatile && type == T_LONG) {
1597     LIR_Address* addr = new LIR_Address(src, offset, T_DOUBLE);
1598     LIR_Opr tmp = new_register(T_DOUBLE);
1599     __ load(addr, tmp);
1600     LIR_Opr spill = new_register(T_LONG);
1601     set_vreg_flag(spill, must_start_in_memory);
1602     __ move(tmp, spill);
1603     __ move(spill, dst);
1604   } else {
1605     LIR_Address* addr = new LIR_Address(src, offset, type);
1606     __ load(addr, dst);
1607   }
1608 }
1609 
1610 
1611 void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
1612                                      BasicType type, bool is_volatile) {
1613   if (is_volatile && type == T_LONG) {
1614     LIR_Address* addr = new LIR_Address(src, offset, T_DOUBLE);
1615     LIR_Opr tmp = new_register(T_DOUBLE);
1616     LIR_Opr spill = new_register(T_DOUBLE);
1617     set_vreg_flag(spill, must_start_in_memory);
1618     __ move(data, spill);
1619     __ move(spill, tmp);
1620     __ move(tmp, addr);
1621   } else {
1622     LIR_Address* addr = new LIR_Address(src, offset, type);
1623     bool is_obj = (type == T_ARRAY || type == T_OBJECT);
1624     if (is_obj) {
1625       // Do the pre-write barrier, if any.
1626       pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
1627                   true /* do_load */, false /* patch */, NULL);
1628       __ move(data, addr);
1629       assert(src->is_register(), "must be register");
1630       // Seems to be a precise address
1631       post_barrier(LIR_OprFact::address(addr), data);
1632     } else {
1633       __ move(data, addr);
1634     }
1635   }
1636 }
1637 
1638 void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
1639   BasicType type = x->basic_type();
1640   LIRItem src(x->object(), this);
1641   LIRItem off(x->offset(), this);
1642   LIRItem value(x->value(), this);
1643 
1644   src.load_item();
1645   value.load_item();
1646   off.load_nonconstant();
1647 
1648   LIR_Opr dst = rlock_result(x, type);
1649   LIR_Opr data = value.result();
1650   bool is_obj = (type == T_ARRAY || type == T_OBJECT);
1651   LIR_Opr offset = off.result();
1652 
1653   assert (type == T_INT || (!x->is_add() && is_obj) LP64_ONLY( || type == T_LONG ), "unexpected type");
1654   LIR_Address* addr;
1655   if (offset->is_constant()) {
1656 #ifdef _LP64
1657     jlong c = offset->as_jlong();
1658     if ((jlong)((jint)c) == c) {
1659       addr = new LIR_Address(src.result(), (jint)c, type);
1660     } else {
1661       LIR_Opr tmp = new_register(T_LONG);
1662       __ move(offset, tmp);
1663       addr = new LIR_Address(src.result(), tmp, type);
1664     }
1665 #else
1666     addr = new LIR_Address(src.result(), offset->as_jint(), type);
1667 #endif
1668   } else {
1669     addr = new LIR_Address(src.result(), offset, type);
1670   }
1671 
1672   // Because we want a 2-arg form of xchg and xadd
1673   __ move(data, dst);
1674 
1675   if (x->is_add()) {
1676     __ xadd(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr);
1677   } else {
1678     if (is_obj) {
1679       // Do the pre-write barrier, if any.
1680       pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
1681                   true /* do_load */, false /* patch */, NULL);
1682     }
1683     __ xchg(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr);
1684     if (is_obj) {
1685       // Seems to be a precise address
1686       post_barrier(LIR_OprFact::address(addr), data);
1687     }
1688   }
1689 }
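For context, when x->is_add() is false this intrinsic is an atomic exchange: store the new value and return whatever the field held before. The equivalent semantics with the standard library (a sketch of what the xchg above implements, not the generated code):

    #include <atomic>

    // Sketch: atomically swap in a new value and return the previous one.
    template <typename T>
    T get_and_set(std::atomic<T>& field, T new_value) {
      return field.exchange(new_value);
    }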


  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Compilation.hpp"
  27 #include "c1/c1_FrameMap.hpp"
  28 #include "c1/c1_Instruction.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_LIRGenerator.hpp"
  31 #include "c1/c1_Runtime1.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArray.hpp"
  34 #include "ci/ciObjArrayKlass.hpp"
  35 #include "ci/ciTypeArrayKlass.hpp"
  36 #include "gc/shared/c1BarrierSetCodeGen.hpp"
  37 #include "runtime/sharedRuntime.hpp"
  38 #include "runtime/stubRoutines.hpp"
  39 #include "vmreg_x86.inline.hpp"
  40 
  41 #ifdef ASSERT
  42 #define __ gen()->lir(__FILE__, __LINE__)->
  43 #else
  44 #define __ gen()->lir()->
  45 #endif
  46 
  47 // Item will be loaded into a byte register; Intel only
  48 void LIRItem::load_byte_item() {
  49   load_item();
  50   LIR_Opr res = result();
  51 
  52   if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
  53     // make sure that it is a byte register
  54     assert(!value()->type()->is_float() && !value()->type()->is_double(),
  55            "can't load floats in byte register");
  56     LIR_Opr reg = _gen->rlock_byte(T_BYTE);


 135   return v->type()->tag() != objectTag ||
 136     (v->type()->is_constant() && v->type()->as_ObjectType()->constant_value()->is_null_object());
 137 }
 138 
 139 
 140 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
 141   if (c->type() == T_LONG) return false;
 142   return c->type() != T_OBJECT || c->as_jobject() == NULL;
 143 }
 144 
 145 
 146 LIR_Opr LIRGenerator::safepoint_poll_register() {
 147   return LIR_OprFact::illegalOpr;
 148 }
 149 
 150 
 151 LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
 152                                             int shift, int disp, BasicType type) {
 153   assert(base->is_register(), "must be");
 154   if (index->is_constant()) {
 155     LIR_Const *constant = index->as_constant_ptr();
 156 #ifdef _LP64
 157     jlong c;
 158     if (constant->type() == T_INT) {
 159       c = (jlong(index->as_jint()) << shift) + disp;
 160     } else {
 161       assert(constant->type() == T_LONG, "should be");
 162       c = (index->as_jlong() << shift) + disp;
 163     }
 164     if ((jlong)((jint)c) == c) {
 165       return new LIR_Address(base, (jint)c, type);
 166     } else {
 167       LIR_Opr tmp = new_register(T_LONG);
 168       __ move(index, tmp);
 169       return new LIR_Address(base, tmp, type);
 170     }
 171 #else
 172     return new LIR_Address(base,
 173                            ((intx)(constant->as_jint()) << shift) + disp,
 174                            type);
 175 #endif
 176   } else {
 177     return new LIR_Address(base, index, (LIR_Address::Scale)shift, disp, type);
 178   }
 179 }
 180 
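The (jlong)((jint)c) == c test above checks whether the folded displacement still fits in the signed 32-bit immediate of an x86 addressing mode; if not, the constant index is materialized in a register instead. The check in isolation (a sketch):

    #include <stdint.h>

    // Sketch: can this 64-bit displacement be encoded as an x86 disp32?
    static bool fits_in_disp32(int64_t disp) {
      return (int64_t)(int32_t)disp == disp;
    }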
 181 
 182 LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
 183                                               BasicType type, bool needs_card_mark) {
 184   int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
 185 
 186   LIR_Address* addr;
 187   if (index_opr->is_constant()) {
 188     int elem_size = type2aelembytes(type);
 189     addr = new LIR_Address(array_opr,
 190                            offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
 191   } else {
 192 #ifdef _LP64
 193     if (index_opr->type() == T_INT) {
 194       LIR_Opr tmp = new_register(T_LONG);
 195       __ convert(Bytecodes::_i2l, index_opr, tmp);


 259       __ move(left, tmp);
 260       __ shift_left(left, log2_intptr(c + 1), left);
 261       __ sub(left, tmp, result);
 262       return true;
 263     } else if (is_power_of_2(c - 1)) {
 264       __ move(left, tmp);
 265       __ shift_left(left, log2_intptr(c - 1), left);
 266       __ add(left, tmp, result);
 267       return true;
 268     }
 269   }
 270   return false;
 271 }
 272 
 273 
 274 void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
 275   BasicType type = item->type();
 276   __ store(item, new LIR_Address(FrameMap::rsp_opr, in_bytes(offset_from_sp), type));
 277 }
 278 
 279 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
 280   LIR_Opr tmp1 = new_register(objectType);
 281   LIR_Opr tmp2 = new_register(objectType);
 282   LIR_Opr tmp3 = new_register(objectType);
 283   __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
 284 }
 285 
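As background, the store check enforces Java's covariant array rule: the stored reference must be an instance of the destination array's element type, otherwise ArrayStoreException is thrown. A simplified illustration with made-up types (not HotSpot's Klass hierarchy or the store_check LIR op):

    #include <cstddef>
    #include <stdexcept>

    // Illustrative only: the dynamic check an oop array store (aastore) performs.
    struct FakeKlass { const FakeKlass* super; };

    static bool is_subtype_of(const FakeKlass* k, const FakeKlass* target) {
      for (; k != NULL; k = k->super) {
        if (k == target) return true;
      }
      return false;
    }

    static void checked_element_store(const FakeKlass* value_klass,
                                      const FakeKlass* element_klass) {
      // A null store always passes; otherwise the value must be a subtype.
      if (value_klass != NULL && !is_subtype_of(value_klass, element_klass)) {
        throw std::runtime_error("ArrayStoreException");
      }
    }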
 286 //----------------------------------------------------------------------
 287 //             visitor functions
 288 //----------------------------------------------------------------------
 289 
 290 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 291   assert(x->is_pinned(),"");
 292   LIRItem obj(x->obj(), this);
 293   obj.load_item();
 294 
 295   set_no_result(x);
 296 
 297   // "lock" stores the address of the monitor stack slot, so this is not an oop
 298   LIR_Opr lock = new_register(T_INT);
 299   // Need a scratch register for biased locking on x86
 300   LIR_Opr scratch = LIR_OprFact::illegalOpr;
 301   if (UseBiasedLocking) {
 302     scratch = new_register(T_INT);
 303   }
 304 
 305   CodeEmitInfo* info_for_exception = NULL;
 306   if (x->needs_null_check()) {
 307     info_for_exception = state_for(x);
 308   }


 649   LIRItem left(x->x(), this);
 650   LIRItem right(x->y(), this);
 651   ValueTag tag = x->x()->type()->tag();
 652   if (tag == longTag) {
 653     left.set_destroys_register();
 654   }
 655   left.load_item();
 656   right.load_item();
 657   LIR_Opr reg = rlock_result(x);
 658 
 659   if (x->x()->type()->is_float_kind()) {
 660     Bytecodes::Code code = x->op();
 661     __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
 662   } else if (x->x()->type()->tag() == longTag) {
 663     __ lcmp2int(left.result(), right.result(), reg);
 664   } else {
 665     Unimplemented();
 666   }
 667 }
 668 
 669 LIR_Opr LIRGenerator::cas(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
 670   LIR_Opr result = new_register(T_INT);
 671   LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
 672   if (type == T_OBJECT || type == T_ARRAY) {
 673     cmp_value.load_item_force(FrameMap::rax_oop_opr);
 674     new_value.load_item();
 675     __ cas_obj(addr, cmp_value.result(), new_value.result(), ill, ill);
 676   } else if (type == T_INT) {
 677     cmp_value.load_item_force(FrameMap::rax_opr);
 678     new_value.load_item();
 679     __ cas_int(addr, cmp_value.result(), new_value.result(), ill, ill);
 680   } else if (type == T_LONG) {
 681     cmp_value.load_item_force(FrameMap::long0_opr);
 682     new_value.load_item_force(FrameMap::long1_opr);
 683     __ cas_long(addr, cmp_value.result(), new_value.result(), ill, ill);
 684   } else {
 685     Unimplemented();
 686   }
 687   __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
 688            result, as_BasicType(new_value.type()));
 689   return result;
 690 }
 691 
 692 LIR_Opr LIRGenerator::swap(BasicType type, LIR_Opr addr, LIRItem& value) {
 693   bool is_oop = type == T_OBJECT || type == T_ARRAY;
 694   LIR_Opr result = new_register(type);
 695   value.load_item();
 696   // Because we want a 2-arg form of xchg and xadd
 697   __ move(value.result(), result);
 698   assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
 699   __ xchg(addr, result, result, LIR_OprFact::illegalOpr);
 700   return result;
 701 }
 702 
 703 LIR_Opr LIRGenerator::add(BasicType type, LIR_Opr addr, LIRItem& value) {
 704   LIR_Opr result = new_register(type);
 705   value.load_item();
 706   // Because we want a 2-arg form of xchg and xadd
 707   __ move(value.result(), result);
 708   assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
 709   __ xadd(addr, result, result, LIR_OprFact::illegalOpr);
 710   return result;
 711 }
 712 
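The "2-arg form" comment refers to lock xadd leaving the previous value in the same register that supplied the addend, which is why the addend is copied into the result register first. The semantics being implemented, as a standard-library sketch:

    #include <atomic>

    // Sketch: atomic fetch-and-add returning the value held before the add,
    // matching the xadd sequence above.
    static int get_and_add(std::atomic<int>& counter, int delta) {
      return counter.fetch_add(delta);
    }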
 713 void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
 714   assert(x->number_of_arguments() == 3, "wrong type");
 715   assert(UseFMA, "Needs FMA instruction support.");
 716   LIRItem value(x->argument_at(0), this);
 717   LIRItem value1(x->argument_at(1), this);
 718   LIRItem value2(x->argument_at(2), this);
 719 
 720   value2.set_destroys_register();
 721 
 722   value.load_item();
 723   value1.load_item();
 724   value2.load_item();
 725 
 726   LIR_Opr calc_input = value.result();
 727   LIR_Opr calc_input1 = value1.result();
 728   LIR_Opr calc_input2 = value2.result();
 729   LIR_Opr calc_result = rlock_result(x);
 730 


1454                                         CodeEmitInfo* info) {
1455   if (address->type() == T_LONG) {
1456     address = new LIR_Address(address->base(),
1457                               address->index(), address->scale(),
1458                               address->disp(), T_DOUBLE);
1459     // Transfer the value atomically by using FP moves.  This means
1460     // the value has to be moved between CPU and FPU registers.  It
1461     // always has to be moved through spill slot since there's no
1462     // quick way to pack the value into an SSE register.
1463     LIR_Opr temp_double = new_register(T_DOUBLE);
1464     LIR_Opr spill = new_register(T_LONG);
1465     set_vreg_flag(spill, must_start_in_memory);
1466     __ move(value, spill);
1467     __ volatile_move(spill, temp_double, T_LONG);
1468     __ volatile_move(temp_double, LIR_OprFact::address(address), T_LONG, info);
1469   } else {
1470     __ store(value, address, info);
1471   }
1472 }
1473 
1474 void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
1475                                        CodeEmitInfo* info) {
1476   if (address->type() == T_LONG) {
1477     address = new LIR_Address(address->base(),
1478                               address->index(), address->scale(),
1479                               address->disp(), T_DOUBLE);
1480     // Transfer the value atomically by using FP moves.  This means
1481     // the value has to be moved between CPU and FPU registers.  In
1482     // SSE0 and SSE1 mode it has to be moved through spill slot but in
1483     // SSE2+ mode it can be moved directly.
1484     LIR_Opr temp_double = new_register(T_DOUBLE);
1485     __ volatile_move(LIR_OprFact::address(address), temp_double, T_LONG, info);
1486     __ volatile_move(temp_double, result, T_LONG);
1487     if (UseSSE < 2) {
1488       // below SSE2 the value must go through a spill slot; in SSE2+ mode an xmm->cpu register move is possible
1489       set_vreg_flag(result, must_start_in_memory);
1490     }
1491   } else {
1492     __ load(address, result, info);
1493   }
1494 }