
src/share/vm/c1/c1_LIRGenerator.cpp

rev 12906 : [mq]: gc_interface


  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Compilation.hpp"
  27 #include "c1/c1_Defs.hpp"
  28 #include "c1/c1_FrameMap.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "ci/ciObjArray.hpp"
  36 #include "gc/shared/cardTableModRefBS.hpp"

  37 #include "runtime/arguments.hpp"
  38 #include "runtime/sharedRuntime.hpp"
  39 #include "runtime/stubRoutines.hpp"
  40 #include "runtime/vm_version.hpp"
  41 #include "utilities/bitMap.inline.hpp"
  42 #include "utilities/macros.hpp"
  43 #if INCLUDE_ALL_GCS
  44 #include "gc/g1/heapRegion.hpp"
  45 #endif // INCLUDE_ALL_GCS
  46 #ifdef TRACE_HAVE_INTRINSICS
  47 #include "trace/traceMacros.hpp"
  48 #endif
  49 
  50 #ifdef ASSERT
  51 #define __ gen()->lir(__FILE__, __LINE__)->
  52 #else
  53 #define __ gen()->lir()->
  54 #endif
  55 
  56 #ifndef PATCHED_ADDR
  57 #define PATCHED_ADDR  (max_jint)
  58 #endif
  59 
  60 void PhiResolverState::reset(int max_vregs) {
  61   // Initialize array sizes
  62   _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  63   _virtual_operands.trunc_to(0);
  64   _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  65   _other_operands.trunc_to(0);


 292 
 293 
 294 jdouble LIRItem::get_jdouble_constant() const {
 295   assert(is_constant() && value() != NULL, "");
 296   assert(type()->as_DoubleConstant() != NULL, "type check");
 297   return type()->as_DoubleConstant()->value();
 298 }
 299 
 300 
 301 jlong LIRItem::get_jlong_constant() const {
 302   assert(is_constant() && value() != NULL, "");
 303   assert(type()->as_LongConstant() != NULL, "type check");
 304   return type()->as_LongConstant()->value();
 305 }
 306 
 307 
 308 
 309 //--------------------------------------------------------------
 310 
 311 
 312 void LIRGenerator::init() {
 313   _bs = Universe::heap()->barrier_set();
 314 }
 315 
 316 
 317 void LIRGenerator::block_do_prolog(BlockBegin* block) {
 318 #ifndef PRODUCT
 319   if (PrintIRWithLIR) {
 320     block->print();
 321   }
 322 #endif
 323 
 324   // set up the list of LIR instructions
 325   assert(block->lir() == NULL, "LIR list already computed for this block");
 326   _lir = new LIR_List(compilation(), block);
 327   block->set_lir(_lir);
 328 
 329   __ branch_destination(block->label());
 330 
 331   if (LIRTraceExecution &&
 332       Compilation::current()->hir()->start()->block_id() != block->block_id() &&
 333       !block->is_set(BlockBegin::exception_entry_flag)) {
 334     assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
 335     trace_block_entry(block);
 336   }


1224 }
1225 
1226 // Example: ref.get()
1227 // Combination of LoadField and G1 pre-write barrier
1228 void LIRGenerator::do_Reference_get(Intrinsic* x) {
1229 
1230   const int referent_offset = java_lang_ref_Reference::referent_offset;
1231   guarantee(referent_offset > 0, "referent offset not initialized");
1232 
1233   assert(x->number_of_arguments() == 1, "wrong type");
1234 
1235   LIRItem reference(x->argument_at(0), this);
1236   reference.load_item();
1237 
1238   // need to perform the null check on the reference object
1239   CodeEmitInfo* info = NULL;
1240   if (x->needs_null_check()) {
1241     info = state_for(x);
1242   }
1243 
1244   LIR_Address* referent_field_adr =
1245     new LIR_Address(reference.result(), referent_offset, T_OBJECT);
1246 
1247   LIR_Opr result = rlock_result(x);
1248 
1249   __ load(referent_field_adr, result, info);
1250 
1251   // Register the value in the referent field with the pre-barrier
1252   pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
1253               result /* pre_val */,
1254               false  /* do_load */,
1255               false  /* patch */,
1256               NULL   /* info */);
1257 }
1258 
1259 // Example: clazz.isInstance(object)
1260 void LIRGenerator::do_isInstance(Intrinsic* x) {
1261   assert(x->number_of_arguments() == 2, "wrong type");
1262 
1263   // TODO could try to substitute this node with an equivalent InstanceOf
1264   // if clazz is known to be a constant Class. This will pick up newly found
1265   // constants after HIR construction. I'll leave this to a future change.
1266 
1267   // as a first cut, make a simple leaf call to runtime to stay platform independent.
1268   // could follow the aastore example in a future change.
1269 
1270   LIRItem clazz(x->argument_at(0), this);
1271   LIRItem object(x->argument_at(1), this);
1272   clazz.load_item();
1273   object.load_item();
1274   LIR_Opr result = rlock_result(x);
1275 
1276   // need to perform null check on clazz


1429       case T_LONG:
1430       case T_DOUBLE:
1431         if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
1432         if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
1433         break;
1434       case T_OBJECT:
1435         if (c->as_jobject() != other->as_jobject()) continue;
1436         break;
1437       }
1438       return _reg_for_constants.at(i);
1439     }
1440   }
1441 
1442   LIR_Opr result = new_register(t);
1443   __ move((LIR_Opr)c, result);
1444   _constants.append(c);
1445   _reg_for_constants.append(result);
1446   return result;
1447 }
1448 
1449 // Various barriers
1450 
1451 void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
1452                                bool do_load, bool patch, CodeEmitInfo* info) {
1453   // Do the pre-write barrier, if any.
1454   switch (_bs->kind()) {
1455 #if INCLUDE_ALL_GCS
1456     case BarrierSet::G1SATBCTLogging:
1457       G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
1458       break;
1459 #endif // INCLUDE_ALL_GCS
1460     case BarrierSet::CardTableForRS:
1461     case BarrierSet::CardTableExtension:
1462       // No pre barriers
1463       break;
1464     case BarrierSet::ModRef:
1465       // No pre barriers
1466       break;
1467     default:
1468       ShouldNotReachHere();
1469 
1470   }
1471 }
1472 
1473 void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1474   switch (_bs->kind()) {
1475 #if INCLUDE_ALL_GCS
1476     case BarrierSet::G1SATBCTLogging:
1477       G1SATBCardTableModRef_post_barrier(addr,  new_val);
1478       break;
1479 #endif // INCLUDE_ALL_GCS
1480     case BarrierSet::CardTableForRS:
1481     case BarrierSet::CardTableExtension:
1482       CardTableModRef_post_barrier(addr,  new_val);
1483       break;
1484     case BarrierSet::ModRef:
1485       // No post barriers
1486       break;
1487     default:
1488       ShouldNotReachHere();
1489     }
1490 }
1491 
1492 ////////////////////////////////////////////////////////////////////////
1493 #if INCLUDE_ALL_GCS
1494 
1495 void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
1496                                                      bool do_load, bool patch, CodeEmitInfo* info) {
1497   // First we test whether marking is in progress.
1498   BasicType flag_type;
1499   if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
1500     flag_type = T_INT;
1501   } else {
1502     guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
1503               "Assumption");
1504     // Use unsigned type T_BOOLEAN here rather than signed T_BYTE since some platforms, e.g. ARM,
1505     // need to use unsigned instructions to use the large offset to load the satb_mark_queue.
1506     flag_type = T_BOOLEAN;
1507   }
1508   LIR_Opr thrd = getThreadPointer();
1509   LIR_Address* mark_active_flag_addr =
1510     new LIR_Address(thrd,
1511                     in_bytes(JavaThread::satb_mark_queue_offset() +
1512                              SATBMarkQueue::byte_offset_of_active()),
1513                     flag_type);
1514   // Read the marking-in-progress flag.
1515   LIR_Opr flag_val = new_register(T_INT);
1516   __ load(mark_active_flag_addr, flag_val);
1517   __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
1518 
1519   LIR_PatchCode pre_val_patch_code = lir_patch_none;
1520 
1521   CodeStub* slow;
1522 
1523   if (do_load) {
1524     assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
1525     assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");
1526 
1527     if (patch)
1528       pre_val_patch_code = lir_patch_normal;
1529 
1530     pre_val = new_register(T_OBJECT);
1531 
1532     if (!addr_opr->is_address()) {
1533       assert(addr_opr->is_register(), "must be");
1534       addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
1535     }
1536     slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
1537   } else {
1538     assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
1539     assert(pre_val->is_register(), "must be");
1540     assert(pre_val->type() == T_OBJECT, "must be an object");
1541     assert(info == NULL, "sanity");
1542 
1543     slow = new G1PreBarrierStub(pre_val);
1544   }
1545 
1546   __ branch(lir_cond_notEqual, T_INT, slow);
1547   __ branch_destination(slow->continuation());
1548 }
1549 
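For orientation, the fast path that G1SATBCardTableModRef_pre_barrier emits boils down to the check below. This is a minimal standalone C++ sketch, not HotSpot code: SATBQueue and satb_enqueue are hypothetical stand-ins for the thread's satb_mark_queue and the G1PreBarrierStub slow path.

#include <cstddef>

typedef void* oop;                    // stand-in for a heap reference
struct SATBQueue { bool active; };    // stand-in for the thread's satb_mark_queue

void satb_enqueue(oop pre_val) { /* record pre_val in the SATB log buffer */ }

// Shape of the emitted fast path for the do_load case.
void g1_pre_barrier_sketch(SATBQueue* q, oop* field) {
  if (q->active) {                    // the flag load + cmp emitted above
    oop pre_val = *field;             // old value of the field about to be overwritten
    if (pre_val != NULL) {            // NULL filtering happens in the stub
      satb_enqueue(pre_val);          // slow path: G1PreBarrierStub
    }
  }
}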
1550 void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1551   // If the "new_val" is a constant NULL, no barrier is necessary.
1552   if (new_val->is_constant() &&
1553       new_val->as_constant_ptr()->as_jobject() == NULL) return;
1554 
1555   if (!new_val->is_register()) {
1556     LIR_Opr new_val_reg = new_register(T_OBJECT);
1557     if (new_val->is_constant()) {
1558       __ move(new_val, new_val_reg);
1559     } else {
1560       __ leal(new_val, new_val_reg);
1561     }
1562     new_val = new_val_reg;
1563   }
1564   assert(new_val->is_register(), "must be a register at this point");
1565 
1566   if (addr->is_address()) {
1567     LIR_Address* address = addr->as_address_ptr();
1568     LIR_Opr ptr = new_pointer_register();
1569     if (!address->index()->is_valid() && address->disp() == 0) {
1570       __ move(address->base(), ptr);
1571     } else {
1572       assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
1573       __ leal(addr, ptr);
1574     }
1575     addr = ptr;
1576   }
1577   assert(addr->is_register(), "must be a register at this point");
1578 
1579   LIR_Opr xor_res = new_pointer_register();
1580   LIR_Opr xor_shift_res = new_pointer_register();
1581   if (TwoOperandLIRForm) {
1582     __ move(addr, xor_res);
1583     __ logical_xor(xor_res, new_val, xor_res);
1584     __ move(xor_res, xor_shift_res);
1585     __ unsigned_shift_right(xor_shift_res,
1586                             LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
1587                             xor_shift_res,
1588                             LIR_OprDesc::illegalOpr());
1589   } else {
1590     __ logical_xor(addr, new_val, xor_res);
1591     __ unsigned_shift_right(xor_res,
1592                             LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
1593                             xor_shift_res,
1594                             LIR_OprDesc::illegalOpr());
1595   }
1596 
1597   if (!new_val->is_register()) {
1598     LIR_Opr new_val_reg = new_register(T_OBJECT);
1599     __ leal(new_val, new_val_reg);
1600     new_val = new_val_reg;
1601   }
1602   assert(new_val->is_register(), "must be a register at this point");
1603 
1604   __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));
1605 
1606   CodeStub* slow = new G1PostBarrierStub(addr, new_val);
1607   __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
1608   __ branch_destination(slow->continuation());
1609 }
1610 
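The xor/shift sequence above is G1's cross-region filter: two addresses lie in the same heap region exactly when they agree in every bit above LogOfHRGrainBytes, so the shifted xor is zero precisely for same-region stores, which need no remembered-set update. A minimal sketch of the test, with an assumed region size:

#include <cstdint>

const int kLogOfHRGrainBytes = 20;   // assumed log2 of the region size (1 MB here)

// True when the G1PostBarrierStub slow path must run: a non-NULL value
// stored across a region boundary.
bool g1_needs_post_barrier(uintptr_t field_addr, uintptr_t new_val) {
  if (new_val == 0) return false;    // NULL stores are filtered out above
  return ((field_addr ^ new_val) >> kLogOfHRGrainBytes) != 0;
}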
1611 #endif // INCLUDE_ALL_GCS
1612 ////////////////////////////////////////////////////////////////////////
1613 
1614 void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1615   CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(_bs);
1616   assert(sizeof(*(ct->byte_map_base)) == sizeof(jbyte), "adjust this code");
1617   LIR_Const* card_table_base = new LIR_Const(ct->byte_map_base);
1618   if (addr->is_address()) {
1619     LIR_Address* address = addr->as_address_ptr();
1620     // ptr cannot be an object because we use this barrier for array card marks
1621     // and addr can point in the middle of an array.
1622     LIR_Opr ptr = new_pointer_register();
1623     if (!address->index()->is_valid() && address->disp() == 0) {
1624       __ move(address->base(), ptr);
1625     } else {
1626       assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
1627       __ leal(addr, ptr);
1628     }
1629     addr = ptr;
1630   }
1631   assert(addr->is_register(), "must be a register at this point");
1632 
1633 #ifdef CARDTABLEMODREF_POST_BARRIER_HELPER
1634   CardTableModRef_post_barrier_helper(addr, card_table_base);
1635 #else
1636   LIR_Opr tmp = new_pointer_register();
1637   if (TwoOperandLIRForm) {
1638     __ move(addr, tmp);
1639     __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
1640   } else {
1641     __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
1642   }
1643 
1644   LIR_Address* card_addr;
1645   if (can_inline_as_constant(card_table_base)) {
1646     card_addr = new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE);
1647   } else {
1648     card_addr = new LIR_Address(tmp, load_constant(card_table_base), T_BYTE);
1649   }
1650 
1651   LIR_Opr dirty = LIR_OprFact::intConst(CardTableModRefBS::dirty_card_val());
1652   if (UseCondCardMark) {
1653     LIR_Opr cur_value = new_register(T_INT);
1654     if (UseConcMarkSweepGC) {
1655       __ membar_storeload();
1656     }
1657     __ move(card_addr, cur_value);
1658 
1659     LabelObj* L_already_dirty = new LabelObj();
1660     __ cmp(lir_cond_equal, cur_value, dirty);
1661     __ branch(lir_cond_equal, T_BYTE, L_already_dirty->label());
1662     __ move(dirty, card_addr);
1663     __ branch_destination(L_already_dirty->label());
1664   } else {
1665     if (UseConcMarkSweepGC && CMSPrecleaningEnabled) {
1666       __ membar_storestore();
1667     }
1668     __ move(dirty, card_addr);
1669   }
1670 #endif
1671 }
1672 
1673 
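Stripped of the LIR plumbing, the generated card mark turns the store address into a card byte address and dirties it. A self-contained sketch, assuming the usual 512-byte cards; byte_map_base is pre-biased so that the shifted raw address indexes it directly:

#include <cstdint>

const int kCardShift = 9;            // assumed: 2^9 = 512-byte cards

void card_mark_sketch(uint8_t* byte_map_base, uintptr_t field_addr,
                      uint8_t dirty_card_val, bool use_cond_card_mark) {
  uint8_t* card = byte_map_base + (field_addr >> kCardShift);
  // UseCondCardMark path: a load and branch skip stores to already-dirty
  // cards, trading latency for less write traffic on hot cards.
  if (use_cond_card_mark && *card == dirty_card_val) {
    return;
  }
  *card = dirty_card_val;
}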
1674 //------------------------field access--------------------------------------
1675 
1676 // Comment copied from templateTable_i486.cpp
1677 // ----------------------------------------------------------------------------
1678 // Volatile variables demand their effects be made known to all CPU's in
1679 // order.  Store buffers on most chips allow reads & writes to reorder; the
1680 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1681 // memory barrier (i.e., it's not sufficient that the interpreter does not
1682 // reorder volatile references, the hardware also must not reorder them).
1683 //
1684 // According to the new Java Memory Model (JMM):
1685 // (1) All volatiles are serialized wrt each other.
1686 // ALSO reads & writes act as acquire & release, so:
1687 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1688 // the read float up to before the read.  It's OK for non-volatile memory refs
1689 // that happen before the volatile read to float down below it.
1690 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
1691 // that happen BEFORE the write float down to after the write.  It's OK for
1692 // non-volatile memory refs that happen after the volatile write to float up
1693 // before it.
1694 //
1695 // We only put in barriers around volatile refs (they are expensive), not
1696 // _between_ memory refs (that would require us to track the flavor of the
1697 // previous memory refs).  Requirements (2) and (3) require some barriers
1698 // before volatile stores and after volatile loads.  These nearly cover
1699 // requirement (1) but miss the volatile-store-volatile-load case.  This final
1700 // case is placed after volatile-stores although it could just as well go
1701 // before volatile-loads.
1702 
1703 
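The placement rules in the comment above can be restated with standard C++ fences. This is only an analogy (HotSpot emits membar LIR ops, not std::atomic calls), but it mirrors where the membars generated below end up:

#include <atomic>

int plain_field;                     // a non-volatile memory ref
std::atomic<int> vol_field;          // stands in for a volatile Java field

void volatile_store(int v) {
  plain_field = v;                                      // must not sink below the store (rule 3)
  std::atomic_thread_fence(std::memory_order_release);  // membar_release before a volatile store
  vol_field.store(v, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);  // trailing full membar covers the
                                                        // volatile-store/volatile-load case (rule 1)
}

int volatile_load() {
  int v = vol_field.load(std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_acquire);  // membar_acquire after a volatile load
  return v + plain_field;                               // must not float above the load (rule 2)
}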
1704 void LIRGenerator::do_StoreField(StoreField* x) {
1705   bool needs_patching = x->needs_patching();
1706   bool is_volatile = x->field()->is_volatile();
1707   BasicType field_type = x->field_type();
1708   bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);
1709 
1710   CodeEmitInfo* info = NULL;
1711   if (needs_patching) {
1712     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1713     info = state_for(x, x->state_before());
1714   } else if (x->needs_null_check()) {
1715     NullCheck* nc = x->explicit_null_check();
1716     if (nc == NULL) {
1717       info = state_for(x);
1718     } else {
1719       info = state_for(nc);
1720     }
1721   }
1722 
1723 
1724   LIRItem object(x->obj(), this);
1725   LIRItem value(x->value(),  this);
1726 
1727   object.load_item();
1728 


1741   }
1742 
1743   set_no_result(x);
1744 
1745 #ifndef PRODUCT
1746   if (PrintNotLoaded && needs_patching) {
1747     tty->print_cr("   ###class not loaded at store_%s bci %d",
1748                   x->is_static() ?  "static" : "field", x->printable_bci());
1749   }
1750 #endif
1751 
1752   if (x->needs_null_check() &&
1753       (needs_patching ||
1754        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1755     // Emit an explicit null check because the offset is too large.
1756     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1757     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1758     __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1759   }
1760 
1761   LIR_Address* address;
1762   if (needs_patching) {
1763     // we need to patch the offset in the instruction so don't allow
1764     // generate_address to try to be smart about emitting the -1.
1765     // Otherwise the patching code won't know how to find the
1766     // instruction to patch.
1767     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1768   } else {
1769     address = generate_address(object.result(), x->offset(), field_type);
1770   }
1771 
1772   if (is_volatile && os::is_MP()) {
1773     __ membar_release();
1774   }
1775 
1776   if (is_oop) {
1777     // Do the pre-write barrier, if any.
1778     pre_barrier(LIR_OprFact::address(address),
1779                 LIR_OprFact::illegalOpr /* pre_val */,
1780                 true /* do_load*/,
1781                 needs_patching,
1782                 (info ? new CodeEmitInfo(info) : NULL));
1783   }
1784 
1785   bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
1786   if (needs_atomic_access && !needs_patching) {
1787     volatile_field_store(value.result(), address, info);

1788   } else {
1789     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1790     __ store(value.result(), address, info, patch_code);


1791   }
1792 
1793   if (is_oop) {
1794     // Store to an object, so mark the card containing the object's header
1795     post_barrier(object.result(), value.result());
1796   }
1797 
1798   if (!support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile && os::is_MP()) {
1799     __ membar();

1800   }


1801 }
1802 
1803 
1804 void LIRGenerator::do_LoadField(LoadField* x) {
1805   bool needs_patching = x->needs_patching();
1806   bool is_volatile = x->field()->is_volatile();
1807   BasicType field_type = x->field_type();
1808 
1809   CodeEmitInfo* info = NULL;
1810   if (needs_patching) {
1811     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1812     info = state_for(x, x->state_before());
1813   } else if (x->needs_null_check()) {
1814     NullCheck* nc = x->explicit_null_check();
1815     if (nc == NULL) {
1816       info = state_for(x);
1817     } else {
1818       info = state_for(nc);
1819     }
1820   }
1821 
1822   LIRItem object(x->obj(), this);


2258     __ shift_left(index_op, log2_scale, tmp);
2259     if (!TwoOperandLIRForm) {
2260       index_op = tmp;
2261     }
2262   }
2263 
2264   LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2265 #endif // !GENERATE_ADDRESS_IS_PREFERRED
2266   __ move(value.result(), addr);
2267 }
2268 
2269 
2270 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2271   BasicType type = x->basic_type();
2272   LIRItem src(x->object(), this);
2273   LIRItem off(x->offset(), this);
2274 
2275   off.load_item();
2276   src.load_item();
2277 
2278   LIR_Opr value = rlock_result(x, x->basic_type());
2279 
2280   if (support_IRIW_for_not_multiple_copy_atomic_cpu && x->is_volatile() && os::is_MP()) {
2281     __ membar();
2282   }
2283 
2284   get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2285 
2286 #if INCLUDE_ALL_GCS
2287   // We might be reading the value of the referent field of a
2288   // Reference object in order to attach it back to the live
2289   // object graph. If G1 is enabled then we need to record
2290   // the value that is being returned in an SATB log buffer.
2291   //
2292   // We need to generate code similar to the following...
2293   //
2294   // if (offset == java_lang_ref_Reference::referent_offset) {
2295   //   if (src != NULL) {
2296   //     if (klass(src)->reference_type() != REF_NONE) {
2297   //       pre_barrier(..., value, ...);
2298   //     }
2299   //   }
2300   // }
2301 
2302   if (UseG1GC && type == T_OBJECT) {
2303     bool gen_pre_barrier = true;     // Assume we need to generate pre_barrier.
2304     bool gen_offset_check = true;    // Assume we need to generate the offset guard.
2305     bool gen_source_check = true;    // Assume we need to check the src object for null.
2306     bool gen_type_check = true;      // Assume we need to check the reference_type.
2307 
2308     if (off.is_constant()) {
2309       jlong off_con = (off.type()->is_int() ?
2310                         (jlong) off.get_jint_constant() :
2311                         off.get_jlong_constant());
2312 
2313 
2314       if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
2315         // The constant offset is something other than referent_offset.
2316         // We can skip generating/checking the remaining guards and
2317         // skip generation of the code stub.
2318         gen_pre_barrier = false;
2319       } else {
2320         // The constant offset is the same as referent_offset -
2321         // we do not need to generate a runtime offset check.
2322         gen_offset_check = false;
2323       }
2324     }
2325 
2326     // We don't need to generate the stub if the source object is an array
2327     if (gen_pre_barrier && src.type()->is_array()) {
2328       gen_pre_barrier = false;
2329     }
2330 
2331     if (gen_pre_barrier) {
2332       // We still need to continue with the checks.
2333       if (src.is_constant()) {
2334         ciObject* src_con = src.get_jobject_constant();
2335         guarantee(src_con != NULL, "no source constant");
2336 
2337         if (src_con->is_null_object()) {
2338           // The constant src object is null - We can skip
2339           // generating the code stub.
2340           gen_pre_barrier = false;
2341         } else {
2342           // Non-null constant source object. We still have to generate
2343           // the slow stub - but we don't need to generate the runtime
2344           // null object check.
2345           gen_source_check = false;
2346         }
2347       }
2348     }
2349     if (gen_pre_barrier && !PatchALot) {
2350       // Can the klass of object be statically determined to be
2351       // a sub-class of Reference?
2352       ciType* type = src.value()->declared_type();
2353       if ((type != NULL) && type->is_loaded()) {
2354         if (type->is_subtype_of(compilation()->env()->Reference_klass())) {
2355           gen_type_check = false;
2356         } else if (type->is_klass() &&
2357                    !compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
2358           // Not Reference and not Object klass.
2359           gen_pre_barrier = false;
2360         }
2361       }
2362     }
2363 
2364     if (gen_pre_barrier) {
2365       LabelObj* Lcont = new LabelObj();
2366 
2367       // We can generate one runtime check here. Let's start with
2368       // the offset check.
2369       if (gen_offset_check) {
2370         // if (offset != referent_offset) -> continue
2371         // If offset is an int then we can do the comparison with the
2372         // referent_offset constant; otherwise we need to move
2373         // referent_offset into a temporary register and generate
2374         // a reg-reg compare.
2375 
2376         LIR_Opr referent_off;
2377 
2378         if (off.type()->is_int()) {
2379           referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
2380         } else {
2381           assert(off.type()->is_long(), "what else?");
2382           referent_off = new_register(T_LONG);
2383           __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
2384         }
2385         __ cmp(lir_cond_notEqual, off.result(), referent_off);
2386         __ branch(lir_cond_notEqual, as_BasicType(off.type()), Lcont->label());
2387       }
2388       if (gen_source_check) {
2389         // offset is a const and equals referent offset
2390         // if (source == null) -> continue
2391         __ cmp(lir_cond_equal, src.result(), LIR_OprFact::oopConst(NULL));
2392         __ branch(lir_cond_equal, T_OBJECT, Lcont->label());
2393       }
2394       LIR_Opr src_klass = new_register(T_OBJECT);
2395       if (gen_type_check) {
2396         // We have determined that offset == referent_offset && src != null.
2397         // if (src->_klass->_reference_type == REF_NONE) -> continue
2398         __ move(new LIR_Address(src.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), src_klass);
2399         LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
2400         LIR_Opr reference_type = new_register(T_INT);
2401         __ move(reference_type_addr, reference_type);
2402         __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
2403         __ branch(lir_cond_equal, T_INT, Lcont->label());
2404       }
2405       {
2406         // We have determined that src->_klass->_reference_type != REF_NONE
2407         // so register the value in the referent field with the pre-barrier.
2408         pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
2409                     value  /* pre_val */,
2410                     false  /* do_load */,
2411                     false  /* patch */,
2412                     NULL   /* info */);
2413       }
2414       __ branch_destination(Lcont->label());
2415     }


2416   }
2417 #endif // INCLUDE_ALL_GCS
2418 
2419   if (x->is_volatile() && os::is_MP()) __ membar_acquire();
2420 
2421   /* Normalize boolean value returned by unsafe operation, i.e., value != 0 ? value = true : value = false. */
2422   if (type == T_BOOLEAN) {
2423     LabelObj* equalZeroLabel = new LabelObj();
2424     __ cmp(lir_cond_equal, value, 0);
2425     __ branch(lir_cond_equal, T_BOOLEAN, equalZeroLabel->label());
2426     __ move(LIR_OprFact::intConst(1), value);
2427     __ branch_destination(equalZeroLabel->label());
2428   }
2429 }
2430 
2431 
2432 void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
2433   BasicType type = x->basic_type();
2434   LIRItem src(x->object(), this);
2435   LIRItem off(x->offset(), this);
2436   LIRItem data(x->value(), this);
2437 
2438   src.load_item();
2439   if (type == T_BOOLEAN || type == T_BYTE) {
2440     data.load_byte_item();
2441   } else {
2442     data.load_item();
2443   }
2444   off.load_item();
2445 
2446   set_no_result(x);
2447 
2448   if (x->is_volatile() && os::is_MP()) __ membar_release();
2449   put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
2450   if (!support_IRIW_for_not_multiple_copy_atomic_cpu && x->is_volatile() && os::is_MP()) __ membar();
2451 }
2452 
2453 
2454 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2455   int lng = x->length();
2456 
2457   for (int i = 0; i < lng; i++) {
2458     SwitchRange* one_range = x->at(i);
2459     int low_key = one_range->low_key();
2460     int high_key = one_range->high_key();
2461     BlockBegin* dest = one_range->sux();
2462     if (low_key == high_key) {
2463       __ cmp(lir_cond_equal, value, low_key);
2464       __ branch(lir_cond_equal, T_INT, dest);
2465     } else if (high_key - low_key == 1) {
2466       __ cmp(lir_cond_equal, value, low_key);
2467       __ branch(lir_cond_equal, T_INT, dest);
2468       __ cmp(lir_cond_equal, value, high_key);
2469       __ branch(lir_cond_equal, T_INT, dest);
2470     } else {
2471       LabelObj* L = new LabelObj();
2472       __ cmp(lir_cond_less, value, low_key);


3749   }
3750   return result;
3751 }
3752 
3753 void LIRGenerator::do_MemBar(MemBar* x) {
3754   if (os::is_MP()) {
3755     LIR_Code code = x->code();
3756     switch(code) {
3757       case lir_membar_acquire   : __ membar_acquire(); break;
3758       case lir_membar_release   : __ membar_release(); break;
3759       case lir_membar           : __ membar(); break;
3760       case lir_membar_loadload  : __ membar_loadload(); break;
3761       case lir_membar_storestore: __ membar_storestore(); break;
3762       case lir_membar_loadstore : __ membar_loadstore(); break;
3763       case lir_membar_storeload : __ membar_storeload(); break;
3764       default                   : ShouldNotReachHere(); break;
3765     }
3766   }
3767 }
3768 
3769 LIR_Opr LIRGenerator::maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
3770   if (x->check_boolean()) {
3771     LIR_Opr value_fixed = rlock_byte(T_BYTE);
3772     if (TwoOperandLIRForm) {
3773       __ move(value, value_fixed);
3774       __ logical_and(value_fixed, LIR_OprFact::intConst(1), value_fixed);
3775     } else {
3776       __ logical_and(value, LIR_OprFact::intConst(1), value_fixed);
3777     }
3778     LIR_Opr klass = new_register(T_METADATA);
3779     __ move(new LIR_Address(array, oopDesc::klass_offset_in_bytes(), T_ADDRESS), klass, null_check_info);
3780     null_check_info = NULL;
3781     LIR_Opr layout = new_register(T_INT);
3782     __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
3783     int diffbit = Klass::layout_helper_boolean_diffbit();
3784     __ logical_and(layout, LIR_OprFact::intConst(diffbit), layout);
3785     __ cmp(lir_cond_notEqual, layout, LIR_OprFact::intConst(0));
3786     __ cmove(lir_cond_notEqual, value_fixed, value, value_fixed, T_BYTE);
3787     value = value_fixed;
3788   }
3789   return value;
3790 }


  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Compilation.hpp"
  27 #include "c1/c1_Defs.hpp"
  28 #include "c1/c1_FrameMap.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "ci/ciObjArray.hpp"
  36 #include "gc/shared/cardTableModRefBS.hpp"
  37 #include "gc/shared/c1BarrierSetCodeGen.hpp"
  38 #include "runtime/arguments.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/stubRoutines.hpp"
  41 #include "runtime/vm_version.hpp"
  42 #include "utilities/bitMap.inline.hpp"
  43 #include "utilities/macros.hpp"
  44 #ifdef TRACE_HAVE_INTRINSICS
  45 #include "trace/traceMacros.hpp"
  46 #endif
  47 
  48 #ifdef ASSERT
  49 #define __ gen()->lir(__FILE__, __LINE__)->
  50 #else
  51 #define __ gen()->lir()->
  52 #endif
  53 
  54 #ifndef PATCHED_ADDR
  55 #define PATCHED_ADDR  (max_jint)
  56 #endif
  57 
  58 void PhiResolverState::reset(int max_vregs) {
  59   // Initialize array sizes
  60   _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  61   _virtual_operands.trunc_to(0);
  62   _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  63   _other_operands.trunc_to(0);


 290 
 291 
 292 jdouble LIRItem::get_jdouble_constant() const {
 293   assert(is_constant() && value() != NULL, "");
 294   assert(type()->as_DoubleConstant() != NULL, "type check");
 295   return type()->as_DoubleConstant()->value();
 296 }
 297 
 298 
 299 jlong LIRItem::get_jlong_constant() const {
 300   assert(is_constant() && value() != NULL, "");
 301   assert(type()->as_LongConstant() != NULL, "type check");
 302   return type()->as_LongConstant()->value();
 303 }
 304 
 305 
 306 
 307 //--------------------------------------------------------------
 308 
 309 
 310 void LIRGenerator::block_do_prolog(BlockBegin* block) {
 311 #ifndef PRODUCT
 312   if (PrintIRWithLIR) {
 313     block->print();
 314   }
 315 #endif
 316 
 317   // set up the list of LIR instructions
 318   assert(block->lir() == NULL, "LIR list already computed for this block");
 319   _lir = new LIR_List(compilation(), block);
 320   block->set_lir(_lir);
 321 
 322   __ branch_destination(block->label());
 323 
 324   if (LIRTraceExecution &&
 325       Compilation::current()->hir()->start()->block_id() != block->block_id() &&
 326       !block->is_set(BlockBegin::exception_entry_flag)) {
 327     assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
 328     trace_block_entry(block);
 329   }


1217 }
1218 
1219 // Example: ref.get()
1220 // Combination of LoadField and G1 pre-write barrier
1221 void LIRGenerator::do_Reference_get(Intrinsic* x) {
1222 
1223   const int referent_offset = java_lang_ref_Reference::referent_offset;
1224   guarantee(referent_offset > 0, "referent offset not initialized");
1225 
1226   assert(x->number_of_arguments() == 1, "wrong type");
1227 
1228   LIRItem reference(x->argument_at(0), this);
1229   reference.load_item();
1230 
1231   // need to perform the null check on the reference object
1232   CodeEmitInfo* info = NULL;
1233   if (x->needs_null_check()) {
1234     info = state_for(x);
1235   }
1236 
1237   C1DecoratorSet decorators = C1_ACCESS_ON_HEAP | C1_ACCESS_ON_WEAK;
1238   LIR_Opr result = access_load_at(decorators, as_BasicType(x->type()),
1239                                   reference, LIR_OprFact::intConst(referent_offset),
1240                                   NULL, NULL);
1241   set_result(x, result);
1242 }
1243 
1244 // Example: clazz.isInstance(object)
1245 void LIRGenerator::do_isInstance(Intrinsic* x) {
1246   assert(x->number_of_arguments() == 2, "wrong type");
1247 
1248   // TODO could try to substitute this node with an equivalent InstanceOf
1249   // if clazz is known to be a constant Class. This will pick up newly found
1250   // constants after HIR construction. I'll leave this to a future change.
1251 
1252   // as a first cut, make a simple leaf call to runtime to stay platform independent.
1253   // could follow the aastore example in a future change.
1254 
1255   LIRItem clazz(x->argument_at(0), this);
1256   LIRItem object(x->argument_at(1), this);
1257   clazz.load_item();
1258   object.load_item();
1259   LIR_Opr result = rlock_result(x);
1260 
1261   // need to perform null check on clazz


1414       case T_LONG:
1415       case T_DOUBLE:
1416         if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
1417         if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
1418         break;
1419       case T_OBJECT:
1420         if (c->as_jobject() != other->as_jobject()) continue;
1421         break;
1422       }
1423       return _reg_for_constants.at(i);
1424     }
1425   }
1426 
1427   LIR_Opr result = new_register(t);
1428   __ move((LIR_Opr)c, result);
1429   _constants.append(c);
1430   _reg_for_constants.append(result);
1431   return result;
1432 }
1433 
1434 //------------------------field access--------------------------------------
1435 
1436 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
1437   assert(x->number_of_arguments() == 4, "wrong type");
1438   LIRItem obj   (x->argument_at(0), this);  // object
1439   LIRItem offset(x->argument_at(1), this);  // offset of field
1440   LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
1441   LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
1442   assert(obj.type()->tag() == objectTag, "invalid type");
1443 
1444   // In 64-bit the type can be long; SPARC doesn't have this assert
1445   // assert(offset.type()->tag() == intTag, "invalid type");
1446 
1447   assert(cmp.type()->tag() == type->tag(), "invalid type");
1448   assert(val.type()->tag() == type->tag(), "invalid type");
1449 
1450   C1DecoratorSet decorators = C1_ACCESS_ON_HEAP | C1_MO_VOLATILE;
1451   LIR_Opr result = access_cas_at(decorators, as_BasicType(type),
1452                                  obj, offset, cmp, val);
1453   set_result(x, result);
1454 }
1455 
1456 // Comment copied from templateTable_i486.cpp
1457 // ----------------------------------------------------------------------------
1458 // Volatile variables demand their effects be made known to all CPU's in
1459 // order.  Store buffers on most chips allow reads & writes to reorder; the
1460 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1461 // memory barrier (i.e., it's not sufficient that the interpreter does not
1462 // reorder volatile references, the hardware also must not reorder them).
1463 //
1464 // According to the new Java Memory Model (JMM):
1465 // (1) All volatiles are serialized wrt each other.
1466 // ALSO reads & writes act as acquire & release, so:
1467 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1468 // the read float up to before the read.  It's OK for non-volatile memory refs
1469 // that happen before the volatile read to float down below it.
1470 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
1471 // that happen BEFORE the write float down to after the write.  It's OK for
1472 // non-volatile memory refs that happen after the volatile write to float up
1473 // before it.
1474 //
1475 // We only put in barriers around volatile refs (they are expensive), not
1476 // _between_ memory refs (that would require us to track the flavor of the
1477 // previous memory refs).  Requirements (2) and (3) require some barriers
1478 // before volatile stores and after volatile loads.  These nearly cover
1479 // requirement (1) but miss the volatile-store-volatile-load case.  This final
1480 // case is placed after volatile-stores although it could just as well go
1481 // before volatile-loads.
1482 
1483 
1484 void LIRGenerator::do_StoreField(StoreField* x) {
1485   bool needs_patching = x->needs_patching();
1486   bool is_volatile = x->field()->is_volatile();
1487   BasicType field_type = x->field_type();

1488 
1489   CodeEmitInfo* info = NULL;
1490   if (needs_patching) {
1491     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1492     info = state_for(x, x->state_before());
1493   } else if (x->needs_null_check()) {
1494     NullCheck* nc = x->explicit_null_check();
1495     if (nc == NULL) {
1496       info = state_for(x);
1497     } else {
1498       info = state_for(nc);
1499     }
1500   }
1501 
1502 
1503   LIRItem object(x->obj(), this);
1504   LIRItem value(x->value(),  this);
1505 
1506   object.load_item();
1507 


1520   }
1521 
1522   set_no_result(x);
1523 
1524 #ifndef PRODUCT
1525   if (PrintNotLoaded && needs_patching) {
1526     tty->print_cr("   ###class not loaded at store_%s bci %d",
1527                   x->is_static() ?  "static" : "field", x->printable_bci());
1528   }
1529 #endif
1530 
1531   if (x->needs_null_check() &&
1532       (needs_patching ||
1533        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1534     // Emit an explicit null check because the offset is too large.
1535     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1536     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1537     __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1538   }
1539 
1540   C1DecoratorSet decorators = C1_ACCESS_ON_HEAP;
1541   if (is_volatile) {
1542     decorators |= C1_MO_VOLATILE;
1543   }
1544   if (needs_patching) {
1545     decorators |= C1_NEEDS_PATCHING;
1546   }
1547 
1548   access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()), value.result(), info ? new CodeEmitInfo(info) : NULL, info);
1549 }
1550 
1551 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
1552   assert(x->is_pinned(), "");
1553   bool needs_range_check = x->compute_needs_range_check();
1554   bool use_length = x->length() != NULL;
1555   bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
1556   bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
1557                                          !get_jobject_constant(x->value())->is_null_object() ||
1558                                          x->should_profile());
1559 
1560   LIRItem array(x->array(), this);
1561   LIRItem index(x->index(), this);
1562   LIRItem value(x->value(), this);
1563   LIRItem length(this);
1564 
1565   array.load_item();
1566   index.load_nonconstant();
1567 
1568   if (use_length && needs_range_check) {
1569     length.set_instruction(x->length());
1570     length.load_item();
1571 
1572   }
1573   if (needs_store_check || x->check_boolean()) {
1574     value.load_item();
1575   } else {
1576     value.load_for_store(x->elt_type());
1577   }
1578 
1579   set_no_result(x);
1580 
1581   // the CodeEmitInfo must be duplicated for each different
1582   // LIR-instruction because spilling can occur anywhere between two
1583   // instructions and so the debug information must be different
1584   CodeEmitInfo* range_check_info = state_for(x);
1585   CodeEmitInfo* null_check_info = NULL;
1586   if (x->needs_null_check()) {
1587     null_check_info = new CodeEmitInfo(range_check_info);
1588   }
1589 
1590   if (GenerateRangeChecks && needs_range_check) {
1591     if (use_length) {
1592       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1593       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
1594     } else {
1595       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1596       // range_check also does the null check
1597       null_check_info = NULL;
1598     }
1599   }
1600 
1601   if (GenerateArrayStoreCheck && needs_store_check) {
1602     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1603     array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1604   }
1605 
1606   C1DecoratorSet decorators = C1_ACCESS_ON_HEAP | C1_ACCESS_ON_ARRAY;
1607   if (x->elt_type() == T_BOOLEAN && x->check_boolean()) {
1608     decorators |= C1_MASK_BOOLEAN;
1609   }
1610 
1611   access_store_at(decorators, x->elt_type(), array, index.result(), value.result(), NULL, null_check_info);
1612 }
1613 
1614 LIR_Opr LIRGenerator::access_cas_at(C1DecoratorSet decorators, BasicType type,
1615                                     LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
1616   BarrierSet* bs = Universe::heap()->barrier_set();
1617   C1BarrierSetCodeGen* code_gen = bs->c1_code_gen();
1618   return code_gen->cas_at(this, decorators, type,
1619                           base, offset, cmp_value, new_value);
1620 
1621 }
1622 
1623 LIR_Opr LIRGenerator::access_swap_at(C1DecoratorSet decorators, BasicType type,
1624                                      LIRItem& base, LIRItem& offset, LIRItem& value) {
1625   BarrierSet* bs = Universe::heap()->barrier_set();
1626   C1BarrierSetCodeGen* code_gen = bs->c1_code_gen();
1627   return code_gen->swap_at(this, decorators, type,
1628                            base, offset, value);
1629 }
1630 
1631 LIR_Opr LIRGenerator::access_add_at(C1DecoratorSet decorators, BasicType type,
1632                                     LIRItem& base, LIRItem& offset, LIRItem& value) {
1633   BarrierSet* bs = Universe::heap()->barrier_set();
1634   C1BarrierSetCodeGen* code_gen = bs->c1_code_gen();
1635   return code_gen->add_at(this, decorators, type,
1636                           base, offset, value);
1637 }
1638 
1639 void LIRGenerator::access_store_at(C1DecoratorSet decorators, BasicType type,
1640                                    LIRItem& base, LIR_Opr offset, LIR_Opr value,
1641                                    CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
1642   BarrierSet* bs = Universe::heap()->barrier_set();
1643   C1BarrierSetCodeGen* code_gen = bs->c1_code_gen();
1644   code_gen->store_at(this, decorators, type,
1645                      base, offset, value, patch_info, store_emit_info);
1646 }
1647 
1648 LIR_Opr LIRGenerator::access_load_at(C1DecoratorSet decorators, BasicType type,
1649                                      LIRItem& base, LIR_Opr offset,
1650                                      CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
1651   BarrierSet* bs = Universe::heap()->barrier_set();
1652   C1BarrierSetCodeGen* code_gen = bs->c1_code_gen();
1653   return code_gen->load_at(this, decorators, type,
1654                            base, offset, patch_info, load_emit_info);
1655 }
1656 
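These access_* wrappers carry the point of the gc_interface change: instead of switching on BarrierSet::kind(), the generator fetches the barrier set's C1BarrierSetCodeGen and delegates. Only the method names and parameter lists visible at the call sites above are taken from the patch; the sketch below is a self-contained illustration of the pattern using stub types, not the actual c1BarrierSetCodeGen.hpp.

// Stub declarations standing in for the real C1/LIR types (illustration only).
struct LIR_Opr {};
struct LIRItem {};
struct CodeEmitInfo;
class LIRGenerator;
typedef unsigned int C1DecoratorSet;
enum BasicType { T_OBJECT };

class C1BarrierSetCodeGen {
public:
  // One virtual hook per access kind; a collector overrides these to wrap the
  // plain access in its barriers, keeping per-GC knowledge out of LIRGenerator.
  virtual LIR_Opr load_at(LIRGenerator* gen, C1DecoratorSet decorators, BasicType type,
                          LIRItem& base, LIR_Opr offset,
                          CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) = 0;
  virtual void store_at(LIRGenerator* gen, C1DecoratorSet decorators, BasicType type,
                        LIRItem& base, LIR_Opr offset, LIR_Opr value,
                        CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) = 0;
  virtual ~C1BarrierSetCodeGen() {}
};

A G1 flavor would override load_at to emit the SATB keep-alive barrier for C1_ACCESS_ON_WEAK loads, and store_at to bracket the plain store with the pre/post barriers that the old version of this file emitted inline.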
1657 void LIRGenerator::do_LoadField(LoadField* x) {
1658   bool needs_patching = x->needs_patching();
1659   bool is_volatile = x->field()->is_volatile();
1660   BasicType field_type = x->field_type();
1661 
1662   CodeEmitInfo* info = NULL;
1663   if (needs_patching) {
1664     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1665     info = state_for(x, x->state_before());
1666   } else if (x->needs_null_check()) {
1667     NullCheck* nc = x->explicit_null_check();
1668     if (nc == NULL) {
1669       info = state_for(x);
1670     } else {
1671       info = state_for(nc);
1672     }
1673   }
1674 
1675   LIRItem object(x->obj(), this);


2111     __ shift_left(index_op, log2_scale, tmp);
2112     if (!TwoOperandLIRForm) {
2113       index_op = tmp;
2114     }
2115   }
2116 
2117   LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2118 #endif // !GENERATE_ADDRESS_IS_PREFERRED
2119   __ move(value.result(), addr);
2120 }
2121 
2122 
2123 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2124   BasicType type = x->basic_type();
2125   LIRItem src(x->object(), this);
2126   LIRItem off(x->offset(), this);
2127 
2128   off.load_item();
2129   src.load_item();
2130 
2131   C1DecoratorSet decorators = C1_ACCESS_ON_HEAP | C1_ACCESS_ON_ANONYMOUS;
2132 
2133   if (type == T_BOOLEAN) {
2134     decorators |= C1_MASK_BOOLEAN;
2135   }
2136   if (x->is_volatile()) {
2137     decorators |= C1_MO_VOLATILE;
2138   }
2139 
2140   LIR_Opr result = access_load_at(decorators, type,
2141                                   src, off.result(),
2142                                   NULL, NULL);
2143   set_result(x, result);
2144 }
2145 
2146 
2147 void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
2148   BasicType type = x->basic_type();
2149   LIRItem src(x->object(), this);
2150   LIRItem off(x->offset(), this);
2151   LIRItem data(x->value(), this);
2152 
2153   src.load_item();
2154   if (type == T_BOOLEAN || type == T_BYTE) {
2155     data.load_byte_item();
2156   } else {
2157     data.load_item();
2158   }
2159   off.load_item();
2160 
2161   set_no_result(x);
2162 
2163   C1DecoratorSet decorators = C1_ACCESS_ON_HEAP | C1_ACCESS_ON_ANONYMOUS;
2164   if (type == T_BOOLEAN) {
2165     decorators |= C1_MASK_BOOLEAN;
2166   }
2167   if (x->is_volatile()) {
2168     decorators |= C1_MO_VOLATILE;
2169   }
2170   access_store_at(decorators, type, src, off.result(), data.result(), NULL, NULL);
2171 }
2172 
2173 void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
2174   BasicType type = x->basic_type();
2175   LIRItem src(x->object(), this);
2176   LIRItem off(x->offset(), this);
2177   LIRItem value(x->value(), this);
2178 
2179   C1DecoratorSet decorators = C1_ACCESS_ON_HEAP | C1_ACCESS_ON_ANONYMOUS | C1_MO_VOLATILE;
2180 
2181   LIR_Opr result;
2182   if (x->is_add()) {
2183     result = access_add_at(decorators, type, src, off, value);
2184   } else {
2185     result = access_swap_at(decorators, type, src, off, value);
2186   }
2187   set_result(x, result);
2188 }
2189 
2190 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2191   int lng = x->length();
2192 
2193   for (int i = 0; i < lng; i++) {
2194     SwitchRange* one_range = x->at(i);
2195     int low_key = one_range->low_key();
2196     int high_key = one_range->high_key();
2197     BlockBegin* dest = one_range->sux();
2198     if (low_key == high_key) {
2199       __ cmp(lir_cond_equal, value, low_key);
2200       __ branch(lir_cond_equal, T_INT, dest);
2201     } else if (high_key - low_key == 1) {
2202       __ cmp(lir_cond_equal, value, low_key);
2203       __ branch(lir_cond_equal, T_INT, dest);
2204       __ cmp(lir_cond_equal, value, high_key);
2205       __ branch(lir_cond_equal, T_INT, dest);
2206     } else {
2207       LabelObj* L = new LabelObj();
2208       __ cmp(lir_cond_less, value, low_key);


3485   }
3486   return result;
3487 }
3488 
3489 void LIRGenerator::do_MemBar(MemBar* x) {
3490   if (os::is_MP()) {
3491     LIR_Code code = x->code();
3492     switch(code) {
3493       case lir_membar_acquire   : __ membar_acquire(); break;
3494       case lir_membar_release   : __ membar_release(); break;
3495       case lir_membar           : __ membar(); break;
3496       case lir_membar_loadload  : __ membar_loadload(); break;
3497       case lir_membar_storestore: __ membar_storestore(); break;
3498       case lir_membar_loadstore : __ membar_loadstore(); break;
3499       case lir_membar_storeload : __ membar_storeload(); break;
3500       default                   : ShouldNotReachHere(); break;
3501     }
3502   }
3503 }
3504 
3505 LIR_Opr LIRGenerator::mask_boolean(LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {

3506   LIR_Opr value_fixed = rlock_byte(T_BYTE);
3507   if (TwoOperandLIRForm) {
3508     __ move(value, value_fixed);
3509     __ logical_and(value_fixed, LIR_OprFact::intConst(1), value_fixed);
3510   } else {
3511     __ logical_and(value, LIR_OprFact::intConst(1), value_fixed);
3512   }
3513   LIR_Opr klass = new_register(T_METADATA);
3514   __ move(new LIR_Address(array, oopDesc::klass_offset_in_bytes(), T_ADDRESS), klass, null_check_info);
3515   null_check_info = NULL;
3516   LIR_Opr layout = new_register(T_INT);
3517   __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
3518   int diffbit = Klass::layout_helper_boolean_diffbit();
3519   __ logical_and(layout, LIR_OprFact::intConst(diffbit), layout);
3520   __ cmp(lir_cond_notEqual, layout, LIR_OprFact::intConst(0));
3521   __ cmove(lir_cond_notEqual, value_fixed, value, value_fixed, T_BYTE);
3522   value = value_fixed;
3523   return value;
3524 }
3525 
3526 LIR_Opr LIRGenerator::maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
3527   if (x->check_boolean()) {
3528     value = mask_boolean(array, value, null_check_info);
3529   }
3530   return value;
3531 }
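For reference, the masking that mask_boolean emits exists because boolean[] and byte[] share T_BYTE element accesses, so the array's Klass::layout_helper carries a "boolean diffbit" that distinguishes them at runtime. A minimal sketch of the decision, with an assumed diffbit position:

#include <cstdint>

const int32_t kBooleanDiffbit = 1 << 25;   // assumed bit position, illustration only

// If the destination turns out to be a boolean[], normalize the stored value
// to 0/1 (the cmove in the LIR above); byte[] stores pass through unchanged.
uint8_t mask_boolean_sketch(int32_t layout_helper, uint8_t value) {
  if ((layout_helper & kBooleanDiffbit) != 0) {
    return value & 1;
  }
  return value;
}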