src/share/vm/c1/c1_LIRGenerator.cpp

  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Compilation.hpp"
  27 #include "c1/c1_Defs.hpp"
  28 #include "c1/c1_FrameMap.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "ci/ciObjArray.hpp"
  36 #include "gc/shared/cardTableModRefBS.hpp"
  37 #include "runtime/arguments.hpp"
  38 #include "runtime/sharedRuntime.hpp"
  39 #include "runtime/stubRoutines.hpp"
  40 #include "runtime/vm_version.hpp"
  41 #include "utilities/bitMap.inline.hpp"
  42 #include "utilities/macros.hpp"
  43 #if INCLUDE_ALL_GCS
  44 #include "gc/g1/heapRegion.hpp"
  45 #endif // INCLUDE_ALL_GCS
  46 
  47 #ifdef ASSERT
  48 #define __ gen()->lir(__FILE__, __LINE__)->
  49 #else
  50 #define __ gen()->lir()->
  51 #endif
  52 
  53 #ifndef PATCHED_ADDR
  54 #define PATCHED_ADDR  (max_jint)
  55 #endif
  56 


 230   }
 231 }
 232 
 233 
 234 void LIRItem::load_for_store(BasicType type) {
 235   if (_gen->can_store_as_constant(value(), type)) {
 236     _result = value()->operand();
 237     if (!_result->is_constant()) {
 238       _result = LIR_OprFact::value_type(value()->type());
 239     }
 240   } else if (type == T_BYTE || type == T_BOOLEAN) {
 241     load_byte_item();
 242   } else {
 243     load_item();
 244   }
 245 }
 246 
 247 void LIRItem::load_item_force(LIR_Opr reg) {
 248   LIR_Opr r = result();
 249   if (r != reg) {
 250 #if !defined(ARM) && !defined(E500V2)
 251     if (r->type() != reg->type()) {
 252       // moves between different types need an intervening spill slot
 253       r = _gen->force_to_spill(r, reg->type());
 254     }
 255 #endif
 256     __ move(r, reg);
 257     _result = reg;
 258   }
 259 }
 260 
 261 ciObject* LIRItem::get_jobject_constant() const {
 262   ObjectType* oc = type()->as_ObjectType();
 263   if (oc) {
 264     return oc->constant_value();
 265   }
 266   return NULL;
 267 }
 268 
 269 
 270 jint LIRItem::get_jint_constant() const {
 271   assert(is_constant() && value() != NULL, "");
 272   assert(type()->as_IntConstant() != NULL, "type check");
 273   return type()->as_IntConstant()->value();
 274 }
 275 
 276 
 277 jint LIRItem::get_address_constant() const {


1405       }
1406       return _reg_for_constants.at(i);
1407     }
1408   }
1409 
1410   LIR_Opr result = new_register(t);
1411   __ move((LIR_Opr)c, result);
1412   _constants.append(c);
1413   _reg_for_constants.append(result);
1414   return result;
1415 }
1416 
1417 // Various barriers
1418 
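     // pre_barrier() is emitted before a reference store: for SATB collectors
     // it logs the value being overwritten so concurrent marking sees a
     // consistent snapshot. post_barrier() is emitted after the store: for
     // card-table collectors it dirties the card covering the updated object.
     // Which barriers apply is dispatched on _bs->kind().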
1419 void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
1420                                bool do_load, bool patch, CodeEmitInfo* info) {
1421   // Do the pre-write barrier, if any.
1422   switch (_bs->kind()) {
1423 #if INCLUDE_ALL_GCS
1424     case BarrierSet::G1SATBCTLogging:
1425       G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
1426       break;
1427 #endif // INCLUDE_ALL_GCS
1428     case BarrierSet::CardTableForRS:
1429     case BarrierSet::CardTableExtension:
1430       // No pre barriers
1431       break;
1432     case BarrierSet::ModRef:
1433       // No pre barriers
1434       break;
1435     default      :
1436       ShouldNotReachHere();
1437 
1438   }
1439 }
1440 
1441 void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1442   switch (_bs->kind()) {
1443 #if INCLUDE_ALL_GCS
1444     case BarrierSet::G1SATBCTLogging:
1445       G1SATBCardTableModRef_post_barrier(addr,  new_val);
1446       break;
1447 #endif // INCLUDE_ALL_GCS
1448     case BarrierSet::CardTableForRS:
1449     case BarrierSet::CardTableExtension:
1450       CardTableModRef_post_barrier(addr,  new_val);
1451       break;
1452     case BarrierSet::ModRef:
1453       // No post barriers
1454       break;
1455     default      :
1456       ShouldNotReachHere();
1457     }
1458 }
1459 
1460 ////////////////////////////////////////////////////////////////////////
1461 #if INCLUDE_ALL_GCS
1462 
1463 void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
1464                                                      bool do_load, bool patch, CodeEmitInfo* info) {
1465   // First we test whether marking is in progress.
1466   BasicType flag_type;


1697     // load item if field not constant
1698     // because of code patching we cannot inline constants
1699     if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1700       value.load_byte_item();
1701     } else  {
1702       value.load_item();
1703     }
1704   } else {
1705     value.load_for_store(field_type);
1706   }
1707 
1708   set_no_result(x);
1709 
1710 #ifndef PRODUCT
1711   if (PrintNotLoaded && needs_patching) {
1712     tty->print_cr("   ###class not loaded at store_%s bci %d",
1713                   x->is_static() ?  "static" : "field", x->printable_bci());
1714   }
1715 #endif
1716 
1717   if (x->needs_null_check() &&
1718       (needs_patching ||
1719        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1720     // emit an explicit null check because the offset is too large
1721     __ null_check(object.result(), new CodeEmitInfo(info));
1722   }
1723 
1724   LIR_Address* address;
1725   if (needs_patching) {
1726     // we need to patch the offset in the instruction so don't allow
1727     // generate_address to try to be smart about emitting the -1.
1728     // Otherwise the patching code won't know how to find the
1729     // instruction to patch.
1730     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1731   } else {
1732     address = generate_address(object.result(), x->offset(), field_type);
1733   }
1734 
1735   if (is_volatile && os::is_MP()) {
1736     __ membar_release();
1737   }
1738 
1739   if (is_oop) {
1740     // Do the pre-write barrier, if any.
1741     pre_barrier(LIR_OprFact::address(address),
1742                 LIR_OprFact::illegalOpr /* pre_val */,
1743                 true /* do_load */,
1744                 needs_patching,
1745                 (info ? new CodeEmitInfo(info) : NULL));
1746   }
1747 
1748   bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
1749   if (needs_atomic_access && !needs_patching) {
1750     volatile_field_store(value.result(), address, info);
1751   } else {
1752     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1753     __ store(value.result(), address, info, patch_code);
1754   }
1755 
1756   if (is_oop) {
1757     // We are storing into an object, so mark the card covering its header
1758     post_barrier(object.result(), value.result());
1759   }
1760 
1761   if (is_volatile && os::is_MP()) {
1762     __ membar();
1763   }
1764 }
1765 
1766 
1767 void LIRGenerator::do_LoadField(LoadField* x) {
1768   bool needs_patching = x->needs_patching();
1769   bool is_volatile = x->field()->is_volatile();
1770   BasicType field_type = x->field_type();
1771 
1772   CodeEmitInfo* info = NULL;
1773   if (needs_patching) {
1774     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1775     info = state_for(x, x->state_before());
1776   } else if (x->needs_null_check()) {
1777     NullCheck* nc = x->explicit_null_check();
1778     if (nc == NULL) {
1779       info = state_for(x);
1780     } else {
1781       info = state_for(nc);
1782     }
1783   }
1784 
1785   LIRItem object(x->obj(), this);
1786 
1787   object.load_item();
1788 
1789 #ifndef PRODUCT
1790   if (PrintNotLoaded && needs_patching) {
1791     tty->print_cr("   ###class not loaded at load_%s bci %d",
1792                   x->is_static() ?  "static" : "field", x->printable_bci());
1793   }
1794 #endif
1795 
1796   bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1797   if (x->needs_null_check() &&
1798       (needs_patching ||
1799        MacroAssembler::needs_explicit_null_check(x->offset()) ||
1800        stress_deopt)) {
1801     LIR_Opr obj = object.result();
1802     if (stress_deopt) {
1803       obj = new_register(T_OBJECT);
1804       __ move(LIR_OprFact::oopConst(NULL), obj);
1805     }
1806     // emit an explicit null check because the offset is too large
1807     __ null_check(obj, new CodeEmitInfo(info));
1808   }
1809 
1810   LIR_Opr reg = rlock_result(x, field_type);
1811   LIR_Address* address;
1812   if (needs_patching) {
1813     // we need to patch the offset in the instruction so don't allow
1814     // generate_address to try to be smart about emitting the -1.
1815     // Otherwise the patching code won't know how to find the
1816     // instruction to patch.
1817     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1818   } else {
1819     address = generate_address(object.result(), x->offset(), field_type);
1820   }
1821 
1822   bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
1823   if (needs_atomic_access && !needs_patching) {
1824     volatile_field_load(address, reg, info);
1825   } else {
1826     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1827     __ load(address, reg, info, patch_code);
1828   }
1829 
1830   if (is_volatile && os::is_MP()) {
1831     __ membar_acquire();
1832   }
1833 }
1834 
1835 
1836 //------------------------java.nio.Buffer.checkIndex------------------------
1837 
1838 // int java.nio.Buffer.checkIndex(int)
1839 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1840   // NOTE: by the time we are in checkIndex() we are guaranteed that
1841   // the buffer is non-null (because checkIndex is package-private and
1842   // only called from within other methods in the buffer).
1843   assert(x->number_of_arguments() == 2, "wrong type");
1844   LIRItem buf  (x->argument_at(0), this);
1845   LIRItem index(x->argument_at(1), this);
1846   buf.load_item();
1847   index.load_item();
1848 
1849   LIR_Opr result = rlock_result(x);
1850   if (GenerateRangeChecks) {
1851     CodeEmitInfo* info = state_for(x);
1852     CodeStub* stub = new RangeCheckStub(info, index.result(), true);
1853     if (index.result()->is_constant()) {
1854       cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);


1911   } else {
1912     index.load_item();
1913   }
1914 
1915   CodeEmitInfo* range_check_info = state_for(x);
1916   CodeEmitInfo* null_check_info = NULL;
1917   if (x->needs_null_check()) {
1918     NullCheck* nc = x->explicit_null_check();
1919     if (nc != NULL) {
1920       null_check_info = state_for(nc);
1921     } else {
1922       null_check_info = range_check_info;
1923     }
1924     if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
1925       LIR_Opr obj = new_register(T_OBJECT);
1926       __ move(LIR_OprFact::oopConst(NULL), obj);
1927       __ null_check(obj, new CodeEmitInfo(null_check_info));
1928     }
1929   }
1930 
1931   // emit array address setup early so it schedules better
1932   LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
1933 
1934   if (GenerateRangeChecks && needs_range_check) {
1935     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1936       __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
1937     } else if (use_length) {
1938       // TODO: use a (modified) version of array_range_check that does not require a
1939       //       constant length to be loaded to a register
1940       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1941       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
1942     } else {
1943       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1944       // The range check performs the null check, so clear it out for the load
1945       null_check_info = NULL;
1946     }
1947   }
1948 
1949   __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
1950 }
1951 
1952 
1953 void LIRGenerator::do_NullCheck(NullCheck* x) {
1954   if (x->can_trap()) {
1955     LIRItem value(x->obj(), this);
1956     value.load_item();
1957     CodeEmitInfo* info = state_for(x);
1958     __ null_check(value.result(), info);
1959   }
1960 }
1961 
1962 
1963 void LIRGenerator::do_TypeCast(TypeCast* x) {


2236   LIR_Opr value = rlock_result(x, x->basic_type());
2237 
2238   get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2239 
2240 #if INCLUDE_ALL_GCS
2241   // We might be reading the value of the referent field of a
2242   // Reference object in order to attach it back to the live
2243   // object graph. If G1 is enabled then we need to record
2244   // the value that is being returned in an SATB log buffer.
2245   //
2246   // We need to generate code similar to the following...
2247   //
2248   // if (offset == java_lang_ref_Reference::referent_offset) {
2249   //   if (src != NULL) {
2250   //     if (klass(src)->reference_type() != REF_NONE) {
2251   //       pre_barrier(..., value, ...);
2252   //     }
2253   //   }
2254   // }
2255 
2256   if (UseG1GC && type == T_OBJECT) {
2257     bool gen_pre_barrier = true;     // Assume we need to generate pre_barrier.
2258     bool gen_offset_check = true;    // Assume we need to generate the offset guard.
2259     bool gen_source_check = true;    // Assume we need to check the src object for null.
2260     bool gen_type_check = true;      // Assume we need to check the reference_type.
2261 
2262     if (off.is_constant()) {
2263       jlong off_con = (off.type()->is_int() ?
2264                         (jlong) off.get_jint_constant() :
2265                         off.get_jlong_constant());
2266 
2267 
2268       if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
2269         // The constant offset is something other than referent_offset.
2270         // We can skip generating/checking the remaining guards and
2271         // skip generation of the code stub.
2272         gen_pre_barrier = false;
2273       } else {
2274         // The constant offset is the same as referent_offset -
2275         // we do not need to generate a runtime offset check.
2276         gen_offset_check = false;


2778     call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
2779   }
2780 
2781   if (method()->is_synchronized()) {
2782     LIR_Opr obj;
2783     if (method()->is_static()) {
2784       obj = new_register(T_OBJECT);
2785       __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
2786     } else {
2787       Local* receiver = x->state()->local_at(0)->as_Local();
2788       assert(receiver != NULL, "must already exist");
2789       obj = receiver->operand();
2790     }
2791     assert(obj->is_valid(), "must be valid");
2792 
2793     if (method()->is_synchronized() && GenerateSynchronizationCode) {
2794       LIR_Opr lock = new_register(T_INT);
2795       __ load_stack_address_monitor(0, lock);
2796 
2797       CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
2798       CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2799 
2800       // receiver is guaranteed non-NULL so don't need CodeEmitInfo
2801       __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
2802     }
2803   }
2804   if (compilation()->age_code()) {
2805     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), NULL, false);
2806     decrement_age(info);
2807   }
2808   // increment invocation counters if needed
2809   if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2810     profile_parameters(x);
2811     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
2812     increment_invocation_counter(info);
2813   }
2814 
2815   // all blocks with a successor must end with an unconditional jump
2816   // to the successor even if they are consecutive
2817   __ jump(x->default_sux());


2998   if (result_register->is_valid()) {
2999     LIR_Opr result = rlock_result(x);
3000     __ move(result_register, result);
3001   }
3002 }
3003 
3004 
3005 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
3006   assert(x->number_of_arguments() == 1, "wrong type");
3007   LIRItem value       (x->argument_at(0), this);
3008   LIR_Opr reg = rlock_result(x);
3009   value.load_item();
3010   LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
3011   __ move(tmp, reg);
3012 }
3013 
3014 
3015 
3016 // Code for  :  x->x() {x->cond()} x->y() ? x->tval() : x->fval()
3017 void LIRGenerator::do_IfOp(IfOp* x) {
3018 #ifdef ASSERT
3019   {
3020     ValueTag xtag = x->x()->type()->tag();
3021     ValueTag ttag = x->tval()->type()->tag();
3022     assert(xtag == intTag || xtag == objectTag, "cannot handle others");
3023     assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
3024     assert(ttag == x->fval()->type()->tag(), "cannot handle others");
3025   }
3026 #endif
3027 
3028   LIRItem left(x->x(), this);
3029   LIRItem right(x->y(), this);
3030   left.load_item();
3031   if (can_inline_as_constant(right.value())) {
3032     right.dont_load_item();
3033   } else {
3034     right.load_item();
3035   }
3036 
3037   LIRItem t_val(x->tval(), this);
3038   LIRItem f_val(x->fval(), this);
3039   t_val.dont_load_item();
3040   f_val.dont_load_item();
3041   LIR_Opr reg = rlock_result(x);
3042 
3043   __ cmp(lir_cond(x->cond()), left.result(), right.result());
3044   __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
3045 }
3046 
3047 void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) {
3048     assert(x->number_of_arguments() == expected_arguments, "wrong type");
3049     LIR_Opr reg = result_register_for(x->type());
3050     __ call_runtime_leaf(routine, getThreadTemp(),
3051                          reg, new LIR_OprList());
3052     LIR_Opr result = rlock_result(x);
3053     __ move(reg, result);
3054 }
3055 
3056 #ifdef TRACE_HAVE_INTRINSICS
3057 void LIRGenerator::do_ThreadIDIntrinsic(Intrinsic* x) {
3058     LIR_Opr thread = getThreadPointer();
3059     LIR_Opr osthread = new_pointer_register();
3060     __ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread);
3061     size_t thread_id_size = OSThread::thread_id_size();
3062     if (thread_id_size == (size_t) BytesPerLong) {
3063       LIR_Opr id = new_register(T_LONG);




  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Compilation.hpp"
  27 #include "c1/c1_Defs.hpp"
  28 #include "c1/c1_FrameMap.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "ci/ciObjArray.hpp"
  36 #include "gc/shared/cardTableModRefBS.hpp"
  37 #include "gc/shenandoah/brooksPointer.hpp"
  38 #include "runtime/arguments.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/stubRoutines.hpp"
  41 #include "runtime/vm_version.hpp"
  42 #include "utilities/bitMap.inline.hpp"
  43 #include "utilities/macros.hpp"
  44 #if INCLUDE_ALL_GCS
  45 #include "gc/g1/heapRegion.hpp"
  46 #endif // INCLUDE_ALL_GCS
  47 
  48 #ifdef ASSERT
  49 #define __ gen()->lir(__FILE__, __LINE__)->
  50 #else
  51 #define __ gen()->lir()->
  52 #endif
  53 
  54 #ifndef PATCHED_ADDR
  55 #define PATCHED_ADDR  (max_jint)
  56 #endif
  57 


 231   }
 232 }
 233 
 234 
 235 void LIRItem::load_for_store(BasicType type) {
 236   if (_gen->can_store_as_constant(value(), type)) {
 237     _result = value()->operand();
 238     if (!_result->is_constant()) {
 239       _result = LIR_OprFact::value_type(value()->type());
 240     }
 241   } else if (type == T_BYTE || type == T_BOOLEAN) {
 242     load_byte_item();
 243   } else {
 244     load_item();
 245   }
 246 }
 247 
 248 void LIRItem::load_item_force(LIR_Opr reg) {
 249   LIR_Opr r = result();
 250   if (r != reg) {
 251     _result = _gen->force_opr_to(r, reg);
 252   }
 253 }
 254 
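     // Factored-out helper (now also used by LIRItem::load_item_force above):
     // moves op into reg, routing moves between operands of different types
     // through an intervening spill slot on most platforms.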
 255 LIR_Opr LIRGenerator::force_opr_to(LIR_Opr op, LIR_Opr reg) {
 256   if (op != reg) {
 257 #if !defined(ARM) && !defined(E500V2)
 258     if (op->type() != reg->type()) {
 259       // moves between different types need an intervening spill slot
 260       op = force_to_spill(op, reg->type());
 261     }
 262 #endif
 263     __ move(op, reg);
 264     return reg;
 265   } else {
 266     return op;
 267   }
 268 }
 269 
 270 ciObject* LIRItem::get_jobject_constant() const {
 271   ObjectType* oc = type()->as_ObjectType();
 272   if (oc) {
 273     return oc->constant_value();
 274   }
 275   return NULL;
 276 }
 277 
 278 
 279 jint LIRItem::get_jint_constant() const {
 280   assert(is_constant() && value() != NULL, "");
 281   assert(type()->as_IntConstant() != NULL, "type check");
 282   return type()->as_IntConstant()->value();
 283 }
 284 
 285 
 286 jint LIRItem::get_address_constant() const {


1414       }
1415       return _reg_for_constants.at(i);
1416     }
1417   }
1418 
1419   LIR_Opr result = new_register(t);
1420   __ move((LIR_Opr)c, result);
1421   _constants.append(c);
1422   _reg_for_constants.append(result);
1423   return result;
1424 }
1425 
1426 // Various barriers
1427 
1428 void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
1429                                bool do_load, bool patch, CodeEmitInfo* info) {
1430   // Do the pre-write barrier, if any.
1431   switch (_bs->kind()) {
1432 #if INCLUDE_ALL_GCS
1433     case BarrierSet::G1SATBCTLogging:
1434     case BarrierSet::ShenandoahBarrierSet:
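           // Shenandoah reuses G1's SATB pre-barrier: both collectors log the
           // previous value of the field for concurrent,
           // snapshot-at-the-beginning marking.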
1435       G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
1436       break;
1437 #endif // INCLUDE_ALL_GCS
1438     case BarrierSet::CardTableForRS:
1439     case BarrierSet::CardTableExtension:
1440       // No pre barriers
1441       break;
1442     case BarrierSet::ModRef:
1443       // No pre barriers
1444       break;
1445     default      :
1446       ShouldNotReachHere();
1447 
1448   }
1449 }
1450 
1451 void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1452   switch (_bs->kind()) {
1453 #if INCLUDE_ALL_GCS
1454     case BarrierSet::G1SATBCTLogging:
1455       G1SATBCardTableModRef_post_barrier(addr,  new_val);
1456       break;
1457     case BarrierSet::ShenandoahBarrierSet:
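           // Shenandoah does no card marking, so there is no post-write
           // barrier work to emit.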
1458       break;
1459 #endif // INCLUDE_ALL_GCS
1460     case BarrierSet::CardTableForRS:
1461     case BarrierSet::CardTableExtension:
1462       CardTableModRef_post_barrier(addr,  new_val);
1463       break;
1464     case BarrierSet::ModRef:
1465       // No post barriers
1466       break;
1467     default      :
1468       ShouldNotReachHere();
1469     }
1470 }
1471 
1472 ////////////////////////////////////////////////////////////////////////
1473 #if INCLUDE_ALL_GCS
1474 
1475 void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
1476                                                      bool do_load, bool patch, CodeEmitInfo* info) {
1477   // First we test whether marking is in progress.
1478   BasicType flag_type;


1709     // load item if field not constant
1710     // because of code patching we cannot inline constants
1711     if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1712       value.load_byte_item();
1713     } else  {
1714       value.load_item();
1715     }
1716   } else {
1717     value.load_for_store(field_type);
1718   }
1719 
1720   set_no_result(x);
1721 
1722 #ifndef PRODUCT
1723   if (PrintNotLoaded && needs_patching) {
1724     tty->print_cr("   ###class not loaded at store_%s bci %d",
1725                   x->is_static() ?  "static" : "field", x->printable_bci());
1726   }
1727 #endif
1728 
1729   LIR_Opr obj = object.result();
1730 
1731   if (x->needs_null_check() &&
1732       (needs_patching ||
1733        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1734     // emit an explicit null check because the offset is too large
1735     __ null_check(obj, new CodeEmitInfo(info));
1736   }
1737 
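       // Under Shenandoah the store must hit the to-space copy: the receiver
       // is resolved with a write barrier, and a stored oop (below) with a
       // read barrier, so that no from-space reference is written.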
1738   obj = shenandoah_write_barrier(obj, info, x->needs_null_check());
1739   LIR_Opr val = value.result();
1740   if (is_oop && UseShenandoahGC) {
1741     if (! val->is_register()) {
1742       assert(val->is_constant(), "expect constant");
1743     } else {
1744       val = shenandoah_read_barrier(val, NULL, true);
1745     }
1746   }
1747 
1748   LIR_Address* address;
1749   if (needs_patching) {
1750     // we need to patch the offset in the instruction so don't allow
1751     // generate_address to try to be smart about emitting the -1.
1752     // Otherwise the patching code won't know how to find the
1753     // instruction to patch.
1754     address = new LIR_Address(obj, PATCHED_ADDR, field_type);
1755   } else {
1756     address = generate_address(obj, x->offset(), field_type);
1757   }
1758 
1759   if (is_volatile && os::is_MP()) {
1760     __ membar_release();
1761   }
1762 
1763   if (is_oop) {
1764     // Do the pre-write barrier, if any.
1765     pre_barrier(LIR_OprFact::address(address),
1766                 LIR_OprFact::illegalOpr /* pre_val */,
1767                 true /* do_load */,
1768                 needs_patching,
1769                 (info ? new CodeEmitInfo(info) : NULL));
1770   }
1771 
1772   bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
1773   if (needs_atomic_access && !needs_patching) {
1774     volatile_field_store(val, address, info);
1775   } else {
1776     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1777     __ store(val, address, info, patch_code);
1778   }
1779 
1780   if (is_oop) {
1781     // We are storing into an object, so mark the card covering its header
1782     post_barrier(obj, val);
1783   }
1784 
1785   if (is_volatile && os::is_MP()) {
1786     __ membar();
1787   }
1788 }
1789 
1790 
1791 void LIRGenerator::do_LoadField(LoadField* x) {
1792   bool needs_patching = x->needs_patching();
1793   bool is_volatile = x->field()->is_volatile();
1794   BasicType field_type = x->field_type();
1795 
1796   CodeEmitInfo* info = NULL;
1797   if (needs_patching) {
1798     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1799     info = state_for(x, x->state_before());
1800   } else if (x->needs_null_check()) {
1801     NullCheck* nc = x->explicit_null_check();
1802     if (nc == NULL) {
1803       info = state_for(x);
1804     } else {
1805       info = state_for(nc);
1806     }
1807   }
1808 
1809   LIRItem object(x->obj(), this);
1810 
1811   object.load_item();
1812 
1813 #ifndef PRODUCT
1814   if (PrintNotLoaded && needs_patching) {
1815     tty->print_cr("   ###class not loaded at load_%s bci %d",
1816                   x->is_static() ?  "static" : "field", x->printable_bci());
1817   }
1818 #endif
1819 
1820   LIR_Opr obj = object.result();
1821   bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1822   if (x->needs_null_check() &&
1823       (needs_patching ||
1824        MacroAssembler::needs_explicit_null_check(x->offset()) ||
1825        stress_deopt)) {
1826     if (stress_deopt) {
1827       obj = new_register(T_OBJECT);
1828       __ move(LIR_OprFact::oopConst(NULL), obj);
1829     }
1830     // emit an explicit null check because the offset is too large
1831     __ null_check(obj, new CodeEmitInfo(info));
1832   }
1833 
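       // Resolve the receiver through a read barrier so the load sees the
       // current copy of the object; the barrier emits its own null check
       // only when an explicit NullCheck instruction is present.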
1834   obj = shenandoah_read_barrier(obj, info, x->needs_null_check() && x->explicit_null_check() != NULL);
1835   LIR_Opr reg = rlock_result(x, field_type);
1836   LIR_Address* address;
1837   if (needs_patching) {
1838     // we need to patch the offset in the instruction so don't allow
1839     // generate_address to try to be smart about emitting the -1.
1840     // Otherwise the patching code won't know how to find the
1841     // instruction to patch.
1842     address = new LIR_Address(obj, PATCHED_ADDR, field_type);
1843   } else {
1844     address = generate_address(obj, x->offset(), field_type);
1845   }
1846 
1847   bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
1848   if (needs_atomic_access && !needs_patching) {
1849     volatile_field_load(address, reg, info);
1850   } else {
1851     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1852     __ load(address, reg, info, patch_code);
1853   }
1854 
1855   if (is_volatile && os::is_MP()) {
1856     __ membar_acquire();
1857   }
1858 }
1859 
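     // Shenandoah read barrier: each object carries a Brooks-style forwarding
     // pointer in the word just before it (BrooksPointer::BYTE_OFFSET), and
     // loading through that pointer yields the object's current copy. With
     // need_null_check set, a null input bypasses the indirection.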
1860 LIR_Opr LIRGenerator::shenandoah_read_barrier(LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
1861   if (UseShenandoahGC) {
1862 
1863     LabelObj* done = new LabelObj();
1864     LIR_Opr result = new_register(T_OBJECT);
1865     __ move(obj, result);
1866     if (need_null_check) {
1867       __ cmp(lir_cond_equal, result, LIR_OprFact::oopConst(NULL));
1868       __ branch(lir_cond_equal, T_LONG, done->label());
1869     }
1870     LIR_Address* brooks_ptr_address = generate_address(result, BrooksPointer::BYTE_OFFSET, T_ADDRESS);
1871     __ load(brooks_ptr_address, result, info ? new CodeEmitInfo(info) : NULL, lir_patch_none);
1872 
1873     __ branch_destination(done->label());
1874     return result;
1875   } else {
1876     return obj;
1877   }
1878 }
1879 
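     // Shenandoah write barrier: resolves obj for mutation, copying it to
     // to-space via the shenandoah_wb slow path when evacuation requires it,
     // and returns the possibly forwarded reference.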
1880 LIR_Opr LIRGenerator::shenandoah_write_barrier(LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
1881   if (UseShenandoahGC) {
1882 
1883     LIR_Opr result = new_register(T_OBJECT);
1884     LIR_Opr tmp1 = new_register(T_INT);
1885     LIR_Opr tmp2 = new_register(T_INT);
1886     __ shenandoah_wb(obj, result, tmp1, tmp2, info ? new CodeEmitInfo(info) : NULL, need_null_check);
1887     return result;
1888 
1889   } else {
1890     return obj;
1891   }
1892 }
1893 
1894 //------------------------java.nio.Buffer.checkIndex------------------------
1895 
1896 // int java.nio.Buffer.checkIndex(int)
1897 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1898   // NOTE: by the time we are in checkIndex() we are guaranteed that
1899   // the buffer is non-null (because checkIndex is package-private and
1900   // only called from within other methods in the buffer).
1901   assert(x->number_of_arguments() == 2, "wrong type");
1902   LIRItem buf  (x->argument_at(0), this);
1903   LIRItem index(x->argument_at(1), this);
1904   buf.load_item();
1905   index.load_item();
1906 
1907   LIR_Opr result = rlock_result(x);
1908   if (GenerateRangeChecks) {
1909     CodeEmitInfo* info = state_for(x);
1910     CodeStub* stub = new RangeCheckStub(info, index.result(), true);
1911     if (index.result()->is_constant()) {
1912       cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);


1969   } else {
1970     index.load_item();
1971   }
1972 
1973   CodeEmitInfo* range_check_info = state_for(x);
1974   CodeEmitInfo* null_check_info = NULL;
1975   if (x->needs_null_check()) {
1976     NullCheck* nc = x->explicit_null_check();
1977     if (nc != NULL) {
1978       null_check_info = state_for(nc);
1979     } else {
1980       null_check_info = range_check_info;
1981     }
1982     if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
1983       LIR_Opr obj = new_register(T_OBJECT);
1984       __ move(LIR_OprFact::oopConst(NULL), obj);
1985       __ null_check(obj, new CodeEmitInfo(null_check_info));
1986     }
1987   }
1988 
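       // The array operand may point at a from-space copy; resolve it through
       // a read barrier before the element address is formed.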
1989   LIR_Opr ary = array.result();
1990   ary = shenandoah_read_barrier(ary, null_check_info, null_check_info != NULL);
1991 
1992   // emit array address setup early so it schedules better
1993   LIR_Address* array_addr = emit_array_address(ary, index.result(), x->elt_type(), false);
1994 
1995   if (GenerateRangeChecks && needs_range_check) {
1996     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1997       __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
1998     } else if (use_length) {
1999       // TODO: use a (modified) version of array_range_check that does not require a
2000       //       constant length to be loaded to a register
2001       __ cmp(lir_cond_belowEqual, length.result(), index.result());
2002       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
2003     } else {
2004       array_range_check(ary, index.result(), null_check_info, range_check_info);
2005       // The range check performs the null check, so clear it out for the load
2006       null_check_info = NULL;
2007     }
2008   }
2009 
2010   __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
2011 }
2012 
2013 
2014 void LIRGenerator::do_NullCheck(NullCheck* x) {
2015   if (x->can_trap()) {
2016     LIRItem value(x->obj(), this);
2017     value.load_item();
2018     CodeEmitInfo* info = state_for(x);
2019     __ null_check(value.result(), info);
2020   }
2021 }
2022 
2023 
2024 void LIRGenerator::do_TypeCast(TypeCast* x) {


2297   LIR_Opr value = rlock_result(x, x->basic_type());
2298 
2299   get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2300 
2301 #if INCLUDE_ALL_GCS
2302   // We might be reading the value of the referent field of a
2303   // Reference object in order to attach it back to the live
2304   // object graph. If G1 is enabled then we need to record
2305   // the value that is being returned in an SATB log buffer.
2306   //
2307   // We need to generate code similar to the following...
2308   //
2309   // if (offset == java_lang_ref_Reference::referent_offset) {
2310   //   if (src != NULL) {
2311   //     if (klass(src)->reference_type() != REF_NONE) {
2312   //       pre_barrier(..., value, ...);
2313   //     }
2314   //   }
2315   // }
2316 
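       // Shenandoah marks with the same SATB discipline as G1, so the
       // Reference.get() pre-barrier below is needed for it as well.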
2317   if ((UseShenandoahGC || UseG1GC) && type == T_OBJECT) {
2318     bool gen_pre_barrier = true;     // Assume we need to generate pre_barrier.
2319     bool gen_offset_check = true;    // Assume we need to generate the offset guard.
2320     bool gen_source_check = true;    // Assume we need to check the src object for null.
2321     bool gen_type_check = true;      // Assume we need to check the reference_type.
2322 
2323     if (off.is_constant()) {
2324       jlong off_con = (off.type()->is_int() ?
2325                         (jlong) off.get_jint_constant() :
2326                         off.get_jlong_constant());
2327 
2328 
2329       if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
2330         // The constant offset is something other than referent_offset.
2331         // We can skip generating/checking the remaining guards and
2332         // skip generation of the code stub.
2333         gen_pre_barrier = false;
2334       } else {
2335         // The constant offset is the same as referent_offset -
2336         // we do not need to generate a runtime offset check.
2337         gen_offset_check = false;


2839     call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
2840   }
2841 
2842   if (method()->is_synchronized()) {
2843     LIR_Opr obj;
2844     if (method()->is_static()) {
2845       obj = new_register(T_OBJECT);
2846       __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
2847     } else {
2848       Local* receiver = x->state()->local_at(0)->as_Local();
2849       assert(receiver != NULL, "must already exist");
2850       obj = receiver->operand();
2851     }
2852     assert(obj->is_valid(), "must be valid");
2853 
2854     if (method()->is_synchronized() && GenerateSynchronizationCode) {
2855       LIR_Opr lock = new_register(T_INT);
2856       __ load_stack_address_monitor(0, lock);
2857 
2858       CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
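           // Locking writes to the object header, so the monitor receiver
           // must be resolved to its to-space copy first.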
2859       obj = shenandoah_write_barrier(obj, info, false);
2860       CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2861 
2862       // receiver is guaranteed non-NULL so don't need CodeEmitInfo
2863       __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
2864     }
2865   }
2866   if (compilation()->age_code()) {
2867     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), NULL, false);
2868     decrement_age(info);
2869   }
2870   // increment invocation counters if needed
2871   if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2872     profile_parameters(x);
2873     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
2874     increment_invocation_counter(info);
2875   }
2876 
2877   // all blocks with a successor must end with an unconditional jump
2878   // to the successor even if they are consecutive
2879   __ jump(x->default_sux());


3060   if (result_register->is_valid()) {
3061     LIR_Opr result = rlock_result(x);
3062     __ move(result_register, result);
3063   }
3064 }
3065 
3066 
3067 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
3068   assert(x->number_of_arguments() == 1, "wrong type");
3069   LIRItem value       (x->argument_at(0), this);
3070   LIR_Opr reg = rlock_result(x);
3071   value.load_item();
3072   LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
3073   __ move(tmp, reg);
3074 }
3075 
3076 
3077 
3078 // Code for  :  x->x() {x->cond()} x->y() ? x->tval() : x->fval()
3079 void LIRGenerator::do_IfOp(IfOp* x) {
3080   ValueTag xtag = x->x()->type()->tag();
3081 #ifdef ASSERT
3082   {
3083     ValueTag ttag = x->tval()->type()->tag();
3084     assert(xtag == intTag || xtag == objectTag, "cannot handle others");
3085     assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
3086     assert(ttag == x->fval()->type()->tag(), "cannot handle others");
3087   }
3088 #endif
3089 
3090   LIRItem left(x->x(), this);
3091   LIRItem right(x->y(), this);
3092   left.load_item();
3093   if (can_inline_as_constant(right.value())) {
3094     right.dont_load_item();
3095   } else {
3096     right.load_item();
3097   }
3098 
3099   LIRItem t_val(x->tval(), this);
3100   LIRItem f_val(x->fval(), this);
3101   t_val.dont_load_item();
3102   f_val.dont_load_item();
3103   LIR_Opr reg = rlock_result(x);
3104 
3105   LIR_Opr left_opr = left.result();
3106   LIR_Opr right_opr = right.result();
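       // Object comparisons must be performed on canonical copies under
       // Shenandoah, or two copies of one object could compare unequal;
       // comparing against null (ifnull) needs no resolution.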
3107   if (xtag == objectTag && UseShenandoahGC && x->y()->type() != objectNull) { // Don't need to resolve for ifnull.
3108     left_opr = shenandoah_write_barrier(left_opr, NULL, true);
3109     right_opr = shenandoah_read_barrier(right_opr, NULL, true);
3110   }
3111 
3112   __ cmp(lir_cond(x->cond()), left_opr, right_opr);
3113   __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
3114 }
3115 
3116 void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) {
3117     assert(x->number_of_arguments() == expected_arguments, "wrong type");
3118     LIR_Opr reg = result_register_for(x->type());
3119     __ call_runtime_leaf(routine, getThreadTemp(),
3120                          reg, new LIR_OprList());
3121     LIR_Opr result = rlock_result(x);
3122     __ move(reg, result);
3123 }
3124 
3125 #ifdef TRACE_HAVE_INTRINSICS
3126 void LIRGenerator::do_ThreadIDIntrinsic(Intrinsic* x) {
3127     LIR_Opr thread = getThreadPointer();
3128     LIR_Opr osthread = new_pointer_register();
3129     __ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread);
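         // The OSThread* is loaded out of the JavaThread; the native thread
         // id is then read from it with a platform-dependent width.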
3130     size_t thread_id_size = OSThread::thread_id_size();
3131     if (thread_id_size == (size_t) BytesPerLong) {
3132       LIR_Opr id = new_register(T_LONG);

