
src/share/vm/c1/c1_LIRGenerator.cpp (old version)

 230   }
 231 }
 232 
 233 
 234 void LIRItem::load_for_store(BasicType type) {
 235   if (_gen->can_store_as_constant(value(), type)) {
 236     _result = value()->operand();
 237     if (!_result->is_constant()) {
 238       _result = LIR_OprFact::value_type(value()->type());
 239     }
 240   } else if (type == T_BYTE || type == T_BOOLEAN) {
 241     load_byte_item();
 242   } else {
 243     load_item();
 244   }
 245 }
 246 
 247 void LIRItem::load_item_force(LIR_Opr reg) {
 248   LIR_Opr r = result();
 249   if (r != reg) {
 250 #if !defined(ARM) && !defined(E500V2)
 251     if (r->type() != reg->type()) {
 252       // moves between different types need an intervening spill slot
 253       r = _gen->force_to_spill(r, reg->type());
 254     }
 255 #endif
 256     __ move(r, reg);
 257     _result = reg;
 258   }
 259 }
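
A note on the #if in load_item_force: on most platforms C1's linear scan allocator cannot emit a single move between operands of different LIR types, so force_to_spill routes the value through a stack slot retyped to the destination type (ARM and E500V2 are excluded here, presumably because those ports handle such moves directly). Schematically, with types chosen purely for illustration:

    // r (T_INT)  --move-->  spill slot (T_OBJECT)  --move-->  reg (T_OBJECT)
    // one ill-typed move becomes two well-typed ones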
 260 
 261 ciObject* LIRItem::get_jobject_constant() const {
 262   ObjectType* oc = type()->as_ObjectType();
 263   if (oc) {
 264     return oc->constant_value();
 265   }
 266   return NULL;
 267 }
 268 
 269 
 270 jint LIRItem::get_jint_constant() const {
 271   assert(is_constant() && value() != NULL, "");
 272   assert(type()->as_IntConstant() != NULL, "type check");
 273   return type()->as_IntConstant()->value();
 274 }
 275 
 276 
 277 jint LIRItem::get_address_constant() const {


1405       }
1406       return _reg_for_constants.at(i);
1407     }
1408   }
1409 
1410   LIR_Opr result = new_register(t);
1411   __ move((LIR_Opr)c, result);
1412   _constants.append(c);
1413   _reg_for_constants.append(result);
1414   return result;
1415 }
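
This hunk is the tail of C1's constant-register cache: the loop above (its head is elided) searches _constants for an equal constant and reuses the register recorded for it; on a miss, as here, the constant is materialized into a fresh register and the pair is memoized in two parallel growable arrays. A self-contained sketch of the same scheme, with all names invented and int standing in for the real LIR_Const*/LIR_Opr types:

    #include <vector>

    // Parallel-array memo cache in the style of _constants / _reg_for_constants.
    struct ConstantCache {
      std::vector<int> constants;
      std::vector<int> reg_for_constants;

      int reg_for(int c, int fresh_reg) {
        for (size_t i = 0; i < constants.size(); i++) {
          if (constants[i] == c) return reg_for_constants[i];  // hit: reuse
        }
        constants.push_back(c);                  // miss: materialize, then
        reg_for_constants.push_back(fresh_reg);  // remember the mapping
        return fresh_reg;
      }
    };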
1416 
1417 // Various barriers
1418 
1419 void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
1420                                bool do_load, bool patch, CodeEmitInfo* info) {
1421   // Do the pre-write barrier, if any.
1422   switch (_bs->kind()) {
1423 #if INCLUDE_ALL_GCS
1424     case BarrierSet::G1SATBCTLogging:
1425       G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
1426       break;
1427 #endif // INCLUDE_ALL_GCS
1428     case BarrierSet::CardTableForRS:
1429     case BarrierSet::CardTableExtension:
1430       // No pre barriers
1431       break;
1432     case BarrierSet::ModRef:
1433       // No pre barriers
1434       break;
 1435     default:
1436       ShouldNotReachHere();
1437 
1438   }
1439 }
1440 
1441 void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1442   switch (_bs->kind()) {
1443 #if INCLUDE_ALL_GCS
1444     case BarrierSet::G1SATBCTLogging:
1445       G1SATBCardTableModRef_post_barrier(addr,  new_val);
1446       break;
1447 #endif // INCLUDE_ALL_GCS
1448     case BarrierSet::CardTableForRS:
1449     case BarrierSet::CardTableExtension:
1450       CardTableModRef_post_barrier(addr,  new_val);
1451       break;
1452     case BarrierSet::ModRef:
1453       // No post barriers
1454       break;
 1455     default:
1456       ShouldNotReachHere();
1457     }
1458 }
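
These two dispatchers pick the write-barrier halves a reference store needs under the active BarrierSet: the pre barrier logs the about-to-be-overwritten value into G1's SATB marking queue, and the post barrier dirties the card covering the updated word for the card-table collectors. In outline, an oop store compiles to:

    // pre_barrier(addr);             // SATB: log old *addr while marking runs
    // *addr = new_val;               // the store itself
    // post_barrier(addr, new_val);   // card table: dirty card_for(addr)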
1459 
1460 ////////////////////////////////////////////////////////////////////////
1461 #if INCLUDE_ALL_GCS
1462 
1463 void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
1464                                                      bool do_load, bool patch, CodeEmitInfo* info) {
1465   // First we test whether marking is in progress.
1466   BasicType flag_type;


1697     // load item if field not constant
1698     // because of code patching we cannot inline constants
1699     if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1700       value.load_byte_item();
1701     } else  {
1702       value.load_item();
1703     }
1704   } else {
1705     value.load_for_store(field_type);
1706   }
1707 
1708   set_no_result(x);
1709 
1710 #ifndef PRODUCT
1711   if (PrintNotLoaded && needs_patching) {
1712     tty->print_cr("   ###class not loaded at store_%s bci %d",
1713                   x->is_static() ?  "static" : "field", x->printable_bci());
1714   }
1715 #endif
1716 
1717   if (x->needs_null_check() &&
1718       (needs_patching ||
1719        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1720     // emit an explicit null check because the offset is too large
1721     __ null_check(object.result(), new CodeEmitInfo(info));
1722   }
1723 
1724   LIR_Address* address;
1725   if (needs_patching) {
1726     // we need to patch the offset in the instruction so don't allow
1727     // generate_address to try to be smart about emitting the -1.
1728     // Otherwise the patching code won't know how to find the
1729     // instruction to patch.
1730     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1731   } else {
1732     address = generate_address(object.result(), x->offset(), field_type);
1733   }
1734 
1735   if (is_volatile && os::is_MP()) {
1736     __ membar_release();
1737   }
1738 
1739   if (is_oop) {
1740     // Do the pre-write barrier, if any.
1741     pre_barrier(LIR_OprFact::address(address),
1742                 LIR_OprFact::illegalOpr /* pre_val */,
1743                 true /* do_load*/,
1744                 needs_patching,
1745                 (info ? new CodeEmitInfo(info) : NULL));
1746   }
1747 
1748   bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
1749   if (needs_atomic_access && !needs_patching) {
1750     volatile_field_store(value.result(), address, info);
1751   } else {
1752     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1753     __ store(value.result(), address, info, patch_code);
1754   }
1755 
1756   if (is_oop) {
 1757     // Storing into an object, so mark the card of its header
1758     post_barrier(object.result(), value.result());
1759   }
1760 
1761   if (is_volatile && os::is_MP()) {
1762     __ membar();
1763   }
1764 }
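
The fences around the store above are HotSpot's standard volatile-store protocol on MP systems: the release barrier keeps earlier memory accesses from sinking below the store, and the trailing full membar keeps the store ordered before any later volatile load. In outline:

    // membar_release();          // earlier loads/stores complete first
    // store value into field     // the volatile store itself
    // membar();                  // full fence, orders the store vs. later volatile loads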
1765 
1766 
1767 void LIRGenerator::do_LoadField(LoadField* x) {
1768   bool needs_patching = x->needs_patching();
1769   bool is_volatile = x->field()->is_volatile();
1770   BasicType field_type = x->field_type();
1771 
1772   CodeEmitInfo* info = NULL;
1773   if (needs_patching) {
1774     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1775     info = state_for(x, x->state_before());
1776   } else if (x->needs_null_check()) {
1777     NullCheck* nc = x->explicit_null_check();
1778     if (nc == NULL) {
1779       info = state_for(x);
1780     } else {
1781       info = state_for(nc);
1782     }
1783   }
1784 
1785   LIRItem object(x->obj(), this);
1786 
1787   object.load_item();
1788 
1789 #ifndef PRODUCT
1790   if (PrintNotLoaded && needs_patching) {
1791     tty->print_cr("   ###class not loaded at load_%s bci %d",
1792                   x->is_static() ?  "static" : "field", x->printable_bci());
1793   }
1794 #endif
1795 
1796   bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1797   if (x->needs_null_check() &&
1798       (needs_patching ||
1799        MacroAssembler::needs_explicit_null_check(x->offset()) ||
1800        stress_deopt)) {
1801     LIR_Opr obj = object.result();
1802     if (stress_deopt) {
1803       obj = new_register(T_OBJECT);
1804       __ move(LIR_OprFact::oopConst(NULL), obj);
1805     }
1806     // emit an explicit null check because the offset is too large
1807     __ null_check(obj, new CodeEmitInfo(info));
1808   }
1809 
1810   LIR_Opr reg = rlock_result(x, field_type);
1811   LIR_Address* address;
1812   if (needs_patching) {
1813     // we need to patch the offset in the instruction so don't allow
1814     // generate_address to try to be smart about emitting the -1.
1815     // Otherwise the patching code won't know how to find the
1816     // instruction to patch.
1817     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1818   } else {
1819     address = generate_address(object.result(), x->offset(), field_type);
1820   }
1821 
1822   bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
1823   if (needs_atomic_access && !needs_patching) {
1824     volatile_field_load(address, reg, info);
1825   } else {
1826     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1827     __ load(address, reg, info, patch_code);
1828   }
1829 
1830   if (is_volatile && os::is_MP()) {
1831     __ membar_acquire();
1832   }
1833 }
1834 
1835 
1836 //------------------------java.nio.Buffer.checkIndex------------------------
1837 
1838 // int java.nio.Buffer.checkIndex(int)
1839 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1840   // NOTE: by the time we are in checkIndex() we are guaranteed that
1841   // the buffer is non-null (because checkIndex is package-private and
1842   // only called from within other methods in the buffer).
1843   assert(x->number_of_arguments() == 2, "wrong type");
1844   LIRItem buf  (x->argument_at(0), this);
1845   LIRItem index(x->argument_at(1), this);
1846   buf.load_item();
1847   index.load_item();
1848 
1849   LIR_Opr result = rlock_result(x);
1850   if (GenerateRangeChecks) {
1851     CodeEmitInfo* info = state_for(x);
1852     CodeStub* stub = new RangeCheckStub(info, index.result(), true);
1853     if (index.result()->is_constant()) {
1854       cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);


1911   } else {
1912     index.load_item();
1913   }
1914 
1915   CodeEmitInfo* range_check_info = state_for(x);
1916   CodeEmitInfo* null_check_info = NULL;
1917   if (x->needs_null_check()) {
1918     NullCheck* nc = x->explicit_null_check();
1919     if (nc != NULL) {
1920       null_check_info = state_for(nc);
1921     } else {
1922       null_check_info = range_check_info;
1923     }
1924     if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
1925       LIR_Opr obj = new_register(T_OBJECT);
1926       __ move(LIR_OprFact::oopConst(NULL), obj);
1927       __ null_check(obj, new CodeEmitInfo(null_check_info));
1928     }
1929   }
1930 
1931   // emit array address setup early so it schedules better
1932   LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
1933 
1934   if (GenerateRangeChecks && needs_range_check) {
1935     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1936       __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
1937     } else if (use_length) {
1938       // TODO: use a (modified) version of array_range_check that does not require a
1939       //       constant length to be loaded to a register
1940       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1941       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
1942     } else {
1943       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1944       // The range check performs the null check, so clear it out for the load
1945       null_check_info = NULL;
1946     }
1947   }
1948 
1949   __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
1950 }
1951 
1952 
1953 void LIRGenerator::do_NullCheck(NullCheck* x) {
1954   if (x->can_trap()) {
1955     LIRItem value(x->obj(), this);
1956     value.load_item();
1957     CodeEmitInfo* info = state_for(x);
1958     __ null_check(value.result(), info);
1959   }
1960 }
1961 
1962 
1963 void LIRGenerator::do_TypeCast(TypeCast* x) {


2236   LIR_Opr value = rlock_result(x, x->basic_type());
2237 
2238   get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2239 
2240 #if INCLUDE_ALL_GCS
2241   // We might be reading the value of the referent field of a
2242   // Reference object in order to attach it back to the live
2243   // object graph. If G1 is enabled then we need to record
2244   // the value that is being returned in an SATB log buffer.
2245   //
2246   // We need to generate code similar to the following...
2247   //
2248   // if (offset == java_lang_ref_Reference::referent_offset) {
2249   //   if (src != NULL) {
2250   //     if (klass(src)->reference_type() != REF_NONE) {
2251   //       pre_barrier(..., value, ...);
2252   //     }
2253   //   }
2254   // }
2255 
2256   if (UseG1GC && type == T_OBJECT) {
2257     bool gen_pre_barrier = true;     // Assume we need to generate pre_barrier.
2258     bool gen_offset_check = true;    // Assume we need to generate the offset guard.
2259     bool gen_source_check = true;    // Assume we need to check the src object for null.
2260     bool gen_type_check = true;      // Assume we need to check the reference_type.
2261 
2262     if (off.is_constant()) {
2263       jlong off_con = (off.type()->is_int() ?
2264                         (jlong) off.get_jint_constant() :
2265                         off.get_jlong_constant());
2266 
2267 
2268       if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
2269         // The constant offset is something other than referent_offset.
2270         // We can skip generating/checking the remaining guards and
2271         // skip generation of the code stub.
2272         gen_pre_barrier = false;
2273       } else {
2274         // The constant offset is the same as referent_offset -
2275         // we do not need to generate a runtime offset check.
2276         gen_offset_check = false;


2778     call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
2779   }
2780 
2781   if (method()->is_synchronized()) {
2782     LIR_Opr obj;
2783     if (method()->is_static()) {
2784       obj = new_register(T_OBJECT);
2785       __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
2786     } else {
2787       Local* receiver = x->state()->local_at(0)->as_Local();
2788       assert(receiver != NULL, "must already exist");
2789       obj = receiver->operand();
2790     }
2791     assert(obj->is_valid(), "must be valid");
2792 
2793     if (method()->is_synchronized() && GenerateSynchronizationCode) {
2794       LIR_Opr lock = new_register(T_INT);
2795       __ load_stack_address_monitor(0, lock);
2796 
2797       CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
2798       CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2799 
2800       // receiver is guaranteed non-NULL so don't need CodeEmitInfo
2801       __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
2802     }
2803   }
2804   if (compilation()->age_code()) {
2805     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), NULL, false);
2806     decrement_age(info);
2807   }
2808   // increment invocation counters if needed
2809   if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2810     profile_parameters(x);
2811     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
2812     increment_invocation_counter(info);
2813   }
2814 
2815   // all blocks with a successor must end with an unconditional jump
2816   // to the successor even if they are consecutive
2817   __ jump(x->default_sux());


2998   if (result_register->is_valid()) {
2999     LIR_Opr result = rlock_result(x);
3000     __ move(result_register, result);
3001   }
3002 }
3003 
3004 
3005 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
3006   assert(x->number_of_arguments() == 1, "wrong type");
3007   LIRItem value       (x->argument_at(0), this);
3008   LIR_Opr reg = rlock_result(x);
3009   value.load_item();
3010   LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
3011   __ move(tmp, reg);
3012 }
3013 
3014 
3015 
3016 // Code for:  (x->x() x->cond() x->y()) ? x->tval() : x->fval()
3017 void LIRGenerator::do_IfOp(IfOp* x) {
3018 #ifdef ASSERT
3019   {
3020     ValueTag xtag = x->x()->type()->tag();
3021     ValueTag ttag = x->tval()->type()->tag();
3022     assert(xtag == intTag || xtag == objectTag, "cannot handle others");
3023     assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
3024     assert(ttag == x->fval()->type()->tag(), "cannot handle others");
3025   }
3026 #endif
3027 
3028   LIRItem left(x->x(), this);
3029   LIRItem right(x->y(), this);
3030   left.load_item();
3031   if (can_inline_as_constant(right.value())) {
3032     right.dont_load_item();
3033   } else {
3034     right.load_item();
3035   }
3036 
3037   LIRItem t_val(x->tval(), this);
3038   LIRItem f_val(x->fval(), this);
3039   t_val.dont_load_item();
3040   f_val.dont_load_item();
3041   LIR_Opr reg = rlock_result(x);
3042 
3043   __ cmp(lir_cond(x->cond()), left.result(), right.result());
3044   __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
3045 }
3046 
3047 void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) {
3048     assert(x->number_of_arguments() == expected_arguments, "wrong type");
3049     LIR_Opr reg = result_register_for(x->type());
3050     __ call_runtime_leaf(routine, getThreadTemp(),
3051                          reg, new LIR_OprList());
3052     LIR_Opr result = rlock_result(x);
3053     __ move(reg, result);
3054 }
3055 
3056 #ifdef TRACE_HAVE_INTRINSICS
3057 void LIRGenerator::do_ThreadIDIntrinsic(Intrinsic* x) {
3058     LIR_Opr thread = getThreadPointer();
3059     LIR_Opr osthread = new_pointer_register();
3060     __ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread);
3061     size_t thread_id_size = OSThread::thread_id_size();
3062     if (thread_id_size == (size_t) BytesPerLong) {
3063       LIR_Opr id = new_register(T_LONG);

src/share/vm/c1/c1_LIRGenerator.cpp (new version)

 230   }
 231 }
 232 
 233 
 234 void LIRItem::load_for_store(BasicType type) {
 235   if (_gen->can_store_as_constant(value(), type)) {
 236     _result = value()->operand();
 237     if (!_result->is_constant()) {
 238       _result = LIR_OprFact::value_type(value()->type());
 239     }
 240   } else if (type == T_BYTE || type == T_BOOLEAN) {
 241     load_byte_item();
 242   } else {
 243     load_item();
 244   }
 245 }
 246 
 247 void LIRItem::load_item_force(LIR_Opr reg) {
 248   LIR_Opr r = result();
 249   if (r != reg) {
 250     _result = _gen->force_opr_to(r, reg);
 251   }
 252 }
 253 
 254 LIR_Opr LIRGenerator::force_opr_to(LIR_Opr op, LIR_Opr reg) {
 255   if (op != reg) {
 256 #if !defined(ARM) && !defined(E500V2)
 257     if (op->type() != reg->type()) {
 258       // moves between different types need an intervening spill slot
 259       op = force_to_spill(op, reg->type());
 260     }
 261 #endif
 262     __ move(op, reg);
 263     return reg;
 264   } else {
 265     return op;
 266   }
 267 }
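
The new force_opr_to helper factors the type-checked move out of LIRItem::load_item_force so code outside LIRItem can also pin an arbitrary operand into a fixed register. A hedged sketch of the call pattern (the register operand name is invented; real callers would pass a platform FrameMap operand):

    // LIR_Opr pinned = force_opr_to(some_opr, FrameMap::some_fixed_opr);
    // - returns the operand unchanged when it is already the target register
    // - otherwise spills through a stack slot first if the LIR types differ,
    //   then moves into the requested register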
 268 
 269 ciObject* LIRItem::get_jobject_constant() const {
 270   ObjectType* oc = type()->as_ObjectType();
 271   if (oc) {
 272     return oc->constant_value();
 273   }
 274   return NULL;
 275 }
 276 
 277 
 278 jint LIRItem::get_jint_constant() const {
 279   assert(is_constant() && value() != NULL, "");
 280   assert(type()->as_IntConstant() != NULL, "type check");
 281   return type()->as_IntConstant()->value();
 282 }
 283 
 284 
 285 jint LIRItem::get_address_constant() const {


1413       }
1414       return _reg_for_constants.at(i);
1415     }
1416   }
1417 
1418   LIR_Opr result = new_register(t);
1419   __ move((LIR_Opr)c, result);
1420   _constants.append(c);
1421   _reg_for_constants.append(result);
1422   return result;
1423 }
1424 
1425 // Various barriers
1426 
1427 void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
1428                                bool do_load, bool patch, CodeEmitInfo* info) {
1429   // Do the pre-write barrier, if any.
1430   switch (_bs->kind()) {
1431 #if INCLUDE_ALL_GCS
1432     case BarrierSet::G1SATBCTLogging:
1433     case BarrierSet::ShenandoahBarrierSet:
1434       G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
1435       break;
1436 #endif // INCLUDE_ALL_GCS
1437     case BarrierSet::CardTableForRS:
1438     case BarrierSet::CardTableExtension:
1439       // No pre barriers
1440       break;
1441     case BarrierSet::ModRef:
1442       // No pre barriers
1443       break;
1444     default:
1445       ShouldNotReachHere();
1446 
1447   }
1448 }
1449 
1450 void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1451   switch (_bs->kind()) {
1452 #if INCLUDE_ALL_GCS
1453     case BarrierSet::G1SATBCTLogging:
1454       G1SATBCardTableModRef_post_barrier(addr,  new_val);
1455       break;
1456     case BarrierSet::ShenandoahBarrierSet:
1457       break;
1458 #endif // INCLUDE_ALL_GCS
1459     case BarrierSet::CardTableForRS:
1460     case BarrierSet::CardTableExtension:
1461       CardTableModRef_post_barrier(addr,  new_val);
1462       break;
1463     case BarrierSet::ModRef:
1464       // No post barriers
1465       break;
1466     default:
1467       ShouldNotReachHere();
1468     }
1469 }
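
The empty ShenandoahBarrierSet case above is deliberate: Shenandoah keeps no card-table remembered set, so reference stores need no post barrier, and only the SATB pre barrier (shared with the G1SATBCTLogging path in pre_barrier) remains. The store sequence therefore reduces to:

    // pre_barrier(addr);   // SATB logging, same path as G1
    // *addr = new_val;     // no card marking afterwards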
1470 
1471 ////////////////////////////////////////////////////////////////////////
1472 #if INCLUDE_ALL_GCS
1473 
1474 void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
1475                                                      bool do_load, bool patch, CodeEmitInfo* info) {
1476   // First we test whether marking is in progress.
1477   BasicType flag_type;


1708     // load item if field not constant
1709     // because of code patching we cannot inline constants
1710     if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1711       value.load_byte_item();
1712     } else  {
1713       value.load_item();
1714     }
1715   } else {
1716     value.load_for_store(field_type);
1717   }
1718 
1719   set_no_result(x);
1720 
1721 #ifndef PRODUCT
1722   if (PrintNotLoaded && needs_patching) {
1723     tty->print_cr("   ###class not loaded at store_%s bci %d",
1724                   x->is_static() ?  "static" : "field", x->printable_bci());
1725   }
1726 #endif
1727 
1728   LIR_Opr obj = object.result();
1729 
1730   if (x->needs_null_check() &&
1731       (needs_patching ||
1732        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1733     // emit an explicit null check because the offset is too large
1734     __ null_check(obj, new CodeEmitInfo(info));
1735   }
1736 
1737   obj = shenandoah_write_barrier(obj, info, x->needs_null_check());
1738   LIR_Opr val = value.result();
1739   if (is_oop && UseShenandoahGC) {
1740     if (!val->is_register()) {
1741       assert(val->is_constant(), "expect constant");
1742     } else {
1743       val = shenandoah_read_barrier(val, NULL, true);
1744     }
1745   }
1746 
1747   LIR_Address* address;
1748   if (needs_patching) {
1749     // we need to patch the offset in the instruction so don't allow
1750     // generate_address to try to be smart about emitting the -1.
1751     // Otherwise the patching code won't know how to find the
1752     // instruction to patch.
1753     address = new LIR_Address(obj, PATCHED_ADDR, field_type);
1754   } else {
1755     address = generate_address(obj, x->offset(), field_type);
1756   }
1757 
1758   if (is_volatile && os::is_MP()) {
1759     __ membar_release();
1760   }
1761 
1762   if (is_oop) {
1763     // Do the pre-write barrier, if any.
1764     pre_barrier(LIR_OprFact::address(address),
1765                 LIR_OprFact::illegalOpr /* pre_val */,
1766                 true /* do_load*/,
1767                 needs_patching,
1768                 (info ? new CodeEmitInfo(info) : NULL));
1769   }
1770 
1771   bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
1772   if (needs_atomic_access && !needs_patching) {
1773     volatile_field_store(val, address, info);
1774   } else {
1775     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1776     __ store(val, address, info, patch_code);
1777   }
1778 
1779   if (is_oop) {
1780     // Storing into an object, so mark the card of its header
1781     post_barrier(obj, val);
1782   }
1783 
1784   if (is_volatile && os::is_MP()) {
1785     __ membar();
1786   }
1787 }
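
The val handling added in this version is Shenandoah's storeval barrier: an oop about to be written into the heap is first resolved through a read barrier so the heap never holds a from-space pointer; constants (notably NULL) are already canonical, which is what the is_register()/is_constant() guard checks. In outline:

    // if (val is an oop held in a register) val = read_barrier(val);
    // *field = val;   // only to-space (or constant) references reach the heap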
1788 
1789 
1790 void LIRGenerator::do_LoadField(LoadField* x) {
1791   bool needs_patching = x->needs_patching();
1792   bool is_volatile = x->field()->is_volatile();
1793   BasicType field_type = x->field_type();
1794 
1795   CodeEmitInfo* info = NULL;
1796   if (needs_patching) {
1797     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1798     info = state_for(x, x->state_before());
1799   } else if (x->needs_null_check()) {
1800     NullCheck* nc = x->explicit_null_check();
1801     if (nc == NULL) {
1802       info = state_for(x);
1803     } else {
1804       info = state_for(nc);
1805     }
1806   }
1807 
1808   LIRItem object(x->obj(), this);
1809 
1810   object.load_item();
1811 
1812 #ifndef PRODUCT
1813   if (PrintNotLoaded && needs_patching) {
1814     tty->print_cr("   ###class not loaded at load_%s bci %d",
1815                   x->is_static() ?  "static" : "field", x->printable_bci());
1816   }
1817 #endif
1818 
1819   LIR_Opr obj = object.result();
1820   bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1821   if (x->needs_null_check() &&
1822       (needs_patching ||
1823        MacroAssembler::needs_explicit_null_check(x->offset()) ||
1824        stress_deopt)) {
1825     if (stress_deopt) {
1826       obj = new_register(T_OBJECT);
1827       __ move(LIR_OprFact::oopConst(NULL), obj);
1828     }
1829     // emit an explicit null check because the offset is too large
1830     __ null_check(obj, new CodeEmitInfo(info));
1831   }
1832 
1833   obj = shenandoah_read_barrier(obj, info, x->needs_null_check() && x->explicit_null_check() != NULL);
1834   LIR_Opr reg = rlock_result(x, field_type);
1835   LIR_Address* address;
1836   if (needs_patching) {
1837     // we need to patch the offset in the instruction so don't allow
1838     // generate_address to try to be smart about emitting the -1.
1839     // Otherwise the patching code won't know how to find the
1840     // instruction to patch.
1841     address = new LIR_Address(obj, PATCHED_ADDR, field_type);
1842   } else {
1843     address = generate_address(obj, x->offset(), field_type);
1844   }
1845 
1846   bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
1847   if (needs_atomic_access && !needs_patching) {
1848     volatile_field_load(address, reg, info);
1849   } else {
1850     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1851     __ load(address, reg, info, patch_code);
1852   }
1853 
1854   if (is_volatile && os::is_MP()) {
1855     __ membar_acquire();
1856   }
1857 }
1858 
1859 LIR_Opr LIRGenerator::shenandoah_read_barrier(LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
1860   if (UseShenandoahGC) {
1861 
1862     LabelObj* done = new LabelObj();
1863     LIR_Opr result = new_register(T_OBJECT);
1864     __ move(obj, result);
1865     if (need_null_check) {
1866       __ cmp(lir_cond_equal, result, LIR_OprFact::oopConst(NULL));
1867       __ branch(lir_cond_equal, T_LONG, done->label());
1868     }
1869     LIR_Address* brooks_ptr_address = generate_address(result, -8, T_ADDRESS); // Brooks forwarding pointer: one word before the object
1870     __ load(brooks_ptr_address, result, info ? new CodeEmitInfo(info) : NULL, lir_patch_none);
1871 
1872     __ branch_destination(done->label());
1873     return result;
1874   } else {
1875     return obj;
1876   }
1877 }
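
This read barrier materializes Shenandoah's Brooks-style indirection: every object carries a forwarding pointer one word before its body (hence the hard-coded -8 on this 64-bit path) that always points at the object's current copy, and NULL is branched around rather than dereferenced. A minimal stand-alone C++ model of the runtime semantics, with every name invented for illustration:

    #include <cstddef>

    struct Obj;  // opaque object body

    // Model of the generated code: NULL passes through; anything else is
    // resolved through the forwarding pointer stored one word before it.
    inline Obj* resolve_read(Obj* obj) {
      if (obj == nullptr) return nullptr;
      return *reinterpret_cast<Obj**>(
          reinterpret_cast<char*>(obj) - sizeof(void*));  // load at offset -8
    }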
1878 
1879 LIR_Opr LIRGenerator::shenandoah_write_barrier(LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
1880   if (UseShenandoahGC) {
1881 
1882     LIR_Opr result = new_register(T_OBJECT);
1883     LIR_Opr tmp1 = new_register(T_INT);
1884     LIR_Opr tmp2 = new_register(T_INT);
1885     __ shenandoah_wb(obj, result, tmp1, tmp2, info ? new CodeEmitInfo(info) : NULL, need_null_check);
1886     return result;
1887 
1888   } else {
1889     return obj;
1890   }
1891 }
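
Unlike the read barrier, the write barrier must return a copy that is safe to mutate, so it cannot be a plain forwarding-pointer load: shenandoah_wb expands to an inline fast path plus a runtime slow path that evacuates the object if it still sits in from-space during concurrent evacuation. Conceptually, continuing the model above (both helpers are invented names):

    // Obj* resolve_write(Obj* obj) {
    //   Obj* fwd = resolve_read(obj);            // fast path: already forwarded?
    //   if (in_collection_set(obj) && fwd == obj)
    //     fwd = evacuate_to_to_space(obj);       // slow path: copy, install fwd ptr
    //   return fwd;
    // }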
1892 
1893 //------------------------java.nio.Buffer.checkIndex------------------------
1894 
1895 // int java.nio.Buffer.checkIndex(int)
1896 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1897   // NOTE: by the time we are in checkIndex() we are guaranteed that
1898   // the buffer is non-null (because checkIndex is package-private and
1899   // only called from within other methods in the buffer).
1900   assert(x->number_of_arguments() == 2, "wrong type");
1901   LIRItem buf  (x->argument_at(0), this);
1902   LIRItem index(x->argument_at(1), this);
1903   buf.load_item();
1904   index.load_item();
1905 
1906   LIR_Opr result = rlock_result(x);
1907   if (GenerateRangeChecks) {
1908     CodeEmitInfo* info = state_for(x);
1909     CodeStub* stub = new RangeCheckStub(info, index.result(), true);
1910     if (index.result()->is_constant()) {
1911       cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);


1968   } else {
1969     index.load_item();
1970   }
1971 
1972   CodeEmitInfo* range_check_info = state_for(x);
1973   CodeEmitInfo* null_check_info = NULL;
1974   if (x->needs_null_check()) {
1975     NullCheck* nc = x->explicit_null_check();
1976     if (nc != NULL) {
1977       null_check_info = state_for(nc);
1978     } else {
1979       null_check_info = range_check_info;
1980     }
1981     if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
1982       LIR_Opr obj = new_register(T_OBJECT);
1983       __ move(LIR_OprFact::oopConst(NULL), obj);
1984       __ null_check(obj, new CodeEmitInfo(null_check_info));
1985     }
1986   }
1987 
1988   LIR_Opr ary = array.result();
1989   ary = shenandoah_read_barrier(ary, null_check_info, null_check_info != NULL);
1990 
1991   // emit array address setup early so it schedules better
1992   LIR_Address* array_addr = emit_array_address(ary, index.result(), x->elt_type(), false);
1993 
1994   if (GenerateRangeChecks && needs_range_check) {
1995     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1996       __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
1997     } else if (use_length) {
1998       // TODO: use a (modified) version of array_range_check that does not require a
1999       //       constant length to be loaded to a register
2000       __ cmp(lir_cond_belowEqual, length.result(), index.result());
2001       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
2002     } else {
2003       array_range_check(ary, index.result(), null_check_info, range_check_info);
2004       // The range check performs the null check, so clear it out for the load
2005       null_check_info = NULL;
2006     }
2007   }
2008 
2009   __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
2010 }
2011 
2012 
2013 void LIRGenerator::do_NullCheck(NullCheck* x) {
2014   if (x->can_trap()) {
2015     LIRItem value(x->obj(), this);
2016     value.load_item();
2017     CodeEmitInfo* info = state_for(x);
2018     __ null_check(value.result(), info);
2019   }
2020 }
2021 
2022 
2023 void LIRGenerator::do_TypeCast(TypeCast* x) {


2296   LIR_Opr value = rlock_result(x, x->basic_type());
2297 
2298   get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2299 
2300 #if INCLUDE_ALL_GCS
2301   // We might be reading the value of the referent field of a
2302   // Reference object in order to attach it back to the live
2303   // object graph. If G1 is enabled then we need to record
2304   // the value that is being returned in an SATB log buffer.
2305   //
2306   // We need to generate code similar to the following...
2307   //
2308   // if (offset == java_lang_ref_Reference::referent_offset) {
2309   //   if (src != NULL) {
2310   //     if (klass(src)->reference_type() != REF_NONE) {
2311   //       pre_barrier(..., value, ...);
2312   //     }
2313   //   }
2314   // }
2315 
2316   if ((UseShenandoahGC || UseG1GC) && type == T_OBJECT) {
2317     bool gen_pre_barrier = true;     // Assume we need to generate pre_barrier.
2318     bool gen_offset_check = true;    // Assume we need to generate the offset guard.
2319     bool gen_source_check = true;    // Assume we need to check the src object for null.
2320     bool gen_type_check = true;      // Assume we need to check the reference_type.
2321 
2322     if (off.is_constant()) {
2323       jlong off_con = (off.type()->is_int() ?
2324                         (jlong) off.get_jint_constant() :
2325                         off.get_jlong_constant());
2326 
2327 
2328       if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
2329         // The constant offset is something other than referent_offset.
2330         // We can skip generating/checking the remaining guards and
2331         // skip generation of the code stub.
2332         gen_pre_barrier = false;
2333       } else {
2334         // The constant offset is the same as referent_offset -
2335         // we do not need to generate a runtime offset check.
2336         gen_offset_check = false;


2838     call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
2839   }
2840 
2841   if (method()->is_synchronized()) {
2842     LIR_Opr obj;
2843     if (method()->is_static()) {
2844       obj = new_register(T_OBJECT);
2845       __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
2846     } else {
2847       Local* receiver = x->state()->local_at(0)->as_Local();
2848       assert(receiver != NULL, "must already exist");
2849       obj = receiver->operand();
2850     }
2851     assert(obj->is_valid(), "must be valid");
2852 
2853     if (method()->is_synchronized() && GenerateSynchronizationCode) {
2854       LIR_Opr lock = new_register(T_INT);
2855       __ load_stack_address_monitor(0, lock);
2856 
2857       CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
2858       obj = shenandoah_write_barrier(obj, info, false);
2859       CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2860 
2861       // receiver is guaranteed non-NULL so don't need CodeEmitInfo
2862       __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
2863     }
2864   }
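
The write barrier on the lock object is needed because monitorenter mutates the object header (the mark word): the update must land in the to-space copy, or it would be lost when the from-space copy is reclaimed. Reading the change in one line:

    // obj = write_barrier(obj);             // lock the canonical, writable copy,
    // lock_object(..., obj, lock, ...);     // never a stale from-space alias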
2865   if (compilation()->age_code()) {
2866     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), NULL, false);
2867     decrement_age(info);
2868   }
2869   // increment invocation counters if needed
2870   if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2871     profile_parameters(x);
2872     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
2873     increment_invocation_counter(info);
2874   }
2875 
2876   // all blocks with a successor must end with an unconditional jump
2877   // to the successor even if they are consecutive
2878   __ jump(x->default_sux());


3059   if (result_register->is_valid()) {
3060     LIR_Opr result = rlock_result(x);
3061     __ move(result_register, result);
3062   }
3063 }
3064 
3065 
3066 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
3067   assert(x->number_of_arguments() == 1, "wrong type");
3068   LIRItem value       (x->argument_at(0), this);
3069   LIR_Opr reg = rlock_result(x);
3070   value.load_item();
3071   LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
3072   __ move(tmp, reg);
3073 }
3074 
3075 
3076 
3077 // Code for:  (x->x() x->cond() x->y()) ? x->tval() : x->fval()
3078 void LIRGenerator::do_IfOp(IfOp* x) {
3079   ValueTag xtag = x->x()->type()->tag();
3080 #ifdef ASSERT
3081   {
3082     ValueTag ttag = x->tval()->type()->tag();
3083     assert(xtag == intTag || xtag == objectTag, "cannot handle others");
3084     assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
3085     assert(ttag == x->fval()->type()->tag(), "cannot handle others");
3086   }
3087 #endif
3088 
3089   LIRItem left(x->x(), this);
3090   LIRItem right(x->y(), this);
3091   left.load_item();
3092   if (can_inline_as_constant(right.value())) {
3093     right.dont_load_item();
3094   } else {
3095     right.load_item();
3096   }
3097 
3098   LIRItem t_val(x->tval(), this);
3099   LIRItem f_val(x->fval(), this);
3100   t_val.dont_load_item();
3101   f_val.dont_load_item();
3102   LIR_Opr reg = rlock_result(x);
3103 
3104   LIR_Opr left_opr = left.result();
3105   LIR_Opr right_opr = right.result();
3106   if (xtag == objectTag && UseShenandoahGC && x->y()->type() != objectNull) { // Don't need to resolve for ifnull.
3107     left_opr = shenandoah_write_barrier(left_opr, NULL, true);
3108     right_opr = shenandoah_read_barrier(right_opr, NULL, true);
3109   }
3110 
3111   __ cmp(lir_cond(x->cond()), left_opr, right_opr);
3112   __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
3113 }
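
The operand resolution added to do_IfOp exists because, with forwarding pointers, two references to the same logical object can differ bit-for-bit (a from-space and a to-space alias), so an oop compare is only meaningful between resolved pointers; the objectNull check skips the barriers for ifnull-style compares, since NULL has no aliases. In the terms of the read-barrier model above:

    // a == b  (as the program means it)   <=>   resolve_read(a) == resolve_read(b)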
3114 
3115 void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) {
3116     assert(x->number_of_arguments() == expected_arguments, "wrong type");
3117     LIR_Opr reg = result_register_for(x->type());
3118     __ call_runtime_leaf(routine, getThreadTemp(),
3119                          reg, new LIR_OprList());
3120     LIR_Opr result = rlock_result(x);
3121     __ move(reg, result);
3122 }
3123 
3124 #ifdef TRACE_HAVE_INTRINSICS
3125 void LIRGenerator::do_ThreadIDIntrinsic(Intrinsic* x) {
3126     LIR_Opr thread = getThreadPointer();
3127     LIR_Opr osthread = new_pointer_register();
3128     __ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread);
3129     size_t thread_id_size = OSThread::thread_id_size();
3130     if (thread_id_size == (size_t) BytesPerLong) {
3131       LIR_Opr id = new_register(T_LONG);