  guarantee(referent_offset > 0, "referent offset not initialized");

  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem reference(x->argument_at(0), this);
  reference.load_item();

  // need to perform the null check on the reference object
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  LIR_Address* referent_field_adr =
    new LIR_Address(reference.result(), referent_offset, T_OBJECT);

  LIR_Opr result = rlock_result(x);

  __ load(referent_field_adr, result, info);

  if (UseLoadBarrier) {
    load_barrier(result, LIR_OprFact::address(referent_field_adr), lir_patch_none, NULL, true /* weak */);
  }
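
  // Note that the load barrier runs before the pre-barrier below, so the
  // pre-barrier records the healed value of the referent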
  // Register the value in the referent field with the pre-barrier
  pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
              result /* pre_val */,
              false /* do_load */,
              false /* patch */,
              NULL /* info */);
}
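
// For orientation: the intrinsic above compiles java.lang.ref.Reference.get(),
// which at the Java level is roughly (a sketch, not the actual library source):
//
//   T get() {
//     return this.referent;  // C1 adds the load barrier and SATB pre-barrier
//   }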

// Example: clazz.isInstance(object)
void LIRGenerator::do_isInstance(Intrinsic* x) {
  assert(x->number_of_arguments() == 2, "wrong type");

  // TODO could try to substitute this node with an equivalent InstanceOf
  // if clazz is known to be a constant Class. This will pick up newly found
  // constants after HIR construction. I'll leave this to a future change.

  // as a first cut, make a simple leaf call to runtime to stay platform independent.
  // ...

void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1BarrierSet:
      G1SATBCardTableModRef_post_barrier(addr, new_val);
      break;
    case BarrierSet::Z:
      // No post barriers
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
      CardTableModRef_post_barrier(addr, new_val);
      break;
    default:
      ShouldNotReachHere();
  }
}
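
// Background on the cases above: G1's post barrier maintains remembered
// sets for pointers that cross region boundaries, and the card-table post
// barrier dirties the card covering the updated field; ZGC tracks all
// references through load barriers instead, so stores need no post-barrier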

void LIRGenerator::load_barrier(LIR_Opr ref, LIR_Opr ref_addr, LIR_PatchCode patch_code, CodeEmitInfo* info, bool weak) {
  assert(UseLoadBarrier, "invariant");
  assert(ref->is_register(), "invariant");

  // Fast path: test the loaded reference, setting the condition flags
  // consumed by the conditional branch below
  __ load_barrier_test(ref);

  // Pick a stub variant based on how the reference's address is available:
  // not at all, already in a register, or a memory address that needs a
  // temporary pointer register (and possibly patching)
  CodeStub* slow;
  if (ref_addr->is_illegal()) {
    slow = new LoadBarrierStub(ref, weak);
  } else if (ref_addr->is_register()) {
    slow = new LoadBarrierStub(ref, ref_addr, weak);
  } else {
    slow = new LoadBarrierStub(ref, ref_addr, new_pointer_register(), patch_code, info, weak);
  }

  // Branch to slow path if test failed
  __ branch(lir_cond_notEqual, T_ADDRESS, slow);
  __ branch_destination(slow->continuation());
}
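
// Background (ZGC design): the slow path heals the reference, i.e. updates
// it to the object's current address, and when a valid ref_addr is supplied
// it also stores the healed value back so that later loads from the same
// slot pass the fast-path test. Call sites therefore pass the load address
// whenever they have one, e.g. (mirroring do_LoadField below):
//
//   load_barrier(reg, LIR_OprFact::address(address),
//                patch_code, (info ? new CodeEmitInfo(info) : NULL));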

////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

// ... (resumes mid-way through LIRGenerator::do_LoadField) ...

  if (needs_patching) {
    // we need to patch the offset in the instruction so don't allow
    // generate_address to try to be smart about emitting the -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
  } else {
    address = generate_address(object.result(), x->offset(), field_type);
  }

  if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile && os::is_MP()) {
    __ membar();
  }

  bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
  if (needs_atomic_access && !needs_patching) {
    volatile_field_load(address, reg, info);
  } else {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
    __ load(address, reg, info, patch_code);
  }

  if (is_oop && UseLoadBarrier) {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
    load_barrier(reg, LIR_OprFact::address(address),
                 patch_code, (info ? new CodeEmitInfo(info) : NULL));
  }

  if (is_volatile && os::is_MP()) {
    __ membar_acquire();
  }
}


//------------------------java.nio.Buffer.checkIndex------------------------

// int java.nio.Buffer.checkIndex(int)
void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
  // NOTE: by the time we are in checkIndex() we are guaranteed that
  // the buffer is non-null (because checkIndex is package-private and
  // only called from within other methods in the buffer).
  assert(x->number_of_arguments() == 2, "wrong type");
  LIRItem buf(x->argument_at(0), this);

  // ... (resumes mid-way through LIRGenerator::do_LoadIndexed) ...

  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), element_type, false);

  if (GenerateRangeChecks && needs_range_check) {
    if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
      __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
    } else if (use_length) {
      // TODO: use a (modified) version of array_range_check that does not require a
      // constant length to be loaded to a register
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // The range check performs the null check, so clear it out for the load
      null_check_info = NULL;
    }
  }

  LIR_Opr result = rlock_result(x, element_type);
  __ move(array_addr, result, null_check_info);

  if (is_oop && UseLoadBarrier) {
    // The barrier gets its own copy of the null-check info, presumably
    // because a CodeEmitInfo should not be shared between two potentially
    // trapping LIR instructions
    load_barrier(result, LIR_OprFact::address(array_addr),
                 lir_patch_none, (null_check_info ? new CodeEmitInfo(null_check_info) : NULL));
  }
}


void LIRGenerator::do_NullCheck(NullCheck* x) {
  if (x->can_trap()) {
    LIRItem value(x->obj(), this);
    value.load_item();
    CodeEmitInfo* info = state_for(x);
    __ null_check(value.result(), info);
  }
}


void LIRGenerator::do_TypeCast(TypeCast* x) {
  LIRItem value(x->obj(), this);
  value.load_item();
  // the result is the same as from the node we are casting