24
25 #include "precompiled.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
28 #include "gc/g1/heapRegion.hpp"
29 #include "gc/shared/barrierSet.hpp"
30 #include "gc/shared/cardTableModRefBS.hpp"
31 #include "gc/shared/collectedHeap.hpp"
32 #include "opto/addnode.hpp"
33 #include "opto/castnode.hpp"
34 #include "opto/convertnode.hpp"
35 #include "opto/graphKit.hpp"
36 #include "opto/idealKit.hpp"
37 #include "opto/intrinsicnode.hpp"
38 #include "opto/locknode.hpp"
39 #include "opto/machnode.hpp"
40 #include "opto/opaquenode.hpp"
41 #include "opto/parse.hpp"
42 #include "opto/rootnode.hpp"
43 #include "opto/runtime.hpp"
44 #include "runtime/deoptimization.hpp"
45 #include "runtime/sharedRuntime.hpp"
46
47 //----------------------------GraphKit-----------------------------------------
48 // Main utility constructor.
49 GraphKit::GraphKit(JVMState* jvms)
50 : Phase(Phase::Parser),
51 _env(C->env()),
52 _gvn(*C->initial_gvn())
53 {
54 _exceptions = jvms->map()->next_exception();
55 if (_exceptions != NULL) jvms->map()->set_next_exception(NULL);
56 set_jvms(jvms);
57 }
58
59 // Private constructor for parser.
60 GraphKit::GraphKit()
61 : Phase(Phase::Parser),
62 _env(C->env()),
63 _gvn(*C->initial_gvn())
798 return true;
799 assert(len == jvms->loc_size(), "live map consistent with locals map");
800 for (int local = 0; local < len; local++) {
801 if (!live_locals.at(local) && map->local(jvms, local) != top()) {
802 if (PrintMiscellaneous && (Verbose || WizardMode)) {
803 tty->print_cr("Zombie local %d: ", local);
804 jvms->dump();
805 }
806 return false;
807 }
808 }
809 }
810 return true;
811 }
812
813 #endif //ASSERT
814
815 // Helper function for enforcing certain bytecodes to reexecute if
816 // deoptimization happens
817 static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
818 ciMethod* cur_method = jvms->method();
819 int cur_bci = jvms->bci();
820 if (cur_method != NULL && cur_bci != InvocationEntryBci) {
821 Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
822 return Interpreter::bytecode_should_reexecute(code) ||
823 is_anewarray && code == Bytecodes::_multianewarray;
824 // Reexecute _multianewarray bytecode which was replaced with
825 // sequence of [a]newarray. See Parse::do_multianewarray().
826 //
827 // Note: interpreter should not have it set since this optimization
828 // is limited by dimensions and guarded by flag so in some cases
829 // multianewarray() runtime calls will be generated and
830 // the bytecode should not be reexecutes (stack will not be reset).
831 } else
832 return false;
833 }
834
835 // Helper function for adding JVMState and debug information to node
836 void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
837 // Add the safepoint edges to the call (or other safepoint).
838
1136 }
1137 Node* conv = _gvn.transform( new ConvI2LNode(offset));
1138 Node* mask = _gvn.transform(ConLNode::make((julong) max_juint));
1139 return _gvn.transform( new AndLNode(conv, mask) );
1140 }
1141
1142 Node* GraphKit::ConvL2I(Node* offset) {
1143 // short-circuit a common case
1144 jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
1145 if (offset_con != (jlong)Type::OffsetBot) {
1146 return intcon((int) offset_con);
1147 }
1148 return _gvn.transform( new ConvL2INode(offset));
1149 }
1150
1151 //-------------------------load_object_klass-----------------------------------
1152 Node* GraphKit::load_object_klass(Node* obj) {
1153 // Special-case a fresh allocation to avoid building nodes:
1154 Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
1155 if (akls != NULL) return akls;
1156 Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
1157 return _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
1158 }
1159
1160 //-------------------------load_array_length-----------------------------------
1161 Node* GraphKit::load_array_length(Node* array) {
1162 // Special-case a fresh allocation to avoid building nodes:
1163 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array, &_gvn);
1164 Node *alen;
1165 if (alloc == NULL) {
1166 Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
1167 alen = _gvn.transform( new LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
1168 } else {
1169 alen = alloc->Ideal_length();
1170 Node* ccast = alloc->make_ideal_length(_gvn.type(array)->is_oopptr(), &_gvn);
1171 if (ccast != alen) {
1172 alen = _gvn.transform(ccast);
1173 }
1174 }
1175 return alen;
1176 }
1177
1178 //------------------------------do_null_check----------------------------------
1179 // Helper function to do a NULL pointer check. Returned value is
1180 // the incoming address with NULL casted away. You are allowed to use the
1181 // not-null value only if you are control dependent on the test.
1182 extern int explicit_null_checks_inserted,
1183 explicit_null_checks_elided;
1184 Node* GraphKit::null_check_common(Node* value, BasicType type,
1185 // optional arguments for variations:
1502 record_for_igvn(st);
1503
1504 return st;
1505 }
1506
1507
// Emit the GC pre-barrier (if any) for an oop store.
//  do_load  - if true, the barrier code loads the previous value from
//             (obj, adr); otherwise pre_val supplies it directly.
//  ctl      - control input installed before barrier emission.
//  adr_idx  - alias index of the store address.
//  val/val_type - the value being stored and its oop type.
//  bt       - basic type of the stored value.
// Only G1's SATB scheme needs a pre-barrier; card-table schemes do not.
void GraphKit::pre_barrier(bool do_load,
                           Node* ctl,
                           Node* obj,
                           Node* adr,
                           uint adr_idx,
                           Node* val,
                           const TypeOopPtr* val_type,
                           Node* pre_val,
                           BasicType bt) {

  BarrierSet* bs = Universe::heap()->barrier_set();
  set_control(ctl);
  switch (bs->kind()) {
    case BarrierSet::G1SATBCTLogging:
      g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
      break;

    case BarrierSet::CardTableForRS:
    case BarrierSet::CardTableExtension:
    case BarrierSet::ModRef:
      // Card-table style collectors have no pre-barrier.
      break;

    default :
      ShouldNotReachHere();

  }
}
1535
1536 bool GraphKit::can_move_pre_barrier() const {
1537 BarrierSet* bs = Universe::heap()->barrier_set();
1538 switch (bs->kind()) {
1539 case BarrierSet::G1SATBCTLogging:
1540 return true; // Can move it if no safepoint
1541
1542 case BarrierSet::CardTableForRS:
1543 case BarrierSet::CardTableExtension:
1544 case BarrierSet::ModRef:
1545 return true; // There is no pre-barrier
1546
1547 default :
1548 ShouldNotReachHere();
1549 }
1550 return false;
1551 }
1552
// Emit the GC post-barrier (if any) after an oop store.
//  store    - the store node the barrier belongs to.
//  adr_idx  - alias index of the store address.
//  use_precise - if true, card-mark only the exact slot, not the whole object.
// G1 uses its logging post-barrier; plain card-table collectors dirty a
// card; ModRef needs nothing.
void GraphKit::post_barrier(Node* ctl,
                            Node* store,
                            Node* obj,
                            Node* adr,
                            uint adr_idx,
                            Node* val,
                            BasicType bt,
                            bool use_precise) {
  BarrierSet* bs = Universe::heap()->barrier_set();
  set_control(ctl);
  switch (bs->kind()) {
    case BarrierSet::G1SATBCTLogging:
      g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
      break;

    case BarrierSet::CardTableForRS:
    case BarrierSet::CardTableExtension:
      write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
      break;

    case BarrierSet::ModRef:
      // No post-barrier needed.
      break;

    default :
      ShouldNotReachHere();

  }
}
1581
1582 Node* GraphKit::store_oop(Node* ctl,
1583 Node* obj,
1584 Node* adr,
1585 const TypePtr* adr_type,
1586 Node* val,
1587 const TypeOopPtr* val_type,
1588 BasicType bt,
1589 bool use_precise,
1590 MemNode::MemOrd mo) {
1591 // Transformation of a value which could be NULL pointer (CastPP #NULL)
1592 // could be delayed during Parse (for example, in adjust_map_after_if()).
1593 // Execute transformation here to avoid barrier generation in such case.
1661 Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
1662 return basic_plus_adr(ary, base, scale);
1663 }
1664
1665 //-------------------------load_array_element-------------------------
1666 Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {
1667 const Type* elemtype = arytype->elem();
1668 BasicType elembt = elemtype->array_element_basic_type();
1669 Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1670 Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
1671 return ld;
1672 }
1673
1674 //-------------------------set_arguments_for_java_call-------------------------
1675 // Arguments (pre-popped from the stack) are taken from the JVMS.
1676 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1677 // Add the call arguments:
1678 uint nargs = call->method()->arg_size();
1679 for (uint i = 0; i < nargs; i++) {
1680 Node* arg = argument(i);
1681 call->init_req(i + TypeFunc::Parms, arg);
1682 }
1683 }
1684
1685 //---------------------------set_edges_for_java_call---------------------------
1686 // Connect a newly created call into the current JVMS.
1687 // A return value node (if any) is returned from set_edges_for_java_call.
1688 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1689
1690 // Add the predefined inputs:
1691 call->init_req( TypeFunc::Control, control() );
1692 call->init_req( TypeFunc::I_O , i_o() );
1693 call->init_req( TypeFunc::Memory , reset_memory() );
1694 call->init_req( TypeFunc::FramePtr, frameptr() );
1695 call->init_req( TypeFunc::ReturnAdr, top() );
1696
1697 add_safepoint_edges(call, must_throw);
1698
1699 Node* xcall = _gvn.transform(call);
1700
2902 // profiling data at this bytecode. Don't lose it, feed it to the
2903 // type system as a speculative type.
2904 not_null_obj = record_profiled_receiver_for_speculation(not_null_obj);
2905 } else {
2906 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
2907 // We may not have profiling here or it may not help us. If we
2908 // have a speculative type use it to perform an exact cast.
2909 ciKlass* spec_obj_type = obj_type->speculative_type();
2910 if (spec_obj_type != NULL || (ProfileDynamicTypes && data != NULL)) {
2911 Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, NULL, spec_obj_type, safe_for_replace);
2912 if (stopped()) { // Profile disagrees with this path.
2913 set_control(null_ctl); // Null is the only remaining possibility.
2914 return intcon(0);
2915 }
2916 if (cast_obj != NULL) {
2917 not_null_obj = cast_obj;
2918 }
2919 }
2920 }
2921
2922 // Load the object's klass
2923 Node* obj_klass = load_object_klass(not_null_obj);
2924
2925 // Generate the subtype check
2926 Node* not_subtype_ctrl = gen_subtype_check(obj_klass, superklass);
2927
2928 // Plug in the success path to the general merge in slot 1.
2929 region->init_req(_obj_path, control());
2930 phi ->init_req(_obj_path, intcon(1));
2931
2932 // Plug in the failing path to the general merge in slot 2.
2933 region->init_req(_fail_path, not_subtype_ctrl);
2934 phi ->init_req(_fail_path, intcon(0));
2935
2936 // Return final merged results
2937 set_control( _gvn.transform(region) );
2938 record_for_igvn(region);
2939 return _gvn.transform(phi);
2940 }
2941
2983 "interpreter profiles type checks only for these BCs");
2984 data = method()->method_data()->bci_to_data(bci());
2985 safe_for_replace = true;
2986 }
2987
2988 // Make the merge point
2989 enum { _obj_path = 1, _null_path, PATH_LIMIT };
2990 RegionNode* region = new RegionNode(PATH_LIMIT);
2991 Node* phi = new PhiNode(region, toop);
2992 C->set_has_split_ifs(true); // Has chance for split-if optimization
2993
2994 // Use null-cast information if it is available
2995 bool speculative_not_null = false;
2996 bool never_see_null = ((failure_control == NULL) // regular case only
2997 && seems_never_null(obj, data, speculative_not_null));
2998
2999 // Null check; get casted pointer; set region slot 3
3000 Node* null_ctl = top();
3001 Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
3002
3003 // If not_null_obj is dead, only null-path is taken
3004 if (stopped()) { // Doing instance-of on a NULL?
3005 set_control(null_ctl);
3006 return null();
3007 }
3008 region->init_req(_null_path, null_ctl);
3009 phi ->init_req(_null_path, null()); // Set null path value
3010 if (null_ctl == top()) {
3011 // Do this eagerly, so that pattern matches like is_diamond_phi
3012 // will work even during parsing.
3013 assert(_null_path == PATH_LIMIT-1, "delete last");
3014 region->del_req(_null_path);
3015 phi ->del_req(_null_path);
3016 }
3017
3018 Node* cast_obj = NULL;
3019 if (tk->klass_is_exact()) {
3020 // The following optimization tries to statically cast the speculative type of the object
3021 // (for example obtained during profiling) to the type of the superklass and then do a
3022 // dynamic check that the type of the object is what we expect. To work correctly
3132 } else {
3133 set_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)),alias_idx);
3134 }
3135 return membar;
3136 }
3137
3138 //------------------------------shared_lock------------------------------------
3139 // Emit locking code.
3140 FastLockNode* GraphKit::shared_lock(Node* obj) {
3141 // bci is either a monitorenter bc or InvocationEntryBci
3142 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3143 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3144
3145 if( !GenerateSynchronizationCode )
3146 return NULL; // Not locking things?
3147 if (stopped()) // Dead monitor?
3148 return NULL;
3149
3150 assert(dead_locals_are_killed(), "should kill locals before sync. point");
3151
3152 // Box the stack location
3153 Node* box = _gvn.transform(new BoxLockNode(next_monitor()));
3154 Node* mem = reset_memory();
3155
3156 FastLockNode * flock = _gvn.transform(new FastLockNode(0, obj, box) )->as_FastLock();
3157 if (UseBiasedLocking && PrintPreciseBiasedLockingStatistics) {
3158 // Create the counters for this fast lock.
3159 flock->create_lock_counter(sync_jvms()); // sync_jvms used to get current bci
3160 }
3161
3162 // Create the rtm counters for this fast lock if needed.
3163 flock->create_rtm_lock_counter(sync_jvms()); // sync_jvms used to get current bci
3164
3165 // Add monitor to debug info for the slow path. If we block inside the
3166 // slow path and de-opt, we need the monitor hanging around
3167 map()->push_monitor( flock );
3168
3169 const TypeFunc *tf = LockNode::lock_type();
3170 LockNode *lock = new LockNode(C, tf);
3171
3605 Node* ccast = alloc->make_ideal_length(ary_type, &_gvn);
3606 if (ccast != length) {
3607 _gvn.set_type_bottom(ccast);
3608 record_for_igvn(ccast);
3609 replace_in_map(length, ccast);
3610 }
3611 }
3612
3613 return javaoop;
3614 }
3615
3616 // The following "Ideal_foo" functions are placed here because they recognize
3617 // the graph shapes created by the functions immediately above.
3618
3619 //---------------------------Ideal_allocation----------------------------------
3620 // Given an oop pointer or raw pointer, see if it feeds from an AllocateNode.
3621 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) {
3622 if (ptr == NULL) { // reduce dumb test in callers
3623 return NULL;
3624 }
3625 if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast
3626 ptr = ptr->in(1);
3627 if (ptr == NULL) return NULL;
3628 }
3629 // Return NULL for allocations with several casts:
3630 // j.l.reflect.Array.newInstance(jobject, jint)
3631 // Object.clone()
3632 // to keep more precise type from last cast.
3633 if (ptr->is_Proj()) {
3634 Node* allo = ptr->in(0);
3635 if (allo != NULL && allo->is_Allocate()) {
3636 return allo->as_Allocate();
3637 }
3638 }
3639 // Report failure to match.
3640 return NULL;
3641 }
3642
3643 // Fancy version which also strips off an offset (and reports it to caller).
3644 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase,
4257 } __ end_if();
4258 } else {
4259 // Object.clone() instrinsic uses this path.
4260 g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
4261 }
4262
4263 // Final sync IdealKit and GraphKit.
4264 final_sync(ideal);
4265 }
4266 #undef __
4267
4268
4269
4270 Node* GraphKit::load_String_offset(Node* ctrl, Node* str) {
4271 if (java_lang_String::has_offset_field()) {
4272 int offset_offset = java_lang_String::offset_offset_in_bytes();
4273 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4274 false, NULL, 0);
4275 const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
4276 int offset_field_idx = C->get_alias_index(offset_field_type);
4277 return make_load(ctrl,
4278 basic_plus_adr(str, str, offset_offset),
4279 TypeInt::INT, T_INT, offset_field_idx, MemNode::unordered);
4280 } else {
4281 return intcon(0);
4282 }
4283 }
4284
4285 Node* GraphKit::load_String_length(Node* ctrl, Node* str) {
4286 if (java_lang_String::has_count_field()) {
4287 int count_offset = java_lang_String::count_offset_in_bytes();
4288 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4289 false, NULL, 0);
4290 const TypePtr* count_field_type = string_type->add_offset(count_offset);
4291 int count_field_idx = C->get_alias_index(count_field_type);
4292 return make_load(ctrl,
4293 basic_plus_adr(str, str, count_offset),
4294 TypeInt::INT, T_INT, count_field_idx, MemNode::unordered);
4295 } else {
4296 return load_array_length(load_String_value(ctrl, str));
4297 }
4298 }
4299
4300 Node* GraphKit::load_String_value(Node* ctrl, Node* str) {
4301 int value_offset = java_lang_String::value_offset_in_bytes();
4302 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4303 false, NULL, 0);
4304 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4305 const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
4306 TypeAry::make(TypeInt::CHAR,TypeInt::POS),
4307 ciTypeArrayKlass::make(T_CHAR), true, 0);
4308 int value_field_idx = C->get_alias_index(value_field_type);
4309 Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset),
4310 value_type, T_OBJECT, value_field_idx, MemNode::unordered);
4311 // String.value field is known to be @Stable.
4312 if (UseImplicitStableValues) {
4313 load = cast_array_to_stable(load, value_type);
4314 }
4315 return load;
4316 }
4317
4318 void GraphKit::store_String_offset(Node* ctrl, Node* str, Node* value) {
4319 int offset_offset = java_lang_String::offset_offset_in_bytes();
4320 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4321 false, NULL, 0);
4322 const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
4323 int offset_field_idx = C->get_alias_index(offset_field_type);
4324 store_to_memory(ctrl, basic_plus_adr(str, offset_offset),
4325 value, T_INT, offset_field_idx, MemNode::unordered);
4326 }
4327
4328 void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) {
4329 int value_offset = java_lang_String::value_offset_in_bytes();
4330 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4331 false, NULL, 0);
4332 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4333
4334 store_oop_to_object(ctrl, str, basic_plus_adr(str, value_offset), value_field_type,
4335 value, TypeAryPtr::CHARS, T_OBJECT, MemNode::unordered);
4336 }
4337
4338 void GraphKit::store_String_length(Node* ctrl, Node* str, Node* value) {
4339 int count_offset = java_lang_String::count_offset_in_bytes();
4340 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4341 false, NULL, 0);
4342 const TypePtr* count_field_type = string_type->add_offset(count_offset);
4343 int count_field_idx = C->get_alias_index(count_field_type);
4344 store_to_memory(ctrl, basic_plus_adr(str, count_offset),
4345 value, T_INT, count_field_idx, MemNode::unordered);
4346 }
4347
4348 Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) {
4349 // Reify the property as a CastPP node in Ideal graph to comply with monotonicity
4350 // assumption of CCP analysis.
4351 return _gvn.transform(new CastPPNode(ary, ary_type->cast_to_stable(true)));
4352 }
|
24
25 #include "precompiled.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
28 #include "gc/g1/heapRegion.hpp"
29 #include "gc/shared/barrierSet.hpp"
30 #include "gc/shared/cardTableModRefBS.hpp"
31 #include "gc/shared/collectedHeap.hpp"
32 #include "opto/addnode.hpp"
33 #include "opto/castnode.hpp"
34 #include "opto/convertnode.hpp"
35 #include "opto/graphKit.hpp"
36 #include "opto/idealKit.hpp"
37 #include "opto/intrinsicnode.hpp"
38 #include "opto/locknode.hpp"
39 #include "opto/machnode.hpp"
40 #include "opto/opaquenode.hpp"
41 #include "opto/parse.hpp"
42 #include "opto/rootnode.hpp"
43 #include "opto/runtime.hpp"
44 #include "opto/shenandoahSupport.hpp"
45 #include "runtime/deoptimization.hpp"
46 #include "runtime/sharedRuntime.hpp"
47
48 //----------------------------GraphKit-----------------------------------------
49 // Main utility constructor.
50 GraphKit::GraphKit(JVMState* jvms)
51 : Phase(Phase::Parser),
52 _env(C->env()),
53 _gvn(*C->initial_gvn())
54 {
55 _exceptions = jvms->map()->next_exception();
56 if (_exceptions != NULL) jvms->map()->set_next_exception(NULL);
57 set_jvms(jvms);
58 }
59
60 // Private constructor for parser.
61 GraphKit::GraphKit()
62 : Phase(Phase::Parser),
63 _env(C->env()),
64 _gvn(*C->initial_gvn())
799 return true;
800 assert(len == jvms->loc_size(), "live map consistent with locals map");
801 for (int local = 0; local < len; local++) {
802 if (!live_locals.at(local) && map->local(jvms, local) != top()) {
803 if (PrintMiscellaneous && (Verbose || WizardMode)) {
804 tty->print_cr("Zombie local %d: ", local);
805 jvms->dump();
806 }
807 return false;
808 }
809 }
810 }
811 return true;
812 }
813
814 #endif //ASSERT
815
816 // Helper function for enforcing certain bytecodes to reexecute if
817 // deoptimization happens
818 static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
819 ciMethod* cur_method = jvms->has_method() ? jvms->method() : NULL;
820 int cur_bci = jvms->bci();
821 if (cur_method != NULL && cur_bci != InvocationEntryBci) {
822 Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
823 return Interpreter::bytecode_should_reexecute(code) ||
824 is_anewarray && code == Bytecodes::_multianewarray;
825 // Reexecute _multianewarray bytecode which was replaced with
826 // sequence of [a]newarray. See Parse::do_multianewarray().
827 //
828 // Note: interpreter should not have it set since this optimization
829 // is limited by dimensions and guarded by flag so in some cases
830 // multianewarray() runtime calls will be generated and
831 // the bytecode should not be reexecutes (stack will not be reset).
832 } else
833 return false;
834 }
835
836 // Helper function for adding JVMState and debug information to node
837 void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
838 // Add the safepoint edges to the call (or other safepoint).
839
1137 }
1138 Node* conv = _gvn.transform( new ConvI2LNode(offset));
1139 Node* mask = _gvn.transform(ConLNode::make((julong) max_juint));
1140 return _gvn.transform( new AndLNode(conv, mask) );
1141 }
1142
1143 Node* GraphKit::ConvL2I(Node* offset) {
1144 // short-circuit a common case
1145 jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
1146 if (offset_con != (jlong)Type::OffsetBot) {
1147 return intcon((int) offset_con);
1148 }
1149 return _gvn.transform( new ConvL2INode(offset));
1150 }
1151
1152 //-------------------------load_object_klass-----------------------------------
1153 Node* GraphKit::load_object_klass(Node* obj) {
1154 // Special-case a fresh allocation to avoid building nodes:
1155 Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
1156 if (akls != NULL) return akls;
1157 if (ShenandoahVerifyReadsToFromSpace) {
1158 obj = shenandoah_read_barrier(obj);
1159 }
1160 Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
1161 return _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
1162 }
1163
1164 //-------------------------load_array_length-----------------------------------
1165 Node* GraphKit::load_array_length(Node* array) {
1166 // Special-case a fresh allocation to avoid building nodes:
1167 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array, &_gvn);
1168 Node *alen;
1169 if (alloc == NULL) {
1170 if (ShenandoahVerifyReadsToFromSpace) {
1171 array = shenandoah_read_barrier(array);
1172 }
1173
1174 Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
1175 alen = _gvn.transform( new LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
1176 } else {
1177 alen = alloc->Ideal_length();
1178 Node* ccast = alloc->make_ideal_length(_gvn.type(array)->is_oopptr(), &_gvn);
1179 if (ccast != alen) {
1180 alen = _gvn.transform(ccast);
1181 }
1182 }
1183 return alen;
1184 }
1185
1186 //------------------------------do_null_check----------------------------------
1187 // Helper function to do a NULL pointer check. Returned value is
1188 // the incoming address with NULL casted away. You are allowed to use the
1189 // not-null value only if you are control dependent on the test.
1190 extern int explicit_null_checks_inserted,
1191 explicit_null_checks_elided;
1192 Node* GraphKit::null_check_common(Node* value, BasicType type,
1193 // optional arguments for variations:
1510 record_for_igvn(st);
1511
1512 return st;
1513 }
1514
1515
// Emit the GC pre-barrier (if any) for an oop store.
//  do_load  - if true, the barrier code loads the previous value from
//             (obj, adr); otherwise pre_val supplies it directly.
//  ctl      - control input installed before barrier emission.
//  adr_idx  - alias index of the store address.
//  val/val_type - the value being stored and its oop type.
//  bt       - basic type of the stored value.
// G1 needs its SATB pre-barrier; Shenandoah reuses the same SATB
// machinery here; card-table schemes need no pre-barrier.
void GraphKit::pre_barrier(bool do_load,
                           Node* ctl,
                           Node* obj,
                           Node* adr,
                           uint adr_idx,
                           Node* val,
                           const TypeOopPtr* val_type,
                           Node* pre_val,
                           BasicType bt) {

  BarrierSet* bs = Universe::heap()->barrier_set();
  set_control(ctl);
  switch (bs->kind()) {
    case BarrierSet::G1SATBCTLogging:
    case BarrierSet::ShenandoahBarrierSet:
      g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
      break;

    case BarrierSet::CardTableForRS:
    case BarrierSet::CardTableExtension:
    case BarrierSet::ModRef:
      // Card-table style collectors have no pre-barrier.
      break;

    default :
      ShouldNotReachHere();

  }
}
1544
1545 bool GraphKit::can_move_pre_barrier() const {
1546 BarrierSet* bs = Universe::heap()->barrier_set();
1547 switch (bs->kind()) {
1548 case BarrierSet::G1SATBCTLogging:
1549 case BarrierSet::ShenandoahBarrierSet:
1550 return true; // Can move it if no safepoint
1551
1552 case BarrierSet::CardTableForRS:
1553 case BarrierSet::CardTableExtension:
1554 case BarrierSet::ModRef:
1555 return true; // There is no pre-barrier
1556
1557 default :
1558 ShouldNotReachHere();
1559 }
1560 return false;
1561 }
1562
// Emit the GC post-barrier (if any) after an oop store.
//  store    - the store node the barrier belongs to.
//  adr_idx  - alias index of the store address.
//  use_precise - if true, card-mark only the exact slot, not the whole object.
// G1 uses its logging post-barrier; plain card-table collectors dirty a
// card; ModRef and Shenandoah need no post-barrier here.
void GraphKit::post_barrier(Node* ctl,
                            Node* store,
                            Node* obj,
                            Node* adr,
                            uint adr_idx,
                            Node* val,
                            BasicType bt,
                            bool use_precise) {
  BarrierSet* bs = Universe::heap()->barrier_set();
  set_control(ctl);
  switch (bs->kind()) {
    case BarrierSet::G1SATBCTLogging:
      g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
      break;

    case BarrierSet::CardTableForRS:
    case BarrierSet::CardTableExtension:
      write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
      break;

    case BarrierSet::ModRef:
    case BarrierSet::ShenandoahBarrierSet:
      // No post-barrier needed.
      break;

    default :
      ShouldNotReachHere();

  }
}
1592
1593 Node* GraphKit::store_oop(Node* ctl,
1594 Node* obj,
1595 Node* adr,
1596 const TypePtr* adr_type,
1597 Node* val,
1598 const TypeOopPtr* val_type,
1599 BasicType bt,
1600 bool use_precise,
1601 MemNode::MemOrd mo) {
1602 // Transformation of a value which could be NULL pointer (CastPP #NULL)
1603 // could be delayed during Parse (for example, in adjust_map_after_if()).
1604 // Execute transformation here to avoid barrier generation in such case.
1672 Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
1673 return basic_plus_adr(ary, base, scale);
1674 }
1675
1676 //-------------------------load_array_element-------------------------
1677 Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {
1678 const Type* elemtype = arytype->elem();
1679 BasicType elembt = elemtype->array_element_basic_type();
1680 Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1681 Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
1682 return ld;
1683 }
1684
1685 //-------------------------set_arguments_for_java_call-------------------------
1686 // Arguments (pre-popped from the stack) are taken from the JVMS.
1687 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1688 // Add the call arguments:
1689 uint nargs = call->method()->arg_size();
1690 for (uint i = 0; i < nargs; i++) {
1691 Node* arg = argument(i);
1692 if (ShenandoahVerifyReadsToFromSpace && call->is_CallDynamicJava() && i == 0) {
1693 arg = shenandoah_read_barrier(arg);
1694 }
1695 call->init_req(i + TypeFunc::Parms, arg);
1696 }
1697 }
1698
1699 //---------------------------set_edges_for_java_call---------------------------
1700 // Connect a newly created call into the current JVMS.
1701 // A return value node (if any) is returned from set_edges_for_java_call.
1702 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1703
1704 // Add the predefined inputs:
1705 call->init_req( TypeFunc::Control, control() );
1706 call->init_req( TypeFunc::I_O , i_o() );
1707 call->init_req( TypeFunc::Memory , reset_memory() );
1708 call->init_req( TypeFunc::FramePtr, frameptr() );
1709 call->init_req( TypeFunc::ReturnAdr, top() );
1710
1711 add_safepoint_edges(call, must_throw);
1712
1713 Node* xcall = _gvn.transform(call);
1714
2916 // profiling data at this bytecode. Don't lose it, feed it to the
2917 // type system as a speculative type.
2918 not_null_obj = record_profiled_receiver_for_speculation(not_null_obj);
2919 } else {
2920 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
2921 // We may not have profiling here or it may not help us. If we
2922 // have a speculative type use it to perform an exact cast.
2923 ciKlass* spec_obj_type = obj_type->speculative_type();
2924 if (spec_obj_type != NULL || (ProfileDynamicTypes && data != NULL)) {
2925 Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, NULL, spec_obj_type, safe_for_replace);
2926 if (stopped()) { // Profile disagrees with this path.
2927 set_control(null_ctl); // Null is the only remaining possibility.
2928 return intcon(0);
2929 }
2930 if (cast_obj != NULL) {
2931 not_null_obj = cast_obj;
2932 }
2933 }
2934 }
2935
2936 if (ShenandoahVerifyReadsToFromSpace) {
2937 not_null_obj = shenandoah_read_barrier(not_null_obj);
2938 }
2939
2940 // Load the object's klass
2941 Node* obj_klass = load_object_klass(not_null_obj);
2942
2943 // Generate the subtype check
2944 Node* not_subtype_ctrl = gen_subtype_check(obj_klass, superklass);
2945
2946 // Plug in the success path to the general merge in slot 1.
2947 region->init_req(_obj_path, control());
2948 phi ->init_req(_obj_path, intcon(1));
2949
2950 // Plug in the failing path to the general merge in slot 2.
2951 region->init_req(_fail_path, not_subtype_ctrl);
2952 phi ->init_req(_fail_path, intcon(0));
2953
2954 // Return final merged results
2955 set_control( _gvn.transform(region) );
2956 record_for_igvn(region);
2957 return _gvn.transform(phi);
2958 }
2959
3001 "interpreter profiles type checks only for these BCs");
3002 data = method()->method_data()->bci_to_data(bci());
3003 safe_for_replace = true;
3004 }
3005
3006 // Make the merge point
3007 enum { _obj_path = 1, _null_path, PATH_LIMIT };
3008 RegionNode* region = new RegionNode(PATH_LIMIT);
3009 Node* phi = new PhiNode(region, toop);
3010 C->set_has_split_ifs(true); // Has chance for split-if optimization
3011
3012 // Use null-cast information if it is available
3013 bool speculative_not_null = false;
3014 bool never_see_null = ((failure_control == NULL) // regular case only
3015 && seems_never_null(obj, data, speculative_not_null));
3016
3017 // Null check; get casted pointer; set region slot 3
3018 Node* null_ctl = top();
3019 Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
3020
3021 if (ShenandoahVerifyReadsToFromSpace) {
3022 not_null_obj = shenandoah_read_barrier(not_null_obj);
3023 }
3024
3025 // If not_null_obj is dead, only null-path is taken
3026 if (stopped()) { // Doing instance-of on a NULL?
3027 set_control(null_ctl);
3028 return null();
3029 }
3030 region->init_req(_null_path, null_ctl);
3031 phi ->init_req(_null_path, null()); // Set null path value
3032 if (null_ctl == top()) {
3033 // Do this eagerly, so that pattern matches like is_diamond_phi
3034 // will work even during parsing.
3035 assert(_null_path == PATH_LIMIT-1, "delete last");
3036 region->del_req(_null_path);
3037 phi ->del_req(_null_path);
3038 }
3039
3040 Node* cast_obj = NULL;
3041 if (tk->klass_is_exact()) {
3042 // The following optimization tries to statically cast the speculative type of the object
3043 // (for example obtained during profiling) to the type of the superklass and then do a
3044 // dynamic check that the type of the object is what we expect. To work correctly
3154 } else {
3155 set_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)),alias_idx);
3156 }
3157 return membar;
3158 }
3159
3160 //------------------------------shared_lock------------------------------------
3161 // Emit locking code.
3162 FastLockNode* GraphKit::shared_lock(Node* obj) {
3163 // bci is either a monitorenter bc or InvocationEntryBci
3164 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3165 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3166
3167 if( !GenerateSynchronizationCode )
3168 return NULL; // Not locking things?
3169 if (stopped()) // Dead monitor?
3170 return NULL;
3171
3172 assert(dead_locals_are_killed(), "should kill locals before sync. point");
3173
3174 obj = shenandoah_write_barrier(obj);
3175
3176 // Box the stack location
3177 Node* box = _gvn.transform(new BoxLockNode(next_monitor()));
3178 Node* mem = reset_memory();
3179
3180 FastLockNode * flock = _gvn.transform(new FastLockNode(0, obj, box) )->as_FastLock();
3181 if (UseBiasedLocking && PrintPreciseBiasedLockingStatistics) {
3182 // Create the counters for this fast lock.
3183 flock->create_lock_counter(sync_jvms()); // sync_jvms used to get current bci
3184 }
3185
3186 // Create the rtm counters for this fast lock if needed.
3187 flock->create_rtm_lock_counter(sync_jvms()); // sync_jvms used to get current bci
3188
3189 // Add monitor to debug info for the slow path. If we block inside the
3190 // slow path and de-opt, we need the monitor hanging around
3191 map()->push_monitor( flock );
3192
3193 const TypeFunc *tf = LockNode::lock_type();
3194 LockNode *lock = new LockNode(C, tf);
3195
3629 Node* ccast = alloc->make_ideal_length(ary_type, &_gvn);
3630 if (ccast != length) {
3631 _gvn.set_type_bottom(ccast);
3632 record_for_igvn(ccast);
3633 replace_in_map(length, ccast);
3634 }
3635 }
3636
3637 return javaoop;
3638 }
3639
3640 // The following "Ideal_foo" functions are placed here because they recognize
3641 // the graph shapes created by the functions immediately above.
3642
3643 //---------------------------Ideal_allocation----------------------------------
3644 // Given an oop pointer or raw pointer, see if it feeds from an AllocateNode.
3645 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) {
3646 if (ptr == NULL) { // reduce dumb test in callers
3647 return NULL;
3648 }
3649
3650 // Attempt to see through Shenandoah barriers.
3651 ptr = ShenandoahBarrierNode::skip_through_barrier(ptr);
3652
3653 if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast
3654 ptr = ptr->in(1);
3655 if (ptr == NULL) return NULL;
3656 }
3657 // Return NULL for allocations with several casts:
3658 // j.l.reflect.Array.newInstance(jobject, jint)
3659 // Object.clone()
3660 // to keep more precise type from last cast.
3661 if (ptr->is_Proj()) {
3662 Node* allo = ptr->in(0);
3663 if (allo != NULL && allo->is_Allocate()) {
3664 return allo->as_Allocate();
3665 }
3666 }
3667 // Report failure to match.
3668 return NULL;
3669 }
3670
3671 // Fancy version which also strips off an offset (and reports it to caller).
3672 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase,
4285 } __ end_if();
4286 } else {
4287 // Object.clone() instrinsic uses this path.
4288 g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
4289 }
4290
4291 // Final sync IdealKit and GraphKit.
4292 final_sync(ideal);
4293 }
4294 #undef __
4295
4296
4297
4298 Node* GraphKit::load_String_offset(Node* ctrl, Node* str) {
4299 if (java_lang_String::has_offset_field()) {
4300 int offset_offset = java_lang_String::offset_offset_in_bytes();
4301 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4302 false, NULL, 0);
4303 const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
4304 int offset_field_idx = C->get_alias_index(offset_field_type);
4305
4306 str = shenandoah_read_barrier(str);
4307
4308 return make_load(ctrl,
4309 basic_plus_adr(str, str, offset_offset),
4310 TypeInt::INT, T_INT, offset_field_idx, MemNode::unordered);
4311 } else {
4312 return intcon(0);
4313 }
4314 }
4315
4316 Node* GraphKit::load_String_length(Node* ctrl, Node* str) {
4317 if (java_lang_String::has_count_field()) {
4318 int count_offset = java_lang_String::count_offset_in_bytes();
4319 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4320 false, NULL, 0);
4321 const TypePtr* count_field_type = string_type->add_offset(count_offset);
4322 int count_field_idx = C->get_alias_index(count_field_type);
4323
4324 str = shenandoah_read_barrier(str);
4325
4326 return make_load(ctrl,
4327 basic_plus_adr(str, str, count_offset),
4328 TypeInt::INT, T_INT, count_field_idx, MemNode::unordered);
4329 } else {
4330 return load_array_length(load_String_value(ctrl, str));
4331 }
4332 }
4333
4334 Node* GraphKit::load_String_value(Node* ctrl, Node* str) {
4335 int value_offset = java_lang_String::value_offset_in_bytes();
4336 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4337 false, NULL, 0);
4338 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4339 const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
4340 TypeAry::make(TypeInt::CHAR,TypeInt::POS),
4341 ciTypeArrayKlass::make(T_CHAR), true, 0);
4342 int value_field_idx = C->get_alias_index(value_field_type);
4343
4344 str = shenandoah_read_barrier(str);
4345
4346 Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset),
4347 value_type, T_OBJECT, value_field_idx, MemNode::unordered);
4348 // String.value field is known to be @Stable.
4349 if (UseImplicitStableValues) {
4350 load = cast_array_to_stable(load, value_type);
4351 }
4352 return load;
4353 }
4354
4355 void GraphKit::store_String_offset(Node* ctrl, Node* str, Node* value) {
4356 int offset_offset = java_lang_String::offset_offset_in_bytes();
4357 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4358 false, NULL, 0);
4359 const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
4360 int offset_field_idx = C->get_alias_index(offset_field_type);
4361
4362 // TODO: Use incoming ctrl.
4363 str = shenandoah_write_barrier(str);
4364
4365 store_to_memory(UseShenandoahGC ? control() : ctrl, basic_plus_adr(str, offset_offset),
4366 value, T_INT, offset_field_idx, MemNode::unordered);
4367 }
4368
4369 void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) {
4370 int value_offset = java_lang_String::value_offset_in_bytes();
4371 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4372 false, NULL, 0);
4373 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4374
4375 // TODO: Use incoming ctrl.
4376 str = shenandoah_write_barrier(str);
4377 value = shenandoah_read_barrier_nomem(value);
4378
4379 store_oop_to_object(UseShenandoahGC ? control() : ctrl, str, basic_plus_adr(str, value_offset), value_field_type,
4380 value, TypeAryPtr::CHARS, T_OBJECT, MemNode::unordered);
4381 }
4382
4383 void GraphKit::store_String_length(Node* ctrl, Node* str, Node* value) {
4384 int count_offset = java_lang_String::count_offset_in_bytes();
4385 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4386 false, NULL, 0);
4387 const TypePtr* count_field_type = string_type->add_offset(count_offset);
4388 int count_field_idx = C->get_alias_index(count_field_type);
4389
4390 // TODO: Use incoming ctrl.
4391 str = shenandoah_write_barrier(str);
4392
4393 store_to_memory(UseShenandoahGC ? control() : ctrl, basic_plus_adr(str, count_offset),
4394 value, T_INT, count_field_idx, MemNode::unordered);
4395 }
4396
4397 Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) {
4398 // Reify the property as a CastPP node in Ideal graph to comply with monotonicity
4399 // assumption of CCP analysis.
4400 return _gvn.transform(new CastPPNode(ary, ary_type->cast_to_stable(true)));
4401 }
4402
4403 Node* GraphKit::shenandoah_read_barrier(Node* obj) {
4404 return shenandoah_read_barrier_impl(obj, false, true);
4405 }
4406
4407 Node* GraphKit::shenandoah_read_barrier_nomem(Node* obj) {
4408 return shenandoah_read_barrier_impl(obj, false, false);
4409 }
4410
// Emit a Shenandoah read barrier for obj and return the (possibly
// barriered) value.  use_ctrl decides whether the barrier node is pinned
// to the current control; use_mem decides whether it observes the current
// memory slice or immutable memory.  May split control flow to insert a
// null check when obj's type does not exclude NULL.
Node* GraphKit::shenandoah_read_barrier_impl(Node* obj, bool use_ctrl, bool use_mem) {

  if (UseShenandoahGC && ShenandoahReadBarrier) {
    const Type* obj_type = obj->bottom_type();
    if (obj_type->higher_equal(TypePtr::NULL_PTR)) {
      // Statically known to be NULL: nothing to forward.
      // tty->print_cr("killed barrier for NULL object");
      return obj;
    }
    // Memory slice of the word just before the object header (offset -8);
    // presumably the Brooks forwarding pointer — TODO confirm against the
    // barrier node's address computation.
    const TypePtr* adr_type = obj_type->is_ptr()->add_offset(-8);
    Node* mem = use_mem ? memory(adr_type) : immutable_memory();

    if (! ShenandoahBarrierNode::needs_barrier(&_gvn, NULL, obj, mem)) {
      // Barrier statically proven unnecessary for this value; use it as-is.
      return obj;
    }


    if (obj_type->meet(TypePtr::NULL_PTR) == obj_type->remove_speculative()) {

      // We don't know if it's null or not. Need null-check.
      // Build a diamond: NULL values bypass the barrier unchanged,
      // non-NULL values go through the read barrier node.
      enum { _not_null_path = 1, _null_path, PATH_LIMIT };
      RegionNode* region = new RegionNode(PATH_LIMIT);
      Node* phi = new PhiNode(region, obj_type);
      Node* null_ctrl = top();
      Node* not_null_obj = null_check_oop(obj, &null_ctrl);

      // NULL path: pass the original (null) value through.
      region->init_req(_null_path, null_ctrl);
      phi ->init_req(_null_path, obj);

      Node* ctrl = use_ctrl ? control() : NULL;
      ShenandoahReadBarrierNode* rb = new ShenandoahReadBarrierNode(ctrl, mem, not_null_obj);
      Node* n = _gvn.transform(rb);

      region->init_req(_not_null_path, control());
      phi ->init_req(_not_null_path, n);

      // Merge both paths and continue with the merged control.
      set_control(_gvn.transform(region));
      record_for_igvn(region);
      return _gvn.transform(phi);

    } else {
      // We know it is not null. Simple barrier is sufficient.
      Node* ctrl = use_ctrl ? control() : NULL;
      ShenandoahReadBarrierNode* rb = new ShenandoahReadBarrierNode(ctrl, mem, obj);
      Node* n = _gvn.transform(rb);
      record_for_igvn(n);
      return n;
    }

  } else {
    // Barriers disabled (non-Shenandoah GC or read barriers off).
    return obj;
  }
}
4464
// Emit a Shenandoah write barrier for obj and return the (possibly
// barriered) value.  Unlike the read barrier this produces a memory
// effect: a fresh barrier node gets a ShenandoahWBMemProjNode projection
// wired into the forwarding-pointer memory slice.  May split control flow
// to insert a null check when obj's type does not exclude NULL.
Node* GraphKit::shenandoah_write_barrier(Node* obj) {

  if (UseShenandoahGC && ShenandoahWriteBarrier) {

    if (! ShenandoahBarrierNode::needs_barrier(&_gvn, NULL, obj, NULL)) {
      // Barrier statically proven unnecessary for this value; use it as-is.
      return obj;
    }
    const Type* obj_type = obj->bottom_type();
    // Memory slice of the word just before the object header (offset -8);
    // presumably the Brooks forwarding pointer — TODO confirm.
    const TypePtr* adr_type = obj_type->is_ptr()->add_offset(-8);
    // tty->print_cr("memory at:");
    // adr_type->dump();
    // tty->print_cr("\n");
    // memory(adr_type)->dump();
    if (obj_type->meet(TypePtr::NULL_PTR) == obj_type->remove_speculative()) {
      // We don't know if it's null or not. Need null-check.
      // Build a diamond merging value, control AND memory state, since the
      // barrier only updates memory on the non-null path.
      enum { _not_null_path = 1, _null_path, PATH_LIMIT };
      RegionNode* region = new RegionNode(PATH_LIMIT);
      Node* phi = new PhiNode(region, obj_type);
      Node* memphi = PhiNode::make(region, memory(adr_type), Type::MEMORY, C->alias_type(adr_type)->adr_type());

      // Capture the memory state before the barrier for the null path.
      Node* prev_mem = memory(adr_type);
      Node* null_ctrl = top();
      Node* not_null_obj = null_check_oop(obj, &null_ctrl);

      // NULL path: result is null, memory unchanged.
      region->init_req(_null_path, null_ctrl);
      phi ->init_req(_null_path, null());
      memphi->init_req(_null_path, prev_mem);

      ShenandoahWriteBarrierNode* wb = new ShenandoahWriteBarrierNode(NULL, memory(adr_type), not_null_obj);
      Node* n = _gvn.transform(wb);
      if (n == wb) { // New barrier needs memory projection.
        Node* proj = _gvn.transform(new ShenandoahWBMemProjNode(n));
        set_memory(proj, adr_type);
      }

      region->init_req(_not_null_path, control());
      phi ->init_req(_not_null_path, n);
      memphi->init_req(_not_null_path, memory(adr_type));

      // Merge control, then memory, then produce the merged value.
      set_control(_gvn.transform(region));
      record_for_igvn(region);
      set_memory(_gvn.transform(memphi), adr_type);

      Node* res_val = _gvn.transform(phi);
      // replace_in_map(obj, res_val);
      return res_val;
    } else {
      // We know it is not null. Simple barrier is sufficient.
      ShenandoahWriteBarrierNode* wb = new ShenandoahWriteBarrierNode(NULL, memory(adr_type), obj);
      Node* n = _gvn.transform(wb);
      if (n == wb) {
        // New barrier (not commoned by GVN) needs its memory projection.
        Node* proj = _gvn.transform(new ShenandoahWBMemProjNode(wb));
        set_memory(proj, adr_type);
      }
      // replace_in_map(obj, n);
      record_for_igvn(n);
      return n;
    }

  } else {
    // Barriers disabled (non-Shenandoah GC or write barriers off).
    return obj;
  }
}
4528
// Prepare the two operands of a reference comparison (acmp) under
// Shenandoah.  If the raw comparison fails, the operands might still be
// from-space/to-space copies of the same object, so both are re-resolved
// through read barriers and the comparison operands are replaced (a and b
// are updated in place via the reference parameters).  Several static
// filters elide the barriers when they provably cannot change the outcome.
void GraphKit::shenandoah_acmp_barrier(Node*& a, Node*& b) {
  if (UseShenandoahGC) {
    const Type* a_type = a->bottom_type();
    const Type* b_type = b->bottom_type();
    if (a_type->higher_equal(TypePtr::NULL_PTR) || b_type->higher_equal(TypePtr::NULL_PTR)) {
      // We know one arg is gonna be null. No need for barriers.
      // tty->print_cr("eliminate acmp barrier on null");
      return;
    }
    /*
    if ((!a_type->isa_oopptr()) || (!b_type->isa_oopptr())) {
      a_type->dump();
      b_type->dump();
    }
    */
    if (a_type->is_oopptr()->const_oop() != NULL && b_type->is_oopptr()->const_oop() != NULL ) {
      // Both args are inlined constants. No need for barriers.
      // tty->print_cr("eliminate acmp barrier on constant");
      return;
    }
    if (a->Opcode() == Op_ShenandoahWriteBarrier && b->Opcode() == Op_ShenandoahWriteBarrier) {
      // Both args are already write-barrier'd. No need for barriers.
      // tty->print_cr("eliminate acmp barrier on write barrier");
      return;
    }
    if (AllocateNode::Ideal_allocation(a, &_gvn) != NULL || AllocateNode::Ideal_allocation(b, &_gvn) != NULL) {
      // One arg is a fresh allocation, already in to-space. No need for barriers.
      // tty->print_cr("eliminate acmp barrier on new obj");
      return;
    }

    // Split control flow: if the raw comparison already says "equal",
    // keep the original values; otherwise retry after resolving both
    // operands through read barriers.
    enum { _equal = 1, _not_equal, PATH_LIMIT };
    RegionNode* region = new RegionNode(PATH_LIMIT);
    PhiNode* phiA = PhiNode::make(region, a);
    PhiNode* phiB = PhiNode::make(region, b);

    Node* cmp = _gvn.transform(new CmpPNode(b, a));
    Node* tst = _gvn.transform(new BoolNode(cmp, BoolTest::eq));

    // TODO: Use profiling data.
    IfNode* iff = create_and_map_if(control(), tst, PROB_FAIR, COUNT_UNKNOWN);
    Node* iftrue = _gvn.transform(new IfTrueNode(iff));
    Node* iffalse = _gvn.transform(new IfFalseNode(iff));

    // Equal path: Use original values.
    region->init_req(_equal, iftrue);
    phiA->init_req(_equal, a);
    phiB->init_req(_equal, b);

    // Unequal path: retry after read barriers.
    set_control(iffalse);
    a = shenandoah_read_barrier_impl(a, true, true);
    b = shenandoah_read_barrier_impl(b, true, true);

    region->init_req(_not_equal, control());
    phiA->init_req(_not_equal, a);
    phiB->init_req(_not_equal, b);

    // Merge both paths and hand the merged values back to the caller.
    set_control(_gvn.transform(region));
    record_for_igvn(region);

    a = _gvn.transform(phiA);
    b = _gvn.transform(phiB);
  }
}
|