--- old/src/hotspot/share/opto/parse2.cpp	2019-03-11 14:26:54.598354485 +0100
+++ new/src/hotspot/share/opto/parse2.cpp	2019-03-11 14:26:54.386354488 +0100
@@ -36,12 +36,14 @@
 #include "opto/convertnode.hpp"
 #include "opto/divnode.hpp"
 #include "opto/idealGraphPrinter.hpp"
+#include "opto/idealKit.hpp"
 #include "opto/matcher.hpp"
 #include "opto/memnode.hpp"
 #include "opto/mulnode.hpp"
 #include "opto/opaquenode.hpp"
 #include "opto/parse.hpp"
 #include "opto/runtime.hpp"
+#include "opto/valuetypenode.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/sharedRuntime.hpp"
 
@@ -53,58 +55,224 @@
 //---------------------------------array_load----------------------------------
 void Parse::array_load(BasicType bt) {
   const Type* elemtype = Type::TOP;
-  bool big_val = bt == T_DOUBLE || bt == T_LONG;
   Node* adr = array_addressing(bt, 0, &elemtype);
   if (stopped())  return;     // guaranteed null or range check
 
-  pop();                      // index (already used)
-  Node* array = pop();        // the array itself
+  Node* idx = pop();
+  Node* ary = pop();
+
+  // Handle value type arrays
+  const TypeOopPtr* elemptr = elemtype->make_oopptr();
+  const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
+  if (elemtype->isa_valuetype() != NULL) {
+    // Load from flattened value type array
+    ciValueKlass* vk = elemtype->is_valuetype()->value_klass();
+    Node* vt = ValueTypeNode::make_from_flattened(this, vk, ary, adr);
+    push(vt);
+    return;
+  } else if (elemptr != NULL && elemptr->is_valuetypeptr()) {
+    // Load from non-flattened value type array (elements can never be null)
+    bt = T_VALUETYPE;
+    assert(elemptr->meet(TypePtr::NULL_PTR) != elemptr, "value type array elements should never be null");
+  } else if (ValueArrayFlatten && elemptr != NULL && elemptr->can_be_value_type() &&
+             !ary_t->klass_is_exact()) {
+    // Cannot statically determine if array is flattened, emit runtime check
+    IdealKit ideal(this);
+    IdealVariable res(ideal);
+    ideal.declarations_done();
+    Node* kls = load_object_klass(ary);
+    Node* tag = load_lh_array_tag(kls);
+    ideal.if_then(tag, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value)); {
+      // non-flattened
+      sync_kit(ideal);
+      const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
+      elemtype = ary_t->elem()->make_oopptr();
+      Node* ld = access_load_at(ary, adr, adr_type, elemtype, bt,
+                                IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
+      ideal.sync_kit(this);
+      ideal.set(res, ld);
+    } ideal.else_(); {
+      // flattened
+      sync_kit(ideal);
+      Node* k_adr = basic_plus_adr(kls, in_bytes(ArrayKlass::element_klass_offset()));
+      Node* elem_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
+      Node* obj_size = NULL;
+      kill_dead_locals();
+      inc_sp(2);
+      Node* alloc_obj = new_instance(elem_klass, NULL, &obj_size, /*deoptimize_on_exception=*/true);
+      dec_sp(2);
+
+      AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
+      assert(alloc->maybe_set_complete(&_gvn), "");
+      alloc->initialization()->set_complete_with_arraycopy();
+      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
+      // Unknown value type, so it might have reference fields
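+      // If the GC needs no barriers for this copy, the element payload is
+      // copied below with a raw access_clone(); otherwise we fall back to the
+      // load_unknown_value runtime leaf call.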
shift in place"); + Node* lhp = basic_plus_adr(kls, in_bytes(Klass::layout_helper_offset())); + Node* elem_shift = make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered); + uint header = arrayOopDesc::base_offset_in_bytes(T_VALUETYPE); + Node* base = basic_plus_adr(ary, header); + idx = Compile::conv_I2X_index(&_gvn, idx, TypeInt::POS, control()); + Node* scale = _gvn.transform(new LShiftXNode(idx, elem_shift)); + Node* adr = basic_plus_adr(ary, base, scale); + + access_clone(adr, dst_base, countx, false); + } else { + ideal.sync_kit(this); + ideal.make_leaf_call(OptoRuntime::load_unknown_value_Type(), + CAST_FROM_FN_PTR(address, OptoRuntime::load_unknown_value), + "load_unknown_value", + ary, idx, alloc_obj); + sync_kit(ideal); + } + + insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress)); + + ideal.sync_kit(this); + ideal.set(res, alloc_obj); + } ideal.end_if(); + sync_kit(ideal); + push_node(bt, ideal.value(res)); + return; + } if (elemtype == TypeInt::BOOL) { bt = T_BOOLEAN; } else if (bt == T_OBJECT) { - elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr(); + elemtype = ary_t->elem()->make_oopptr(); } const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt); - - Node* ld = access_load_at(array, adr, adr_type, elemtype, bt, + Node* ld = access_load_at(ary, adr, adr_type, elemtype, bt, IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD); - if (big_val) { - push_pair(ld); - } else { - push(ld); + if (bt == T_VALUETYPE) { + // Loading a non-flattened (but flattenable) value type from an array + assert(!gvn().type(ld)->maybe_null(), "value type array elements should never be null"); + if (elemptr->value_klass()->is_scalarizable()) { + ld = ValueTypeNode::make_from_oop(this, ld, elemptr->value_klass()); + } } + + push_node(bt, ld); } //--------------------------------array_store---------------------------------- void Parse::array_store(BasicType bt) { const Type* elemtype = Type::TOP; - bool big_val = bt == T_DOUBLE || bt == T_LONG; - Node* adr = array_addressing(bt, big_val ? 
+      insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
+
+      ideal.sync_kit(this);
+      ideal.set(res, alloc_obj);
+    } ideal.end_if();
+    sync_kit(ideal);
+    push_node(bt, ideal.value(res));
+    return;
+  }
 
   if (elemtype == TypeInt::BOOL) {
     bt = T_BOOLEAN;
   } else if (bt == T_OBJECT) {
-    elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
+    elemtype = ary_t->elem()->make_oopptr();
   }
 
   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
-
-  Node* ld = access_load_at(array, adr, adr_type, elemtype, bt,
+  Node* ld = access_load_at(ary, adr, adr_type, elemtype, bt,
                             IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
-  if (big_val) {
-    push_pair(ld);
-  } else {
-    push(ld);
+  if (bt == T_VALUETYPE) {
+    // Loading a non-flattened (but flattenable) value type from an array
+    assert(!gvn().type(ld)->maybe_null(), "value type array elements should never be null");
+    if (elemptr->value_klass()->is_scalarizable()) {
+      ld = ValueTypeNode::make_from_oop(this, ld, elemptr->value_klass());
+    }
   }
+
+  push_node(bt, ld);
 }
 
 //--------------------------------array_store----------------------------------
 void Parse::array_store(BasicType bt) {
   const Type* elemtype = Type::TOP;
-  bool big_val = bt == T_DOUBLE || bt == T_LONG;
-  Node* adr = array_addressing(bt, big_val ? 2 : 1, &elemtype);
+  Node* adr = array_addressing(bt, type2size[bt], &elemtype);
   if (stopped())  return;     // guaranteed null or range check
+  Node* cast_val = NULL;
   if (bt == T_OBJECT) {
-    array_store_check();
+    cast_val = array_store_check();
+    if (stopped()) return;
   }
-  Node* val;                  // Oop to store
-  if (big_val) {
-    val = pop_pair();
-  } else {
-    val = pop();
+  Node* val = pop_node(bt); // Value to store
+  Node* idx = pop();        // Index in the array
+  Node* ary = pop();        // The array itself
+
+  const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
+  if (bt == T_OBJECT) {
+    const TypeOopPtr* elemptr = elemtype->make_oopptr();
+    const Type* val_t = _gvn.type(val);
+    if (elemtype->isa_valuetype() != NULL) {
+      // Store to flattened value type array
+      if (!cast_val->is_ValueType()) {
+        inc_sp(3);
+        cast_val = null_check(cast_val);
+        if (stopped()) return;
+        dec_sp(3);
+        cast_val = ValueTypeNode::make_from_oop(this, cast_val, elemtype->is_valuetype()->value_klass());
+      }
+      cast_val->as_ValueType()->store_flattened(this, ary, adr);
+      return;
+    } else if (elemptr->is_valuetypeptr()) {
+      // Store to non-flattened value type array
+      if (!cast_val->is_ValueType()) {
+        // Cannot store null into a value type array
+        inc_sp(3);
+        cast_val = null_check(cast_val);
+        if (stopped()) return;
+        dec_sp(3);
+      }
+    } else if (elemptr->can_be_value_type() && !ary_t->klass_is_exact() &&
+               (val->is_ValueType() || val_t == TypePtr::NULL_PTR || val_t->is_oopptr()->can_be_value_type())) {
+      if (ValueArrayFlatten) {
+        IdealKit ideal(this);
+        Node* kls = load_object_klass(ary);
+        Node* layout_val = load_lh_array_tag(kls);
+        ideal.if_then(layout_val, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value)); {
+          // non-flattened
+          sync_kit(ideal);
+
+          if (!val->is_ValueType() && TypePtr::NULL_PTR->higher_equal(val_t)) {
+            gen_value_type_array_guard(ary, val, 3);
+          }
+
+          const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
+          elemtype = ary_t->elem()->make_oopptr();
+          access_store_at(ary, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
+          ideal.sync_kit(this);
+        } ideal.else_(); {
+          // flattened
+          // Object/interface array must be flattened, cast it
+          if (val->is_ValueType()) {
+            sync_kit(ideal);
+            const TypeValueType* vt = _gvn.type(val)->is_valuetype();
+            ciArrayKlass* array_klass = ciArrayKlass::make(vt->value_klass());
+            const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
+            ary = _gvn.transform(new CheckCastPPNode(control(), ary, arytype));
+            adr = array_element_address(ary, idx, T_OBJECT, arytype->size(), control());
+            val->as_ValueType()->store_flattened(this, ary, adr);
+            ideal.sync_kit(this);
+          } else {
+            if (TypePtr::NULL_PTR->higher_equal(val_t)) {
+              sync_kit(ideal);
+              Node* null_ctl = top();
+              val = null_check_oop(val, &null_ctl);
+              if (null_ctl != top()) {
+                PreserveJVMState pjvms(this);
+                inc_sp(3);
+                set_control(null_ctl);
+                uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
+                dec_sp(3);
+              }
+              ideal.sync_kit(this);
+            }
+            if (!ideal.ctrl()->is_top()) {
+              ideal.make_leaf_call(OptoRuntime::store_unknown_value_Type(),
+                                   CAST_FROM_FN_PTR(address, OptoRuntime::store_unknown_value),
+                                   "store_unknown_value",
+                                   val, ary, idx);
+            }
+          }
+        } ideal.end_if();
+        sync_kit(ideal);
+        return;
+      } else {
+        if (!val->is_ValueType() && TypePtr::NULL_PTR->higher_equal(val_t)) {
+          gen_value_type_array_guard(ary, val, 3);
+        }
+      }
+    }
   }
-  pop();                      // index (already used)
-  Node* array = pop();        // the array itself
 
   if (elemtype == TypeInt::BOOL) {
     bt = T_BOOLEAN;
   } else if (bt == T_OBJECT) {
-    elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
+    elemtype = ary_t->elem()->make_oopptr();
  }
 
   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
 
-  access_store_at(array, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
+  access_store_at(ary, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
 }
 
@@ -1495,7 +1663,7 @@
   } else {                    // Path is live.
     // Update method data
     profile_taken_branch(target_bci);
-    adjust_map_after_if(btest, c, prob, branch_block, next_block);
+    adjust_map_after_if(btest, c, prob, branch_block);
     if (!stopped()) {
       merge(target_bci);
     }
@@ -1515,13 +1683,12 @@
   } else {                    // Path is live.
     // Update method data
     profile_not_taken_branch();
-    adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob,
-                        next_block, branch_block);
+    adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block);
   }
 }
 
 //------------------------------------do_if------------------------------------
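+// If 'new_path' is set, a taken branch is merged through a new path (see
+// merge_new_path below). If 'ctrl_taken' is non-NULL, the taken branch is not
+// merged at all; its control is handed back to the caller for manual wiring.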
-void Parse::do_if(BoolTest::mask btest, Node* c) {
+void Parse::do_if(BoolTest::mask btest, Node* c, bool new_path, Node** ctrl_taken) {
   int target_bci = iter().get_dest();
 
   Block* branch_block = successor_for_bci(target_bci);
@@ -1610,16 +1777,24 @@
     set_control(taken_branch);
 
     if (stopped()) {
-      if (C->eliminate_boxing()) {
-        // Mark the successor block as parsed
+      if (C->eliminate_boxing() && !new_path) {
+        // Mark the successor block as parsed (if we haven't created a new path)
         branch_block->next_path_num();
       }
     } else {
       // Update method data
       profile_taken_branch(target_bci);
-      adjust_map_after_if(taken_btest, c, prob, branch_block, next_block);
+      adjust_map_after_if(taken_btest, c, prob, branch_block);
       if (!stopped()) {
-        merge(target_bci);
+        if (new_path) {
+          // Merge by using a new path
+          merge_new_path(target_bci);
+        } else if (ctrl_taken != NULL) {
+          // Don't merge but save taken branch to be wired by caller
+          *ctrl_taken = control();
+        } else {
+          merge(target_bci);
+        }
       }
     }
   }
@@ -1628,16 +1803,327 @@
   set_control(untaken_branch);
 
   // Branch not taken.
-  if (stopped()) {
+  if (stopped() && ctrl_taken == NULL) {
     if (C->eliminate_boxing()) {
-      // Mark the successor block as parsed
+      // Mark the successor block as parsed (if caller does not re-wire control flow)
       next_block->next_path_num();
     }
   } else {
     // Update method data
     profile_not_taken_branch();
-    adjust_map_after_if(untaken_btest, c, untaken_prob,
-                        next_block, branch_block);
+    adjust_map_after_if(untaken_btest, c, untaken_prob, next_block);
+  }
+}
+
+void Parse::do_acmp(BoolTest::mask btest, Node* a, Node* b) {
+  ciMethod* subst_method = ciEnv::current()->ValueBootstrapMethods_klass()->find_method(ciSymbol::isSubstitutable_name(), ciSymbol::object_object_boolean_signature());
+  // If the current method is ValueBootstrapMethods::isSubstitutable(),
+  // compile the acmp as a regular pointer comparison; otherwise we would
+  // end up calling ValueBootstrapMethods::isSubstitutable() recursively
+  if (ACmpOnValues == 0 || method() == subst_method) {
+    Node* cmp = CmpP(a, b);
+    cmp = optimize_cmp_with_klass(cmp);
+    do_if(btest, cmp);
+    return;
+  }
+
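+  // ACmpOnValues selects the strategy: with 3 we emit a full substitutability
+  // test, with 1 we perturb one oop so the pointer comparison fails for value
+  // types, and the remaining settings use the mark word check further down.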
+  if (ACmpOnValues == 3) {
+    // Substitutability test
+    if (a->is_ValueType()) {
+      inc_sp(2);
+      a = a->as_ValueType()->allocate(this, true)->get_oop();
+      dec_sp(2);
+    }
+    if (b->is_ValueType()) {
+      inc_sp(2);
+      b = b->as_ValueType()->allocate(this, true)->get_oop();
+      dec_sp(2);
+    }
+
+    const TypeOopPtr* ta = _gvn.type(a)->isa_oopptr();
+    const TypeOopPtr* tb = _gvn.type(b)->isa_oopptr();
+
+    if (ta == NULL || !ta->can_be_value_type_raw() ||
+        tb == NULL || !tb->can_be_value_type_raw()) {
+      Node* cmp = CmpP(a, b);
+      cmp = optimize_cmp_with_klass(cmp);
+      do_if(btest, cmp);
+      return;
+    }
+
+    Node* cmp = CmpP(a, b);
+    cmp = optimize_cmp_with_klass(cmp);
+    Node* eq_region = NULL;
+    if (btest == BoolTest::eq) {
+      do_if(btest, cmp, true);
+      if (stopped()) {
+        return;
+      }
+    } else {
+      assert(btest == BoolTest::ne, "only eq or ne");
+      Node* is_not_equal = NULL;
+      eq_region = new RegionNode(3);
+      {
+        PreserveJVMState pjvms(this);
+        do_if(btest, cmp, false, &is_not_equal);
+        if (!stopped()) {
+          eq_region->init_req(1, control());
+        }
+      }
+      if (is_not_equal == NULL || is_not_equal->is_top()) {
+        record_for_igvn(eq_region);
+        set_control(_gvn.transform(eq_region));
+        return;
+      }
+      set_control(is_not_equal);
+    }
+    // Pointers not equal, check for values
+    Node* ne_region = new RegionNode(6);
+    inc_sp(2);
+    Node* null_ctl = top();
+    Node* not_null_a = null_check_oop(a, &null_ctl, !too_many_traps(Deoptimization::Reason_null_check), false, false);
+    dec_sp(2);
+    ne_region->init_req(1, null_ctl);
+    if (stopped()) {
+      record_for_igvn(ne_region);
+      set_control(_gvn.transform(ne_region));
+      if (btest == BoolTest::ne) {
+        {
+          PreserveJVMState pjvms(this);
+          int target_bci = iter().get_dest();
+          merge(target_bci);
+        }
+        record_for_igvn(eq_region);
+        set_control(_gvn.transform(eq_region));
+      }
+      return;
+    }
+
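+    // 'a' is non-null here; check its mark word for the always-locked bit
+    // pattern that identifies value instances. If 'a' is not a value, the
+    // earlier pointer inequality already decides the comparison.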
+    Node* is_value = is_always_locked(not_null_a);
+    Node* value_mask = _gvn.MakeConX(markOopDesc::always_locked_pattern);
+    Node* is_value_cmp = _gvn.transform(new CmpXNode(is_value, value_mask));
+    Node* is_value_bol = _gvn.transform(new BoolNode(is_value_cmp, BoolTest::ne));
+    IfNode* is_value_iff = create_and_map_if(control(), is_value_bol, PROB_FAIR, COUNT_UNKNOWN);
+    Node* not_value = _gvn.transform(new IfTrueNode(is_value_iff));
+    set_control(_gvn.transform(new IfFalseNode(is_value_iff)));
+    ne_region->init_req(2, not_value);
+
+    // One of the 2 pointers refers to a value, check if both are of
+    // the same class
+    inc_sp(2);
+    null_ctl = top();
+    Node* not_null_b = null_check_oop(b, &null_ctl, !too_many_traps(Deoptimization::Reason_null_check), false, false);
+    dec_sp(2);
+    ne_region->init_req(3, null_ctl);
+    if (stopped()) {
+      record_for_igvn(ne_region);
+      set_control(_gvn.transform(ne_region));
+      if (btest == BoolTest::ne) {
+        {
+          PreserveJVMState pjvms(this);
+          int target_bci = iter().get_dest();
+          merge(target_bci);
+        }
+        record_for_igvn(eq_region);
+        set_control(_gvn.transform(eq_region));
+      }
+      return;
+    }
+    Node* kls_a = load_object_klass(not_null_a);
+    Node* kls_b = load_object_klass(not_null_b);
+    Node* kls_cmp = CmpP(kls_a, kls_b);
+    Node* kls_bol = _gvn.transform(new BoolNode(kls_cmp, BoolTest::ne));
+    IfNode* kls_iff = create_and_map_if(control(), kls_bol, PROB_FAIR, COUNT_UNKNOWN);
+    Node* kls_ne = _gvn.transform(new IfTrueNode(kls_iff));
+    set_control(_gvn.transform(new IfFalseNode(kls_iff)));
+    ne_region->init_req(4, kls_ne);
+
+    if (stopped()) {
+      record_for_igvn(ne_region);
+      set_control(_gvn.transform(ne_region));
+      if (btest == BoolTest::ne) {
+        {
+          PreserveJVMState pjvms(this);
+          int target_bci = iter().get_dest();
+          merge(target_bci);
+        }
+        record_for_igvn(eq_region);
+        set_control(_gvn.transform(eq_region));
+      }
+      return;
+    }
+    // Both are values of the same class, we need to perform a
+    // substitutability test. Delegate to
+    // ValueBootstrapMethods::isSubstitutable().
+
+    Node* ne_io_phi = PhiNode::make(ne_region, i_o());
+    Node* mem = reset_memory();
+    Node* ne_mem_phi = PhiNode::make(ne_region, mem);
+
+    Node* eq_io_phi = NULL;
+    Node* eq_mem_phi = NULL;
+    if (eq_region != NULL) {
+      eq_io_phi = PhiNode::make(eq_region, i_o());
+      eq_mem_phi = PhiNode::make(eq_region, mem);
+    }
+
+    set_all_memory(mem);
+
+    kill_dead_locals();
+    CallStaticJavaNode *call = new CallStaticJavaNode(C, TypeFunc::make(subst_method), SharedRuntime::get_resolve_static_call_stub(), subst_method, bci());
+    call->set_override_symbolic_info(true);
+    call->init_req(TypeFunc::Parms, not_null_a);
+    call->init_req(TypeFunc::Parms+1, not_null_b);
+    inc_sp(2);
+    set_edges_for_java_call(call, false, false);
+    Node* ret = set_results_for_java_call(call, false, true);
+    dec_sp(2);
+
+    // Test the return value of ValueBootstrapMethods::isSubstitutable()
+    Node* subst_cmp = _gvn.transform(new CmpINode(ret, intcon(1)));
+    if (btest == BoolTest::eq) {
+      do_if(btest, subst_cmp);
+    } else {
+      assert(btest == BoolTest::ne, "only eq or ne");
+      Node* is_not_equal = NULL;
+      {
+        PreserveJVMState pjvms(this);
+        do_if(btest, subst_cmp, false, &is_not_equal);
+        if (!stopped()) {
+          eq_region->init_req(2, control());
+          eq_io_phi->init_req(2, i_o());
+          eq_mem_phi->init_req(2, reset_memory());
+        }
+      }
+      set_control(is_not_equal);
+    }
+    ne_region->init_req(5, control());
+    ne_io_phi->init_req(5, i_o());
+    ne_mem_phi->init_req(5, reset_memory());
+
+    record_for_igvn(ne_region);
+    set_control(_gvn.transform(ne_region));
+    set_i_o(_gvn.transform(ne_io_phi));
+    set_all_memory(_gvn.transform(ne_mem_phi));
+
+    if (btest == BoolTest::ne) {
+      {
+        PreserveJVMState pjvms(this);
+        int target_bci = iter().get_dest();
+        merge(target_bci);
+      }
+
+      record_for_igvn(eq_region);
+      set_control(_gvn.transform(eq_region));
+      set_i_o(_gvn.transform(eq_io_phi));
+      set_all_memory(_gvn.transform(eq_mem_phi));
+    }
+
+    return;
+  }
+
+  // In the case where both operands might be value types, we need to
+  // use the new acmp implementation. Otherwise, i.e. if one operand
+  // is not a value type, we can use the old acmp implementation.
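+  // optimize_acmp() is expected to return a regular CmpP when one operand is
+  // known not to be a value type, and NULL when we must emit the new acmp
+  // sequence below (quick pointer check, null check, mark word test).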
+  Node* cmp = C->optimize_acmp(&_gvn, a, b);
+  if (cmp != NULL) {
+    // Use optimized/old acmp
+    cmp = optimize_cmp_with_klass(_gvn.transform(cmp));
+    do_if(btest, cmp);
+    return;
+  }
+
+  Node* ctrl = NULL;
+  bool safe_for_replace = true;
+  if (ACmpOnValues != 1) {
+    // Emit old acmp before new acmp for quick a != b check
+    cmp = CmpP(a, b);
+    cmp = optimize_cmp_with_klass(_gvn.transform(cmp));
+    if (btest == BoolTest::ne) {
+      do_if(btest, cmp, true);
+      if (stopped()) {
+        return; // Never equal
+      }
+    } else if (btest == BoolTest::eq) {
+      Node* is_equal = NULL;
+      {
+        PreserveJVMState pjvms(this);
+        do_if(btest, cmp, false, &is_equal);
+        if (!stopped()) {
+          // Not equal, skip value type check
+          ctrl = new RegionNode(3);
+          ctrl->init_req(1, control());
+          _gvn.set_type(ctrl, Type::CONTROL);
+          record_for_igvn(ctrl);
+          safe_for_replace = false;
+        }
+      }
+      if (is_equal == NULL) {
+        assert(ctrl != NULL, "no control left");
+        set_control(_gvn.transform(ctrl));
+        return; // Never equal
+      }
+      set_control(is_equal);
+    }
+  }
+
+  // Null check operand before loading the is_value bit
+  bool speculate = false;
+  if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(b))) {
+    // Operand 'b' is never null, swap operands to avoid null check
+    swap(a, b);
+  } else if (!too_many_traps(Deoptimization::Reason_speculate_null_check)) {
+    // Speculate on non-nullness of one operand
+    if (!_gvn.type(a)->speculative_maybe_null()) {
+      speculate = true;
+    } else if (!_gvn.type(b)->speculative_maybe_null()) {
+      speculate = true;
+      swap(a, b);
+    }
+  }
+  inc_sp(2);
+  Node* null_ctl = top();
+  Node* not_null_a = null_check_oop(a, &null_ctl, speculate, safe_for_replace, speculate);
+  assert(!stopped(), "operand is always null");
+  dec_sp(2);
+  Node* region = new RegionNode(2);
+  Node* is_value = new PhiNode(region, TypeX_X);
+  if (null_ctl != top()) {
+    assert(!speculate, "should never be null");
+    region->add_req(null_ctl);
+    is_value->add_req(_gvn.MakeConX(0));
+  }
+
+  Node* value_mask = _gvn.MakeConX(markOopDesc::always_locked_pattern);
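+  // For ACmpOnValues == 1, compute branchlessly a word that is -1 if 'a' is
+  // a value type and 0 otherwise: (~mark & value_mask) is 0 only if all
+  // always-locked bits are set, so subtracting 1 makes it negative exactly
+  // for values, and the arithmetic shift by 63 extracts that sign. The
+  // result is later added to the oop to perturb the pointer comparison.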
+  if (ACmpOnValues == 1) {
+    Node* mark_addr = basic_plus_adr(not_null_a, oopDesc::mark_offset_in_bytes());
+    Node* mark = make_load(NULL, mark_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
+    Node* not_mark = _gvn.transform(new XorXNode(mark, _gvn.MakeConX(-1)));
+    Node* andn = _gvn.transform(new AndXNode(not_mark, value_mask));
+    Node* neg_if_value = _gvn.transform(new SubXNode(andn, _gvn.MakeConX(1)));
+    is_value->init_req(1, _gvn.transform(new RShiftXNode(neg_if_value, _gvn.intcon(63))));
+  } else {
+    is_value->init_req(1, is_always_locked(not_null_a));
+  }
+  region->init_req(1, control());
+
+  set_control(_gvn.transform(region));
+  is_value = _gvn.transform(is_value);
+
+  if (ACmpOnValues == 1) {
+    // Perturb the oop if the operand is a value type to make the comparison fail
+    Node* pert = _gvn.transform(new AddPNode(a, a, is_value));
+    cmp = _gvn.transform(new CmpPNode(pert, b));
+  } else {
+    // Check for a value type because we already know that the operands are equal
+    cmp = _gvn.transform(new CmpXNode(is_value, value_mask));
+    btest = (btest == BoolTest::eq) ? BoolTest::ne : BoolTest::eq;
+  }
+  cmp = optimize_cmp_with_klass(cmp);
+  do_if(btest, cmp);
+
+  if (ctrl != NULL) {
+    ctrl->init_req(2, control());
+    set_control(_gvn.transform(ctrl));
   }
 }
 
@@ -1667,8 +2153,7 @@
 // branch, seeing how it constrains a tested value, and then
 // deciding if it's worth our while to encode this constraint
 // as graph nodes in the current abstract interpretation map.
-void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
-                                Block* path, Block* other_path) {
+void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path) {
   if (!c->is_Cmp()) {
     maybe_add_predicate_after_if(path);
     return;
   }
 
@@ -1878,6 +2363,10 @@
       inc_sp(2);
       obj = maybe_cast_profiled_obj(obj, k);
       dec_sp(2);
+      if (obj->is_ValueType()) {
+        assert(obj->as_ValueType()->is_allocated(&_gvn), "must be allocated");
+        obj = obj->as_ValueType()->get_oop();
+      }
       // Make the CmpP use the casted obj
       addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
       load_klass = load_klass->clone();
 
@@ -2725,20 +3214,25 @@
     maybe_add_safepoint(iter().get_dest());
     a = null();
     b = pop();
-    if (!_gvn.type(b)->speculative_maybe_null() &&
-        !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
-      inc_sp(1);
-      Node* null_ctl = top();
-      b = null_check_oop(b, &null_ctl, true, true, true);
-      assert(null_ctl->is_top(), "no null control here");
-      dec_sp(1);
-    } else if (_gvn.type(b)->speculative_always_null() &&
-               !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
-      inc_sp(1);
-      b = null_assert(b);
-      dec_sp(1);
+    if (b->is_ValueType()) {
+      // Return constant false because 'b' is always non-null
+      c = _gvn.makecon(TypeInt::CC_GT);
+    } else {
+      if (!_gvn.type(b)->speculative_maybe_null() &&
+          !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
+        inc_sp(1);
+        Node* null_ctl = top();
+        b = null_check_oop(b, &null_ctl, true, true, true);
+        assert(null_ctl->is_top(), "no null control here");
+        dec_sp(1);
+      } else if (_gvn.type(b)->speculative_always_null() &&
+                 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
+        inc_sp(1);
+        b = null_assert(b);
+        dec_sp(1);
+      }
+      c = _gvn.transform( new CmpPNode(b, a) );
     }
-    c = _gvn.transform( new CmpPNode(b, a) );
     do_ifnull(btest, c);
     break;
 
@@ -2749,9 +3243,7 @@
     maybe_add_safepoint(iter().get_dest());
     a = access_resolve(pop(), 0);
     b = access_resolve(pop(), 0);
-    c = _gvn.transform( new CmpPNode(b, a) );
-    c = optimize_cmp_with_klass(c);
-    do_if(btest, c);
+    do_acmp(btest, a, b);
     break;
 
   case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
 
@@ -2806,7 +3298,7 @@
     do_instanceof();
     break;
   case Bytecodes::_anewarray:
-    do_anewarray();
+    do_newarray();
    break;
  case Bytecodes::_newarray:
    do_newarray((BasicType)iter().get_index());
 
@@ -2817,6 +3309,12 @@
  case Bytecodes::_new:
    do_new();
    break;
+  case Bytecodes::_defaultvalue:
+    do_defaultvalue();
+    break;
+  case Bytecodes::_withfield:
+    do_withfield();
+    break;
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w: