
src/hotspot/share/opto/compile.cpp

*** 65,74 ****
--- 65,75 ----
  #include "opto/phaseX.hpp"
  #include "opto/rootnode.hpp"
  #include "opto/runtime.hpp"
  #include "opto/stringopts.hpp"
  #include "opto/type.hpp"
+ #include "opto/valuetypenode.hpp"
  #include "opto/vectornode.hpp"
  #include "runtime/arguments.hpp"
  #include "runtime/sharedRuntime.hpp"
  #include "runtime/signature.hpp"
  #include "runtime/stubRoutines.hpp"
*** 415,424 ****
--- 416,429 ----
        Node* opaq = opaque4_node(i);
        if (!useful.member(opaq)) {
          remove_opaque4_node(opaq);
        }
      }
+     // Remove useless value type nodes
+     if (_value_type_nodes != NULL) {
+       _value_type_nodes->remove_useless_nodes(useful.member_set());
+     }
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      bs->eliminate_useless_gc_barriers(useful, this);
      // clean up the late inline lists
      remove_useless_late_inlines(&_string_late_inlines, useful);
      remove_useless_late_inlines(&_boxing_late_inlines, useful);
*** 540,549 ****
--- 545,560 ----
    }
    ResourceMark rm;
    _scratch_const_size = const_size;
    int size = C2Compiler::initial_code_buffer_size(const_size);
+ #ifdef ASSERT
+   if (C->has_scalarized_args()) {
+     // Oop verification for loading object fields from scalarized value types in the new entry point requires lots of space
+     size += 5120;
+   }
+ #endif
    blob = BufferBlob::create("Compile::scratch_buffer", size);
    // Record the buffer blob for next time.
    set_scratch_buffer_blob(blob);
    // Have we run out of code space?
    if (scratch_buffer_blob() == NULL) {
*** 604,621 ****
    if (is_branch) {
      MacroAssembler masm(&buf);
      masm.bind(fakeL);
      n->as_MachBranch()->save_label(&saveL, &save_bnum);
      n->as_MachBranch()->label_set(&fakeL, 0);
    }
    n->emit(buf, this->regalloc());

    // Emitting into the scratch buffer should not fail
    assert (!failing(), "Must not have pending failure. Reason is: %s", failure_reason());

!   if (is_branch) // Restore label.
      n->as_MachBranch()->label_set(saveL, save_bnum);

    // End scratch_emit_size section.
    set_in_scratch_emit_size(false);

    return buf.insts_size();
--- 615,639 ----
    if (is_branch) {
      MacroAssembler masm(&buf);
      masm.bind(fakeL);
      n->as_MachBranch()->save_label(&saveL, &save_bnum);
      n->as_MachBranch()->label_set(&fakeL, 0);
+   } else if (n->is_MachProlog()) {
+     saveL = ((MachPrologNode*)n)->_verified_entry;
+     ((MachPrologNode*)n)->_verified_entry = &fakeL;
    }
    n->emit(buf, this->regalloc());

    // Emitting into the scratch buffer should not fail
    assert (!failing(), "Must not have pending failure. Reason is: %s", failure_reason());

!   // Restore label.
!   if (is_branch) {
      n->as_MachBranch()->label_set(saveL, save_bnum);
+   } else if (n->is_MachProlog()) {
+     ((MachPrologNode*)n)->_verified_entry = saveL;
+   }

    // End scratch_emit_size section.
    set_in_scratch_emit_size(false);

    return buf.insts_size();
*** 644,653 ****
--- 662,673 ----
    _stub_name(NULL),
    _stub_entry_point(NULL),
    _max_node_limit(MaxNodeLimit),
    _orig_pc_slot(0),
    _orig_pc_slot_offset_in_bytes(0),
+   _sp_inc_slot(0),
+   _sp_inc_slot_offset_in_bytes(0),
    _inlining_progress(false),
    _inlining_incrementally(false),
    _do_cleanup(false),
    _has_reserved_stack_access(target->has_reserved_stack_access()),
  #ifndef PRODUCT
*** 779,789 ****
      init_start(s);
      cg = CallGenerator::for_osr(method(), entry_bci());
    } else {
      // Normal case.
      init_tf(TypeFunc::make(method()));
!     StartNode* s = new StartNode(root(), tf()->domain());
      initial_gvn()->set_type_bottom(s);
      init_start(s);
      if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) {
        // With java.lang.ref.reference.get() we must go through the
        // intrinsic - even when get() is the root
--- 799,809 ----
      init_start(s);
      cg = CallGenerator::for_osr(method(), entry_bci());
    } else {
      // Normal case.
      init_tf(TypeFunc::make(method()));
!     StartNode* s = new StartNode(root(), tf()->domain_cc());
      initial_gvn()->set_type_bottom(s);
      init_start(s);
      if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) {
        // With java.lang.ref.reference.get() we must go through the
        // intrinsic - even when get() is the root
*** 906,915 ****
--- 926,942 ----
    // Now that we know the size of all the monitors we can add a fixed slot
    // for the original deopt pc.
    _orig_pc_slot = fixed_slots();
    int next_slot = _orig_pc_slot + (sizeof(address) / VMRegImpl::stack_slot_size);
+ 
+   if (needs_stack_repair()) {
+     // One extra slot for the special stack increment value
+     _sp_inc_slot = next_slot;
+     next_slot += 2;
+   }
+ 
    set_fixed_slots(next_slot);

    // Compute when to use implicit null checks.  Used by matching trap based
    // nodes and NullCheck optimization.
    set_allowed_deopt_reasons();
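A note on the slot arithmetic above: a VM stack slot is 4 bytes (VMRegImpl::stack_slot_size), so the pointer-sized stack increment value reserved when needs_stack_repair() is true occupies two slots, which is why next_slot advances by 2, just as the saved deopt pc consumes sizeof(address) / VMRegImpl::stack_slot_size slots. A minimal standalone sketch of that accounting (plain C++; the constants are redefined here for illustration and are not the VM's):

    #include <cstdio>

    int main() {
      const int stack_slot_size = 4;                                // bytes per VM stack slot (assumed)
      const int slots_per_word  = sizeof(void*) / stack_slot_size;  // 2 on a 64-bit VM
      int next_slot = 0;
      int orig_pc_slot = next_slot;      // one word for the saved deopt pc
      next_slot += slots_per_word;
      int sp_inc_slot = next_slot;       // one word for the stack increment value
      next_slot += slots_per_word;       // corresponds to "next_slot += 2" above
      printf("orig_pc_slot=%d sp_inc_slot=%d fixed_slots=%d\n",
             orig_pc_slot, sp_inc_slot, next_slot);
      return 0;
    }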
*** 931,940 ****
--- 958,976 ----
    if (is_osr_compilation()) {
      _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
      _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
    } else {
      _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
+     if (_code_offsets.value(CodeOffsets::Verified_Value_Entry) == -1) {
+       _code_offsets.set_value(CodeOffsets::Verified_Value_Entry, _first_block_size);
+     }
+     if (_code_offsets.value(CodeOffsets::Verified_Value_Entry_RO) == -1) {
+       _code_offsets.set_value(CodeOffsets::Verified_Value_Entry_RO, _first_block_size);
+     }
+     if (_code_offsets.value(CodeOffsets::Entry) == -1) {
+       _code_offsets.set_value(CodeOffsets::Entry, _first_block_size);
+     }
      _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
    }

    env()->register_method(_method, _entry_bci, &_code_offsets,
*** 976,985 ****
--- 1012,1023 ----
    _stub_name(stub_name),
    _stub_entry_point(NULL),
    _max_node_limit(MaxNodeLimit),
    _orig_pc_slot(0),
    _orig_pc_slot_offset_in_bytes(0),
+   _sp_inc_slot(0),
+   _sp_inc_slot_offset_in_bytes(0),
    _inlining_progress(false),
    _inlining_incrementally(false),
    _has_reserved_stack_access(false),
  #ifndef PRODUCT
    _trace_opto_output(directive->TraceOptoOutputOption),
*** 1202,1211 ****
--- 1240,1250 ----
    _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
    _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
    _expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
    _range_check_casts = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
    _opaque4_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
+   _value_type_nodes = new (comp_arena()) Unique_Node_List(comp_arena());
    register_library_intrinsics();
  }

  //---------------------------init_start----------------------------------------
  // Install the StartNode on this compile object.
*** 1426,1436 ****
    bool is_known_inst = tj->isa_oopptr() != NULL &&
                         tj->is_oopptr()->is_known_instance();

    // Process weird unsafe references.
    if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
!     assert(InlineUnsafeOps, "indeterminate pointers come only from unsafe ops");
      assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
      tj = TypeOopPtr::BOTTOM;
      ptr = tj->ptr();
      offset = tj->offset();
    }
--- 1465,1476 ----
    bool is_known_inst = tj->isa_oopptr() != NULL &&
                         tj->is_oopptr()->is_known_instance();

    // Process weird unsafe references.
    if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
!     bool default_value_load = EnableValhalla && tj->is_instptr()->klass() == ciEnv::current()->Class_klass();
!     assert(InlineUnsafeOps || default_value_load, "indeterminate pointers come only from unsafe ops");
      assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
      tj = TypeOopPtr::BOTTOM;
      ptr = tj->ptr();
      offset = tj->offset();
    }
*** 1443,1462 ****
    }
    if( ta && is_known_inst ) {
      if ( offset != Type::OffsetBot &&
           offset > arrayOopDesc::length_offset_in_bytes() ) {
        offset = Type::OffsetBot; // Flatten constant access into array body only
!       tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, offset, ta->instance_id());
      }
    } else if( ta && _AliasLevel >= 2 ) {
      // For arrays indexed by constant indices, we flatten the alias
      // space to include all of the array body.  Only the header, klass
      // and array length can be accessed un-aliased.
      if( offset != Type::OffsetBot ) {
        if( ta->const_oop() ) { // MethodData* or Method*
          offset = Type::OffsetBot;   // Flatten constant access into array body
!         tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,offset);
        } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
          // range is OK as-is.
          tj = ta = TypeAryPtr::RANGE;
        } else if( offset == oopDesc::klass_offset_in_bytes() ) {
          tj = TypeInstPtr::KLASS; // all klass loads look alike
--- 1483,1504 ----
    }
    if( ta && is_known_inst ) {
      if ( offset != Type::OffsetBot &&
           offset > arrayOopDesc::length_offset_in_bytes() ) {
        offset = Type::OffsetBot; // Flatten constant access into array body only
!       tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, Type::Offset(offset), ta->field_offset(), ta->instance_id());
      }
    } else if( ta && _AliasLevel >= 2 ) {
      // For arrays indexed by constant indices, we flatten the alias
      // space to include all of the array body.  Only the header, klass
      // and array length can be accessed un-aliased.
+     // For flattened value type array, each field has its own slice so
+     // we must include the field offset.
      if( offset != Type::OffsetBot ) {
        if( ta->const_oop() ) { // MethodData* or Method*
          offset = Type::OffsetBot;   // Flatten constant access into array body
!         tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset());
        } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
          // range is OK as-is.
          tj = ta = TypeAryPtr::RANGE;
        } else if( offset == oopDesc::klass_offset_in_bytes() ) {
          tj = TypeInstPtr::KLASS; // all klass loads look alike
*** 1468,1506 ****
        ptr = TypePtr::BotPTR;
      } else if (BarrierSet::barrier_set()->barrier_set_c2()->flatten_gc_alias_type(tj)) {
        ta = tj->isa_aryptr();
      } else {                  // Random constant offset into array body
        offset = Type::OffsetBot;   // Flatten constant access into array body
!       tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset);
      }
    }
    // Arrays of fixed size alias with arrays of unknown size.
    if (ta->size() != TypeInt::POS) {
      const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
!     tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset);
    }
    // Arrays of known objects become arrays of unknown objects.
    if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
      const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
!     tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
    }
    if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
      const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
!     tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
    }
    // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
    // cannot be distinguished by bytecode alone.
    if (ta->elem() == TypeInt::BOOL) {
      const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
      ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
!     tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset);
    }
    // During the 2nd round of IterGVN, NotNull castings are removed.
    // Make sure the Bottom and NotNull variants alias the same.
    // Also, make sure exact and non-exact variants alias the same.
    if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
!     tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset);
    }
  }

  // Oop pointers need some flattening
  const TypeInstPtr *to = tj->isa_instptr();
--- 1510,1548 ----
        ptr = TypePtr::BotPTR;
      } else if (BarrierSet::barrier_set()->barrier_set_c2()->flatten_gc_alias_type(tj)) {
        ta = tj->isa_aryptr();
      } else {                  // Random constant offset into array body
        offset = Type::OffsetBot;   // Flatten constant access into array body
!       tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset());
      }
    }
    // Arrays of fixed size alias with arrays of unknown size.
    if (ta->size() != TypeInt::POS) {
      const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
!     tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,Type::Offset(offset), ta->field_offset());
    }
    // Arrays of known objects become arrays of unknown objects.
    if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
      const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
!     tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), ta->field_offset());
    }
    if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
      const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
!     tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), ta->field_offset());
    }
    // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
    // cannot be distinguished by bytecode alone.
    if (ta->elem() == TypeInt::BOOL) {
      const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
      ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
!     tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,Type::Offset(offset), ta->field_offset());
    }
    // During the 2nd round of IterGVN, NotNull castings are removed.
    // Make sure the Bottom and NotNull variants alias the same.
    // Also, make sure exact and non-exact variants alias the same.
    if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
!     tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset());
    }
  }

  // Oop pointers need some flattening
  const TypeInstPtr *to = tj->isa_instptr();
*** 1510,1538 ****
      if (to->klass() != ciEnv::current()->Class_klass() ||
          offset < k->size_helper() * wordSize) {
        // No constant oop pointers (such as Strings); they alias with
        // unknown strings.
        assert(!is_known_inst, "not scalarizable allocation");
!       tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
      }
    } else if( is_known_inst ) {
      tj = to; // Keep NotNull and klass_is_exact for instance type
    } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
      // During the 2nd round of IterGVN, NotNull castings are removed.
      // Make sure the Bottom and NotNull variants alias the same.
      // Also, make sure exact and non-exact variants alias the same.
!     tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
    }
    if (to->speculative() != NULL) {
!     tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),to->offset(), to->instance_id());
    }
    // Canonicalize the holder of this field
    if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
      // First handle header references such as a LoadKlassNode, even if the
      // object's klass is unloaded at compile time (4965979).
      if (!is_known_inst) { // Do it only for non-instance types
!       tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset);
      }
    } else if (BarrierSet::barrier_set()->barrier_set_c2()->flatten_gc_alias_type(tj)) {
      to = tj->is_instptr();
    } else if (offset < 0 || offset >= k->size_helper() * wordSize) {
      // Static fields are in the space above the normal instance
--- 1552,1580 ----
      if (to->klass() != ciEnv::current()->Class_klass() ||
          offset < k->size_helper() * wordSize) {
        // No constant oop pointers (such as Strings); they alias with
        // unknown strings.
        assert(!is_known_inst, "not scalarizable allocation");
!       tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,Type::Offset(offset));
      }
    } else if( is_known_inst ) {
      tj = to; // Keep NotNull and klass_is_exact for instance type
    } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
      // During the 2nd round of IterGVN, NotNull castings are removed.
      // Make sure the Bottom and NotNull variants alias the same.
      // Also, make sure exact and non-exact variants alias the same.
!     tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,Type::Offset(offset));
    }
    if (to->speculative() != NULL) {
!     tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),Type::Offset(to->offset()), to->instance_id());
    }
    // Canonicalize the holder of this field
    if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
      // First handle header references such as a LoadKlassNode, even if the
      // object's klass is unloaded at compile time (4965979).
      if (!is_known_inst) { // Do it only for non-instance types
!       tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, Type::Offset(offset));
      }
    } else if (BarrierSet::barrier_set()->barrier_set_c2()->flatten_gc_alias_type(tj)) {
      to = tj->is_instptr();
    } else if (offset < 0 || offset >= k->size_helper() * wordSize) {
      // Static fields are in the space above the normal instance
*** 1544,1556 ****
      }
    } else {
      ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset);
      if (!k->equals(canonical_holder) || tj->offset() != offset) {
        if( is_known_inst ) {
!         tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id());
        } else {
!         tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset);
        }
      }
    }
  }

--- 1586,1598 ----
      }
    } else {
      ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset);
      if (!k->equals(canonical_holder) || tj->offset() != offset) {
        if( is_known_inst ) {
!         tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, Type::Offset(offset), to->instance_id());
        } else {
!         tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, Type::Offset(offset));
        }
      }
    }
  }

*** 1563,1581 ****
      // use NotNull as the PTR.
      if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
        tj = tk = TypeKlassPtr::make(TypePtr::NotNull,
                                     TypeKlassPtr::OBJECT->klass(),
!                                    offset);
      }

      ciKlass* klass = tk->klass();
!     if( klass->is_obj_array_klass() ) {
        ciKlass* k = TypeAryPtr::OOPS->klass();
        if( !k || !k->is_loaded() )                  // Only fails for some -Xcomp runs
          k = TypeInstPtr::BOTTOM->klass();
!       tj = tk = TypeKlassPtr::make( TypePtr::NotNull, k, offset );
      }

      // Check for precise loads from the primary supertype array and force them
      // to the supertype cache alias index.  Check for generic array loads from
      // the primary supertype array and also force them to the supertype cache
--- 1605,1623 ----
      // use NotNull as the PTR.
      if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
        tj = tk = TypeKlassPtr::make(TypePtr::NotNull,
                                     TypeKlassPtr::OBJECT->klass(),
!                                    Type::Offset(offset));
      }

      ciKlass* klass = tk->klass();
!     if (klass != NULL && klass->is_obj_array_klass()) {
        ciKlass* k = TypeAryPtr::OOPS->klass();
        if( !k || !k->is_loaded() )                  // Only fails for some -Xcomp runs
          k = TypeInstPtr::BOTTOM->klass();
!       tj = tk = TypeKlassPtr::make(TypePtr::NotNull, k, Type::Offset(offset));
      }

      // Check for precise loads from the primary supertype array and force them
      // to the supertype cache alias index.  Check for generic array loads from
      // the primary supertype array and also force them to the supertype cache
*** 1587,1597 ****
      if (offset == Type::OffsetBot ||
          (offset >= primary_supers_offset &&
           offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
          offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
        offset = in_bytes(Klass::secondary_super_cache_offset());
!       tj = tk = TypeKlassPtr::make( TypePtr::NotNull, tk->klass(), offset );
      }
    }

    // Flatten all Raw pointers together.
    if (tj->base() == Type::RawPtr)
--- 1629,1639 ----
      if (offset == Type::OffsetBot ||
          (offset >= primary_supers_offset &&
           offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
          offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
        offset = in_bytes(Klass::secondary_super_cache_offset());
!       tj = tk = TypeKlassPtr::make(TypePtr::NotNull, tk->klass(), Type::Offset(offset));
      }
    }

    // Flatten all Raw pointers together.
    if (tj->base() == Type::RawPtr)
*** 1782,1799 ****
    if (flat->isa_instptr()) {
      if (flat->offset() == java_lang_Class::klass_offset_in_bytes()
          && flat->is_instptr()->klass() == env()->Class_klass())
        alias_type(idx)->set_rewritable(false);
    }
    if (flat->isa_aryptr()) {
  #ifdef ASSERT
      const int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
      // (T_BYTE has the weakest alignment and size restrictions...)
      assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
  #endif
      if (flat->offset() == TypePtr::OffsetBot) {
!       alias_type(idx)->set_element(flat->is_aryptr()->elem());
      }
    }
    if (flat->isa_klassptr()) {
      if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
        alias_type(idx)->set_rewritable(false);
--- 1824,1849 ----
    if (flat->isa_instptr()) {
      if (flat->offset() == java_lang_Class::klass_offset_in_bytes()
          && flat->is_instptr()->klass() == env()->Class_klass())
        alias_type(idx)->set_rewritable(false);
    }
+   ciField* field = NULL;
    if (flat->isa_aryptr()) {
  #ifdef ASSERT
      const int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
      // (T_BYTE has the weakest alignment and size restrictions...)
      assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
  #endif
+     const Type* elemtype = flat->is_aryptr()->elem();
      if (flat->offset() == TypePtr::OffsetBot) {
!       alias_type(idx)->set_element(elemtype);
!     }
!     int field_offset = flat->is_aryptr()->field_offset().get();
!     if (elemtype->isa_valuetype() && field_offset != Type::OffsetBot) {
!       ciValueKlass* vk = elemtype->is_valuetype()->value_klass();
!       field_offset += vk->first_field_offset();
!       field = vk->get_field_by_offset(field_offset, false);
      }
    }
    if (flat->isa_klassptr()) {
      if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
        alias_type(idx)->set_rewritable(false);
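A note on the field-offset translation above: in a flattened value type array each element embeds its fields inline, so an element-relative field offset has to be shifted by the value klass's first_field_offset (the header size of a standalone, buffered value) before it can be resolved against the klass's field table. A hedged arithmetic sketch of that mapping (plain C++; the offsets below are assumed for illustration, not taken from the VM):

    #include <cstdio>

    int main() {
      // Assumed layout: a buffered value carries a 16-byte header before its fields.
      const int first_field_offset = 16;
      // Offset of the accessed field within the flat array element.
      int field_offset_in_element = 4;
      // Klass-relative offset used for the field-table lookup,
      // mirroring "field_offset += vk->first_field_offset()" above.
      int klass_field_offset = field_offset_in_element + first_field_offset;
      printf("look up field at klass offset %d\n", klass_field_offset);  // prints 20
      return 0;
    }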
*** 1809,1838 ****
    // references into JavaThread.)

    // Check for final fields.
    const TypeInstPtr* tinst = flat->isa_instptr();
    if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
-     ciField* field;
      if (tinst->const_oop() != NULL &&
          tinst->klass() == ciEnv::current()->Class_klass() &&
          tinst->offset() >= (tinst->klass()->as_instance_klass()->size_helper() * wordSize)) {
        // static field
        ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
        field = k->get_field_by_offset(tinst->offset(), true);
      } else {
!       ciInstanceKlass *k = tinst->klass()->as_instance_klass();
        field = k->get_field_by_offset(tinst->offset(), false);
      }
      assert(field == NULL ||
             original_field == NULL ||
             (field->holder() == original_field->holder() &&
              field->offset() == original_field->offset() &&
              field->is_static() == original_field->is_static()), "wrong field?");
      // Set field() and is_rewritable() attributes.
      if (field != NULL) alias_type(idx)->set_field(field);
    }
- }

  // Fill the cache for next time.
  ace->_adr_type = adr_type;
  ace->_index = idx;
  assert(alias_type(adr_type) == alias_type(idx), "type must be installed");
--- 1859,1891 ----
    // references into JavaThread.)

    // Check for final fields.
    const TypeInstPtr* tinst = flat->isa_instptr();
    if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
      if (tinst->const_oop() != NULL &&
          tinst->klass() == ciEnv::current()->Class_klass() &&
          tinst->offset() >= (tinst->klass()->as_instance_klass()->size_helper() * wordSize)) {
        // static field
        ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
        field = k->get_field_by_offset(tinst->offset(), true);
+     } else if (tinst->klass()->is_valuetype()) {
+       // Value type field
+       ciValueKlass* vk = tinst->value_klass();
+       field = vk->get_field_by_offset(tinst->offset(), false);
      } else {
!       ciInstanceKlass* k = tinst->klass()->as_instance_klass();
        field = k->get_field_by_offset(tinst->offset(), false);
      }
+   }
    assert(field == NULL ||
           original_field == NULL ||
           (field->holder() == original_field->holder() &&
            field->offset() == original_field->offset() &&
            field->is_static() == original_field->is_static()), "wrong field?");
    // Set field() and is_rewritable() attributes.
    if (field != NULL) alias_type(idx)->set_field(field);
  }

  // Fill the cache for next time.
  ace->_adr_type = adr_type;
  ace->_index = idx;
  assert(alias_type(adr_type) == alias_type(idx), "type must be installed");
*** 2001,2010 ****
--- 2054,2094 ----
      igvn.replace_node(opaq, opaq->in(2));
    }
    assert(opaque4_count() == 0, "should be empty");
  }

+ void Compile::add_value_type(Node* n) {
+   assert(n->is_ValueTypeBase(), "unexpected node");
+   if (_value_type_nodes != NULL) {
+     _value_type_nodes->push(n);
+   }
+ }
+ 
+ void Compile::remove_value_type(Node* n) {
+   assert(n->is_ValueTypeBase(), "unexpected node");
+   if (_value_type_nodes != NULL) {
+     _value_type_nodes->remove(n);
+   }
+ }
+ 
+ void Compile::process_value_types(PhaseIterGVN &igvn) {
+   // Make value types scalar in safepoints
+   while (_value_type_nodes->size() != 0) {
+     ValueTypeBaseNode* vt = _value_type_nodes->pop()->as_ValueTypeBase();
+     vt->make_scalar_in_safepoints(&igvn);
+     if (vt->is_ValueTypePtr()) {
+       igvn.replace_node(vt, vt->get_oop());
+     } else {
+       if (vt->outcnt() == 0) {
+         igvn.remove_dead_node(vt);
+       }
+     }
+   }
+   _value_type_nodes = NULL;
+   igvn.optimize();
+ }
+ 
  // StringOpts and late inlining of string methods
  void Compile::inline_string_calls(bool parse_time) {
    {
      // remove useless nodes to make the usage analysis simpler
      ResourceMark rm;
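A note on process_value_types above: it drains the _value_type_nodes worklist, makes each value type scalar in safepoint debug info, then either replaces a ValueTypePtr node with its oop or removes the node once it has no remaining uses. A minimal sketch of that drain-and-replace worklist pattern (plain C++ stand-ins, not HotSpot types):

    #include <vector>

    struct Node {
      int outcnt = 0;               // remaining uses of this node
    };

    static void make_scalar_in_safepoints(Node*) {
      // Record the value type's field values in safepoint debug info.
    }

    static void process_worklist(std::vector<Node*>& worklist) {
      while (!worklist.empty()) {
        Node* vt = worklist.back(); // analogous to _value_type_nodes->pop()
        worklist.pop_back();
        make_scalar_in_safepoints(vt);
        if (vt->outcnt == 0) {
          delete vt;                // analogous to igvn.remove_dead_node(vt)
        }
      }
    }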
*** 2275,2284 ****
--- 2359,2373 ----
      set_for_igvn(&new_worklist);
      igvn = PhaseIterGVN(initial_gvn());
      igvn.optimize();
    }

+   if (_value_type_nodes->size() > 0) {
+     // Do this once all inlining is over to avoid getting inconsistent debug info
+     process_value_types(igvn);
+   }
+ 
    // Perform escape analysis
    if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
      if (has_loops()) {
        // Cleanup graph (remove dead nodes).
        TracePhase tp("idealLoop", &timers[_t_idealLoop]);
*** 2440,2450 ****
    }

    print_method(PHASE_OPTIMIZE_FINISHED, 2);
  }

- 
  //------------------------------Code_Gen---------------------------------------
  // Given a graph, generate code for it
  void Compile::Code_Gen() {
    if (failing()) {
      return;
--- 2529,2538 ----
*** 2755,2764 ****
--- 2843,2853 ----
          mem = prev->in(MemNode::Memory);
        }
      }
    }

+ //------------------------------final_graph_reshaping_impl----------------------
  // Implement items 1-5 from final_graph_reshaping below.
  void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {

    if ( n->outcnt() == 0 ) return; // dead node
*** 3489,3498 ****
--- 3578,3595 ----
        Node* cmp = new CmpLNode(andl, n->in(2));
        n->subsume_by(cmp, this);
      }
      break;
    }
+ #ifdef ASSERT
+   case Op_ValueTypePtr:
+   case Op_ValueType: {
+     n->dump(-1);
+     assert(false, "value type node was not removed");
+     break;
+   }
+ #endif
    default:
      assert(!n->is_Call(), "");
      assert(!n->is_Mem(), "");
      assert(nop != Op_ProfileBoolean, "should be eliminated during IGVN");
      break;
*** 4149,4159 ****
  // (0) superklass is java.lang.Object (can occur in reflective code)
  // (1) subklass is already limited to a subtype of superklass => always ok
  // (2) subklass does not overlap with superklass => always fail
  // (3) superklass has NO subtypes and we can check with a simple compare.
  int Compile::static_subtype_check(ciKlass* superk, ciKlass* subk) {
!   if (StressReflectiveCode) {
      return SSC_full_test;       // Let caller generate the general case.
    }

    if (superk == env()->Object_klass()) {
      return SSC_always_true;     // (0) this test cannot fail
--- 4246,4256 ----
  // (0) superklass is java.lang.Object (can occur in reflective code)
  // (1) subklass is already limited to a subtype of superklass => always ok
  // (2) subklass does not overlap with superklass => always fail
  // (3) superklass has NO subtypes and we can check with a simple compare.
  int Compile::static_subtype_check(ciKlass* superk, ciKlass* subk) {
!   if (StressReflectiveCode || superk == NULL || subk == NULL) {
      return SSC_full_test;       // Let caller generate the general case.
    }

    if (superk == env()->Object_klass()) {
      return SSC_always_true;     // (0) this test cannot fail
*** 4598,4607 ****
--- 4695,4725 ----
      igvn.check_no_speculative_types();
  #endif
    }
  }

+ Node* Compile::optimize_acmp(PhaseGVN* phase, Node* a, Node* b) {
+   const TypeInstPtr* ta = phase->type(a)->isa_instptr();
+   const TypeInstPtr* tb = phase->type(b)->isa_instptr();
+   if (!EnableValhalla || ta == NULL || tb == NULL ||
+       ta->is_zero_type() || tb->is_zero_type() ||
+       !ta->can_be_value_type() || !tb->can_be_value_type()) {
+     // Use old acmp if one operand is null or not a value type
+     return new CmpPNode(a, b);
+   } else if (ta->is_valuetypeptr() || tb->is_valuetypeptr()) {
+     // We know that one operand is a value type. Therefore,
+     // new acmp will only return true if both operands are NULL.
+     // Check if both operands are null by or'ing the oops.
+     a = phase->transform(new CastP2XNode(NULL, a));
+     b = phase->transform(new CastP2XNode(NULL, b));
+     a = phase->transform(new OrXNode(a, b));
+     return new CmpXNode(a, phase->MakeConX(0));
+   }
+   // Use new acmp
+   return NULL;
+ }
+ 
  // Auxiliary method to support randomized stressing/fuzzing.
  //
  // This method can be called the arbitrary number of times, with current count
  // as the argument.  The logic allows selecting a single candidate from the
  // running list of candidates as follows:
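A note on the null check in optimize_acmp above: when one operand is statically known to be a value type, the new acmp can only succeed if both operands are NULL, and the hunk tests that with a single compare by or'ing the two pointer words (CastP2X, OrX, then CmpX against zero). A standalone illustration of why the OR works (plain C++, not VM code):

    #include <cassert>
    #include <cstdint>

    // The bitwise OR of two pointer words is zero exactly when both are NULL,
    // so one compare replaces two null checks.
    static bool both_null(const void* a, const void* b) {
      return ((uintptr_t)a | (uintptr_t)b) == 0;
    }

    int main() {
      int x = 0;
      assert(both_null(nullptr, nullptr));
      assert(!both_null(&x, nullptr));
      assert(!both_null(nullptr, &x));
      assert(!both_null(&x, &x));
      return 0;
    }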