src/hotspot/share/opto/escape.cpp


*** 139,148 ****
--- 139,158 ----
    // add the phantom_obj only once to them.
    ptnodes_worklist.append(phantom_obj);
    java_objects_worklist.append(phantom_obj);
    for( uint next = 0; next < ideal_nodes.size(); ++next ) {
      Node* n = ideal_nodes.at(next);
+     if ((n->Opcode() == Op_LoadX || n->Opcode() == Op_StoreX) &&
+         !n->in(MemNode::Address)->is_AddP() &&
+         _igvn->type(n->in(MemNode::Address))->isa_oopptr()) {
+       // A Load/Store at the mark word address is at offset 0 so it has no AddP, which confuses EA
+       Node* addp = new AddPNode(n->in(MemNode::Address), n->in(MemNode::Address), _igvn->MakeConX(0));
+       _igvn->register_new_node_with_optimizer(addp);
+       _igvn->replace_input_of(n, MemNode::Address, addp);
+       ideal_nodes.push(addp);
+       _nodes.at_put_grow(addp->_idx, NULL, NULL);
+     }
      // Create PointsTo nodes and add them to Connection Graph. Called
      // only once per ideal node since ideal_nodes is Unique_Node list.
      add_node_to_connection_graph(n, &delayed_worklist);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      if (ptn != NULL && ptn != phantom_obj) {

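Editor's note on the hunk above: escape analysis keys a memory access to its base object and offset via the AddP node that computes the address, so a Load/Store of the mark word (offset 0) arrives with no AddP to anchor it; the fix gives it an explicit AddP(base, base, 0). A minimal standalone sketch of that invariant, with illustrative stand-in types rather than the real C2 node classes:

    #include <cassert>
    #include <cstddef>

    // Stand-in for a C2 address: EA wants every oop access in the shape
    // AddP(base, base, offset), even when the offset is zero.
    struct AddrShape {
      const void* base;
      ptrdiff_t   offset;
      bool        has_addp;
    };

    // Mirrors the inserted normalization: synthesize AddP(base, base, 0)
    // for a load/store whose address is the bare oop itself.
    AddrShape normalize(const void* bare_oop) {
      return AddrShape{ bare_oop, 0, true };
    }

    int main() {
      int object = 0;
      AddrShape a = normalize(&object);
      assert(a.has_addp && a.offset == 0);  // now uniform for EA
      return 0;
    }
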
*** 161,171 ****
      if (n->is_MergeMem()) {
        // Collect all MergeMem nodes to add memory slices for
        // scalar replaceable objects in split_unique_types().
        _mergemem_worklist.append(n->as_MergeMem());
      } else if (OptimizePtrCompare && n->is_Cmp() &&
!                (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
        // Collect compare pointers nodes.
        ptr_cmp_worklist.append(n);
      } else if (n->is_MemBarStoreStore()) {
        // Collect all MemBarStoreStore nodes so that depending on the
        // escape status of the associated Allocate node some of them
--- 171,182 ----
      if (n->is_MergeMem()) {
        // Collect all MergeMem nodes to add memory slices for
        // scalar replaceable objects in split_unique_types().
        _mergemem_worklist.append(n->as_MergeMem());
      } else if (OptimizePtrCompare && n->is_Cmp() &&
!                ((n->Opcode() == Op_CmpP && ((CmpPNode*)n)->has_perturbed_operand() == NULL) ||
!                 n->Opcode() == Op_CmpN)) {
        // Collect compare pointers nodes.
        ptr_cmp_worklist.append(n);
      } else if (n->is_MemBarStoreStore()) {
        // Collect all MemBarStoreStore nodes so that depending on the
        // escape status of the associated Allocate node some of them

*** 371,380 ****
--- 382,402 ----
        if ((n->as_Call()->returns_pointer() &&
             n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||
            (n->is_CallStaticJava() &&
             n->as_CallStaticJava()->is_boxing_method())) {
          add_call_node(n->as_Call());
+       } else if (n->as_Call()->tf()->returns_value_type_as_fields()) {
+         bool returns_oop = false;
+         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
+           ProjNode* pn = n->fast_out(i)->as_Proj();
+           if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) {
+             returns_oop = true;
+           }
+         }
+         if (returns_oop) {
+           add_call_node(n->as_Call());
+         }
        }
      }
      return;
    }
    // Put this check here to process call arguments since some call nodes

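Editor's note: with a call that returns a value type as multiple fields, the oop result (if any) is no longer necessarily projection TypeFunc::Parms; the hunk above scans all projections with _con >= TypeFunc::Parms for a pointer type. A standalone sketch of that scan, using made-up stand-ins for ProjNode and TypeFunc::Parms:

    #include <vector>

    struct Proj { unsigned con; bool is_ptr; };   // stand-in for ProjNode
    const unsigned Parms = 4;                     // stand-in for TypeFunc::Parms

    // Mirrors the added loop: the call node is added to the connection
    // graph only if some field projection carries an oop.
    bool returns_oop(const std::vector<Proj>& outs) {
      for (const Proj& pn : outs) {
        if (pn.con >= Parms && pn.is_ptr) {
          return true;
        }
      }
      return false;
    }

    int main() {
      // e.g. a value type returned as (int, oop): the second projection is a pointer
      std::vector<Proj> outs = { { Parms, false }, { Parms + 1, true } };
      return returns_oop(outs) ? 0 : 1;
    }
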
*** 479,490 ****
        }
        break;
      }
      case Op_Proj: {
        // we are only interested in the oop result projection from a call
!       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
!           n->in(0)->as_Call()->returns_pointer()) {
          add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0),
                                 delayed_worklist);
        }
        break;
      }
--- 501,514 ----
        }
        break;
      }
      case Op_Proj: {
        // we are only interested in the oop result projection from a call
!       if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
!           (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
!         assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
!                n->in(0)->as_Call()->tf()->returns_value_type_as_fields(), "what kind of oop return is it?");
          add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0),
                                 delayed_worklist);
        }
        break;
      }

*** 635,646 ****
        }
        ELSE_FAIL("Op_Phi");
      }
      case Op_Proj: {
        // we are only interested in the oop result projection from a call
!       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
!           n->in(0)->as_Call()->returns_pointer()) {
          add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
          break;
        }
        ELSE_FAIL("Op_Proj");
      }
--- 659,672 ----
        }
        ELSE_FAIL("Op_Phi");
      }
      case Op_Proj: {
        // we are only interested in the oop result projection from a call
!       if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
!           (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
!         assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
!                n->in(0)->as_Call()->tf()->returns_value_type_as_fields(), "what kind of oop return is it?");
          add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
          break;
        }
        ELSE_FAIL("Op_Proj");
      }

*** 798,808 ****
    }
    return false;
  }

  void ConnectionGraph::add_call_node(CallNode* call) {
!   assert(call->returns_pointer(), "only for call which returns pointer");
    uint call_idx = call->_idx;
    if (call->is_Allocate()) {
      Node* k = call->in(AllocateNode::KlassNode);
      const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
      assert(kt != NULL, "TypeKlassPtr required.");
--- 824,834 ----
    }
    return false;
  }

  void ConnectionGraph::add_call_node(CallNode* call) {
!   assert(call->returns_pointer() || call->tf()->returns_value_type_as_fields(), "only for call which returns pointer");
    uint call_idx = call->_idx;
    if (call->is_Allocate()) {
      Node* k = call->in(AllocateNode::KlassNode);
      const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
      assert(kt != NULL, "TypeKlassPtr required.");

*** 885,895 ****
      // its fields will be marked as NoEscape at least.
      add_java_object(call, PointsToNode::NoEscape);
      ptnode_adr(call_idx)->set_scalar_replaceable(false);
    } else {
      // Determine whether any arguments are returned.
!     const TypeTuple* d = call->tf()->domain();
      bool ret_arg = false;
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        if (d->field_at(i)->isa_ptr() != NULL &&
            call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
          ret_arg = true;
--- 911,921 ----
      // its fields will be marked as NoEscape at least.
      add_java_object(call, PointsToNode::NoEscape);
      ptnode_adr(call_idx)->set_scalar_replaceable(false);
    } else {
      // Determine whether any arguments are returned.
!     const TypeTuple* d = call->tf()->domain_cc();
      bool ret_arg = false;
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        if (d->field_at(i)->isa_ptr() != NULL &&
            call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
          ret_arg = true;

*** 932,942 ****
                          call->as_CallLeaf()->is_call_to_arraycopystub();
      // fall through
      case Op_CallLeaf: {
        // Stub calls, objects do not escape but they are not scalar replaceable.
        // Adjust escape state for outgoing arguments.
!       const TypeTuple * d = call->tf()->domain();
        bool src_has_oops = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          Node *arg = call->in(i);
          if (arg == NULL) {
--- 958,968 ----
                          call->as_CallLeaf()->is_call_to_arraycopystub();
      // fall through
      case Op_CallLeaf: {
        // Stub calls, objects do not escape but they are not scalar replaceable.
        // Adjust escape state for outgoing arguments.
!       const TypeTuple * d = call->tf()->domain_sig();
        bool src_has_oops = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          Node *arg = call->in(i);
          if (arg == NULL) {

*** 962,972 ****
          if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
            assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                   aat->isa_ptr() != NULL, "expecting a Ptr");
            bool arg_has_oops = aat->isa_oopptr() &&
                                (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
!                                (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
            if (i == TypeFunc::Parms) {
              src_has_oops = arg_has_oops;
            }
            //
            // src or dst could be j.l.Object when other is basic type array:
--- 988,1001 ----
          if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
            assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                   aat->isa_ptr() != NULL, "expecting a Ptr");
            bool arg_has_oops = aat->isa_oopptr() &&
                                (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
!                                (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()) ||
!                                (aat->isa_aryptr() && aat->isa_aryptr()->elem() != NULL &&
!                                 aat->isa_aryptr()->elem()->isa_valuetype() &&
!                                 aat->isa_aryptr()->elem()->isa_valuetype()->value_klass()->contains_oops()));
            if (i == TypeFunc::Parms) {
              src_has_oops = arg_has_oops;
            }
            //
            // src or dst could be j.l.Object when other is basic type array:

*** 1001,1011 ****
                    strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
!                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0)
                   ))) {
            call->dump();
            fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
          }
  #endif
--- 1030,1042 ----
                    strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
!                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
!                   strcmp(call->as_CallLeaf()->_name, "load_unknown_value") == 0 ||
!                   strcmp(call->as_CallLeaf()->_name, "store_unknown_value") == 0)
                   ))) {
            call->dump();
            fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
          }
  #endif

*** 1060,1070 ****
        }
        BCEscapeAnalyzer* call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
        // fall-through if not a Java method or no analyzer information
        if (call_analyzer != NULL) {
          PointsToNode* call_ptn = ptnode_adr(call->_idx);
!         const TypeTuple* d = call->tf()->domain();
          for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
            const Type* at = d->field_at(i);
            int k = i - TypeFunc::Parms;
            Node* arg = call->in(i);
            PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
--- 1091,1101 ----
        }
        BCEscapeAnalyzer* call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
        // fall-through if not a Java method or no analyzer information
        if (call_analyzer != NULL) {
          PointsToNode* call_ptn = ptnode_adr(call->_idx);
!         const TypeTuple* d = call->tf()->domain_cc();
          for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
            const Type* at = d->field_at(i);
            int k = i - TypeFunc::Parms;
            Node* arg = call->in(i);
            PointsToNode* arg_ptn = ptnode_adr(arg->_idx);

*** 1104,1114 ****
        }
        default: {
          // Fall-through here if not a Java method or no analyzer information
          // or some other type of call, assume the worst case: all arguments
          // globally escape.
!         const TypeTuple* d = call->tf()->domain();
          for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
            const Type* at = d->field_at(i);
            if (at->isa_oopptr() != NULL) {
              Node* arg = call->in(i);
              if (arg->is_AddP()) {
--- 1135,1145 ----
        }
        default: {
          // Fall-through here if not a Java method or no analyzer information
          // or some other type of call, assume the worst case: all arguments
          // globally escape.
!         const TypeTuple* d = call->tf()->domain_cc();
          for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
            const Type* at = d->field_at(i);
            if (at->isa_oopptr() != NULL) {
              Node* arg = call->in(i);
              if (arg->is_AddP()) {

*** 1625,1637 ****
          }
        }
        if (missed_obj != NULL) {
          tty->print_cr("----------field---------------------------------");
          field->dump();
!         tty->print_cr("----------missed referernce to object-----------");
          missed_obj->dump();
!         tty->print_cr("----------object referernced by init store -----");
          store->dump();
          val->dump();
          assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
        }
      }
--- 1656,1668 ----
          }
        }
        if (missed_obj != NULL) {
          tty->print_cr("----------field---------------------------------");
          field->dump();
!         tty->print_cr("----------missed reference to object------------");
          missed_obj->dump();
!         tty->print_cr("----------object referenced by init store-------");
          store->dump();
          val->dump();
          assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
        }
      }

*** 2063,2074 ****
      dst->set_arraycopy_dst();
    }
  }

  bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
    const Type* adr_type = n->as_AddP()->bottom_type();
    BasicType bt = T_INT;
!   if (offset == Type::OffsetBot) {
      // Check only oop fields.
      if (!adr_type->isa_aryptr() ||
          (adr_type->isa_aryptr()->klass() == NULL) ||
           adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
        // OffsetBot is used to reference array's element. Ignore first AddP.
--- 2094,2106 ----
      dst->set_arraycopy_dst();
    }
  }

  bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
    const Type* adr_type = n->as_AddP()->bottom_type();
+   int field_offset = adr_type->isa_aryptr() ? adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot;
    BasicType bt = T_INT;
!   if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) {
      // Check only oop fields.
      if (!adr_type->isa_aryptr() ||
          (adr_type->isa_aryptr()->klass() == NULL) ||
           adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
        // OffsetBot is used to reference array's element. Ignore first AddP.

*** 2076,2086 ****
          bt = T_OBJECT;
        }
      }
    } else if (offset != oopDesc::klass_offset_in_bytes()) {
      if (adr_type->isa_instptr()) {
!       ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
        if (field != NULL) {
          bt = field->layout_type();
        } else {
          // Check for unsafe oop field access
          if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
--- 2108,2118 ----
          bt = T_OBJECT;
        }
      }
    } else if (offset != oopDesc::klass_offset_in_bytes()) {
      if (adr_type->isa_instptr()) {
!       ciField* field = _compile->alias_type(adr_type->is_ptr())->field();
        if (field != NULL) {
          bt = field->layout_type();
        } else {
          // Check for unsafe oop field access
          if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||

*** 2096,2123 ****
        // Ignore array length load.
      } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
        // Ignore first AddP.
      } else {
        const Type* elemtype = adr_type->isa_aryptr()->elem();
        bt = elemtype->array_element_basic_type();
      }
    } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
      // Allocation initialization, ThreadLocal field access, unsafe access
      if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
          n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
          n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
          BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
        bt = T_OBJECT;
      }
    }
  }
! return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);
  }

  // Returns unique pointed java object or NULL.
  JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
!   assert(!_collecting, "should not call when contructed graph");
    // If the node was created after the escape computation we can't answer.
    uint idx = n->_idx;
    if (idx >= nodes_size()) {
      return NULL;
    }
--- 2128,2161 ----
        // Ignore array length load.
      } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
        // Ignore first AddP.
      } else {
        const Type* elemtype = adr_type->isa_aryptr()->elem();
+       if (elemtype->isa_valuetype() && field_offset != Type::OffsetBot) {
+         ciValueKlass* vk = elemtype->is_valuetype()->value_klass();
+         field_offset += vk->first_field_offset();
+         bt = vk->get_field_by_offset(field_offset, false)->layout_type();
+       } else {
          bt = elemtype->array_element_basic_type();
+       }
      }
    } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
      // Allocation initialization, ThreadLocal field access, unsafe access
      if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
          n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
          n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
          BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
        bt = T_OBJECT;
      }
    }
  }
! return (bt == T_OBJECT || bt == T_VALUETYPE || bt == T_NARROWOOP || bt == T_ARRAY);
  }

  // Returns unique pointed java object or NULL.
  JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
!   assert(!_collecting, "should not call when constructed graph");
    // If the node was created after the escape computation we can't answer.
    uint idx = n->_idx;
    if (idx >= nodes_size()) {
      return NULL;
    }

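Editor's note: in the hunk above, the field_offset carried by the array type is relative to the start of a flattened element's payload, while the value klass's field table is keyed by offsets that begin at first_field_offset(), hence the rebasing before get_field_by_offset(). A standalone model of that arithmetic with a fabricated layout (a 16-byte header, then an int field and an oop field):

    #include <cassert>

    enum BasicType { T_INT, T_OBJECT };
    struct FieldDesc { int offset; BasicType bt; };

    // Fabricated value-klass layout: fields at offsets 16 and 20.
    const FieldDesc fields[] = { { 16, T_INT }, { 20, T_OBJECT } };
    const int first_field_offset = 16;  // stand-in for vk->first_field_offset()

    // Stand-in for vk->get_field_by_offset(offset, false)->layout_type()
    BasicType get_field_by_offset(int offset) {
      for (const FieldDesc& f : fields) {
        if (f.offset == offset) return f.bt;
      }
      assert(false && "no field at this offset");
      return T_INT;
    }

    int main() {
      int field_offset = 4;                 // payload-relative: the second field
      field_offset += first_field_offset;   // rebase onto the klass layout
      assert(get_field_by_offset(field_offset) == T_OBJECT);
      return 0;
    }
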
*** 2250,2262 ****
      assert(offs != Type::OffsetBot ||
             adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
             "offset must be a constant or it is initialization of array");
      return offs;
    }
!   const TypePtr *t_ptr = adr_type->isa_ptr();
!   assert(t_ptr != NULL, "must be a pointer type");
!   return t_ptr->offset();
  }

  Node* ConnectionGraph::get_addp_base(Node *addp) {
    assert(addp->is_AddP(), "must be AddP");
    //
--- 2288,2298 ----
      assert(offs != Type::OffsetBot ||
             adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
             "offset must be a constant or it is initialization of array");
      return offs;
    }
!   return adr_type->is_ptr()->flattened_offset();
  }

  Node* ConnectionGraph::get_addp_base(Node *addp) {
    assert(addp->is_AddP(), "must be AddP");
    //
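Editor's note: flattened_offset() is presumably the pointer's byte offset folded together with the in-element field offset recorded for flattened value type arrays, so callers get one coherent number where offset() alone used to suffice. A sketch under that assumption (the struct and semantics here are assumed, not the real Type API):

    #include <cassert>

    // Assumed semantics of flattened_offset(); field_offset < 0 stands in
    // for "no field offset recorded" (a non-flattened access).
    struct PtrTy {
      int offset;
      int field_offset;
      int flattened_offset() const {
        return field_offset >= 0 ? offset + field_offset : offset;
      }
    };

    int main() {
      PtrTy plain = { 24, -1 };  // ordinary field or element access
      PtrTy flat  = { 24,  4 };  // second field of a flattened element
      assert(plain.flattened_offset() == 24);
      assert(flat.flattened_offset() == 28);
      return 0;
    }
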
*** 2407,2418 ****
--- 2443,2461 ----
      // compute an appropriate address type (cases #3 and #5).
      assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
      assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
      intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
      assert(offs != Type::OffsetBot, "offset must be a constant");
+     if (base_t->isa_aryptr() != NULL) {
+       // In the case of a flattened value type array, each field has its
+       // own slice so we need to extract the field being accessed from
+       // the address computation
+       t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr();
+     } else {
        t = base_t->add_offset(offs)->is_oopptr();
      }
+   }
    int inst_id = base_t->instance_id();
    assert(!t->is_known_instance() || t->instance_id() == inst_id,
           "old type must be non-instance or match new type");
    // The type 't' could be subclass of 'base_t'.

*** 2423,2443 ****
    // It could happen on subclass's branch (from the type profiling
    // inlining) which was not eliminated during parsing since the exactness
    // of the allocation type was not propagated to the subclass type check.
    //
    // Or the type 't' could be not related to 'base_t' at all.
!   // It could happened when CHA type is different from MDO type on a dead path
    // (for example, from instanceof check) which is not collapsed during parsing.
    //
    // Do nothing for such AddP node and don't process its users since
    // this code branch will go away.
    //
    if (!t->is_known_instance() &&
        !base_t->klass()->is_subtype_of(t->klass())) {
       return false; // bail out
    }
!   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
    // Do NOT remove the next line: ensure a new alias index is allocated
    // for the instance type. Note: C++ will not remove it since the call
    // has side effect.
    int alias_idx = _compile->get_alias_index(tinst);
    igvn->set_type(addp, tinst);
--- 2466,2492 ----
    // It could happen on subclass's branch (from the type profiling
    // inlining) which was not eliminated during parsing since the exactness
    // of the allocation type was not propagated to the subclass type check.
    //
    // Or the type 't' could be not related to 'base_t' at all.
!   // It could happen when CHA type is different from MDO type on a dead path
    // (for example, from instanceof check) which is not collapsed during parsing.
    //
    // Do nothing for such AddP node and don't process its users since
    // this code branch will go away.
    //
    if (!t->is_known_instance() &&
        !base_t->klass()->is_subtype_of(t->klass())) {
       return false; // bail out
    }
!   const TypePtr* tinst = base_t->add_offset(t->offset());
!   if (tinst->isa_aryptr() && t->isa_aryptr()) {
!     // In the case of a flattened value type array, each field has its
!     // own slice so we need to keep track of the field being accessed.
!     tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get());
!   }
!
    // Do NOT remove the next line: ensure a new alias index is allocated
    // for the instance type. Note: C++ will not remove it since the call
    // has side effect.
    int alias_idx = _compile->get_alias_index(tinst);
    igvn->set_type(addp, tinst);

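Editor's note: the point of with_field_offset() above is that two accesses at the same element offset but different field offsets of a flattened value type array must land in different alias classes, and thus different memory slices. An illustrative model of alias-index allocation keyed on the (offset, field_offset) pair (a plain map here, not how Compile actually implements get_alias_index):

    #include <cassert>
    #include <map>
    #include <utility>

    static std::map<std::pair<int, int>, int> alias_table;

    // Allocate or look up the alias index for an (offset, field_offset) key.
    int get_alias_index(int offset, int field_offset) {
      std::pair<int, int> key(offset, field_offset);
      std::map<std::pair<int, int>, int>::iterator it = alias_table.find(key);
      if (it != alias_table.end()) {
        return it->second;
      }
      int idx = (int)alias_table.size();  // allocate a fresh alias index
      alias_table[key] = idx;
      return idx;
    }

    int main() {
      // Same element offset, different flattened fields: distinct slices.
      int a = get_alias_index(24, 0);
      int b = get_alias_index(24, 4);
      assert(a != b);
      assert(get_alias_index(24, 0) == a);  // stable on repeated queries
      return 0;
    }
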
*** 3137,3147 ****
          continue;
        }
        // push allocation's users on appropriate worklist
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
          Node *use = n->fast_out(i);
!         if(use->is_Mem() && use->in(MemNode::Address) == n) {
            // Load/store to instance's field
            memnode_worklist.append_if_missing(use);
          } else if (use->is_MemBar()) {
            if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
              memnode_worklist.append_if_missing(use);
--- 3186,3196 ----
          continue;
        }
        // push allocation's users on appropriate worklist
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
          Node *use = n->fast_out(i);
!         if (use->is_Mem() && use->in(MemNode::Address) == n) {
            // Load/store to instance's field
            memnode_worklist.append_if_missing(use);
          } else if (use->is_MemBar()) {
            if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
              memnode_worklist.append_if_missing(use);

*** 3174,3183 ****
--- 3223,3240 ----
          } else if (use->Opcode() == Op_EncodeISOArray) {
            if (use->in(MemNode::Memory) == n || use->in(3) == n) {
              // EncodeISOArray overwrites destination array
              memnode_worklist.append_if_missing(use);
            }
+         } else if (use->Opcode() == Op_Return) {
+           assert(_compile->tf()->returns_value_type_as_fields(), "must return a value type");
+           // Get ValueKlass by removing the tag bit from the metadata pointer
+           Node* klass = use->in(TypeFunc::Parms);
+           intptr_t ptr = igvn->type(klass)->isa_rawptr()->get_con();
+           clear_nth_bit(ptr, 0);
+           assert(Metaspace::contains((void*)ptr), "should be klass");
+           assert(((ValueKlass*)ptr)->contains_oops(), "returned value type must contain a reference field");
          } else {
            uint op = use->Opcode();
            if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
                (use->in(MemNode::Memory) == n)) {
              // They overwrite memory edge corresponding to destination array,

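Editor's note: the Op_Return hunk relies on the value-type-as-fields return convention tagging the returned metadata pointer in its low bit so it can be told apart from an oop; clear_nth_bit(ptr, 0) recovers the ValueKlass*. A minimal model of that tagging scheme with a fabricated, 8-byte-aligned address:

    #include <cassert>
    #include <cstdint>

    // Drop the tag carried in bit n of a pointer-sized word.
    static inline intptr_t clear_nth_bit(intptr_t x, int n) {
      return x & ~(intptr_t(1) << n);
    }

    int main() {
      intptr_t klass  = intptr_t(0x12345670);  // fabricated aligned Klass*
      intptr_t tagged = klass | 1;             // low bit says "klass, not oop"
      assert((tagged & 1) != 0);               // tag visible before clearing
      assert(clear_nth_bit(tagged, 0) == klass);
      return 0;
    }
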
*** 3185,3195 ****
          } else if (!(op == Op_CmpP || op == Op_Conv2B ||
                       op == Op_CastP2X || op == Op_StoreCM ||
                       op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
                       op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                       op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
!                      BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
            n->dump();
            use->dump();
            assert(false, "EA: missing allocation reference path");
          }
  #endif
--- 3242,3253 ----
          } else if (!(op == Op_CmpP || op == Op_Conv2B ||
                       op == Op_CastP2X || op == Op_StoreCM ||
                       op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
                       op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                       op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
!                      BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
!                      op == Op_ValueType)) {
            n->dump();
            use->dump();
            assert(false, "EA: missing allocation reference path");
          }
  #endif

*** 3252,3261 ****
--- 3310,3322 ----
      } else if (n->Opcode() == Op_StrCompressedCopy ||
                 n->Opcode() == Op_EncodeISOArray) {
        // get the memory projection
        n = n->find_out_with(Op_SCMemProj);
        assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
+     } else if (n->is_CallLeaf() && n->as_CallLeaf()->_name != NULL &&
+                strcmp(n->as_CallLeaf()->_name, "store_unknown_value") == 0) {
+       n = n->as_CallLeaf()->proj_out(TypeFunc::Memory);
      } else {
        assert(n->is_Mem(), "memory node required.");
        Node *addr = n->in(MemNode::Address);
        const Type *addr_t = igvn->type(addr);
        if (addr_t == Type::TOP)

*** 3292,3310 ****
        } else if (use->is_MemBar()) {
          if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
            memnode_worklist.append_if_missing(use);
          }
  #ifdef ASSERT
!       } else if(use->is_Mem()) {
          assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
        } else if (use->is_MergeMem()) {
          assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
        } else if (use->Opcode() == Op_EncodeISOArray) {
          if (use->in(MemNode::Memory) == n || use->in(3) == n) {
            // EncodeISOArray overwrites destination array
            memnode_worklist.append_if_missing(use);
          }
        } else {
          uint op = use->Opcode();
          if ((use->in(MemNode::Memory) == n) &&
              (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
            // They overwrite memory edge corresponding to destination array,
--- 3353,3375 ----
        } else if (use->is_MemBar()) {
          if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
            memnode_worklist.append_if_missing(use);
          }
  #ifdef ASSERT
!       } else if (use->is_Mem()) {
          assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
        } else if (use->is_MergeMem()) {
          assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
        } else if (use->Opcode() == Op_EncodeISOArray) {
          if (use->in(MemNode::Memory) == n || use->in(3) == n) {
            // EncodeISOArray overwrites destination array
            memnode_worklist.append_if_missing(use);
          }
+       } else if (use->is_CallLeaf() && use->as_CallLeaf()->_name != NULL &&
+                  strcmp(use->as_CallLeaf()->_name, "store_unknown_value") == 0) {
+         // store_unknown_value overwrites destination array
+         memnode_worklist.append_if_missing(use);
        } else {
          uint op = use->Opcode();
          if ((use->in(MemNode::Memory) == n) &&
              (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
            // They overwrite memory edge corresponding to destination array,

*** 3322,3332 ****
        }
      }

    // Phase 3: Process MergeMem nodes from mergemem_worklist.
    // Walk each memory slice moving the first node encountered of each
!   // instance type to the the input corresponding to its alias index.
    uint length = _mergemem_worklist.length();
    for( uint next = 0; next < length; ++next ) {
      MergeMemNode* nmm = _mergemem_worklist.at(next);
      assert(!visited.test_set(nmm->_idx), "should not be visited before");
      // Note: we don't want to use MergeMemStream here because we only want to
--- 3387,3397 ----
        }
      }

    // Phase 3: Process MergeMem nodes from mergemem_worklist.
    // Walk each memory slice moving the first node encountered of each
!   // instance type to the input corresponding to its alias index.
    uint length = _mergemem_worklist.length();
    for( uint next = 0; next < length; ++next ) {
      MergeMemNode* nmm = _mergemem_worklist.at(next);
      assert(!visited.test_set(nmm->_idx), "should not be visited before");
      // Note: we don't want to use MergeMemStream here because we only want to

*** 3394,3404 ****

    // Phase 4: Update the inputs of non-instance memory Phis and
    //          the Memory input of memnodes
    // First update the inputs of any non-instance Phi's from
    // which we split out an instance Phi. Note we don't have
!   // to recursively process Phi's encounted on the input memory
    // chains as is done in split_memory_phi() since they will
    // also be processed here.
    for (int j = 0; j < orig_phis.length(); j++) {
      PhiNode *phi = orig_phis.at(j);
      int alias_idx = _compile->get_alias_index(phi->adr_type());
--- 3459,3469 ----

    // Phase 4: Update the inputs of non-instance memory Phis and
    //          the Memory input of memnodes
    // First update the inputs of any non-instance Phi's from
    // which we split out an instance Phi. Note we don't have
!   // to recursively process Phi's encountered on the input memory
    // chains as is done in split_memory_phi() since they will
    // also be processed here.
    for (int j = 0; j < orig_phis.length(); j++) {
      PhiNode *phi = orig_phis.at(j);
      int alias_idx = _compile->get_alias_index(phi->adr_type());