hotspot/src/share/vm/opto/cfgnode.cpp

rev 611 : Merge

*** 1,10 ****
  #ifdef USE_PRAGMA_IDENT_SRC
  #pragma ident "@(#)cfgnode.cpp 1.262 08/11/24 12:22:57 JVM"
  #endif
  /*
!  * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
--- 1,10 ----
  #ifdef USE_PRAGMA_IDENT_SRC
  #pragma ident "@(#)cfgnode.cpp 1.262 08/11/24 12:22:57 JVM"
  #endif
  /*
!  * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
*** 705,714 ****
--- 705,777 ----
      }
    mem->verify_adr_type();
    return mem;
  }
+ //------------------------split_out_instance-----------------------------------
+ // Split out an instance type from a bottom phi.
+ PhiNode* PhiNode::split_out_instance(const TypePtr* at, PhaseIterGVN *igvn) const {
+   const TypeOopPtr *t_oop = at->isa_oopptr();
+   assert(t_oop != NULL && t_oop->is_known_instance(), "expecting instance oopptr");
+   const TypePtr *t = adr_type();
+   assert(type() == Type::MEMORY &&
+          (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ||
+           t->isa_oopptr() && !t->is_oopptr()->is_known_instance() &&
+           t->is_oopptr()->cast_to_exactness(true)
+            ->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
+            ->is_oopptr()->cast_to_instance_id(t_oop->instance_id()) == t_oop),
+          "bottom or raw memory required");
+ 
+   // Check if an appropriate node already exists.
+   Node *region = in(0);
+   for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) {
+     Node* use = region->fast_out(k);
+     if( use->is_Phi()) {
+       PhiNode *phi2 = use->as_Phi();
+       if (phi2->type() == Type::MEMORY && phi2->adr_type() == at) {
+         return phi2;
+       }
+     }
+   }
+   Compile *C = igvn->C;
+   Arena *a = Thread::current()->resource_area();
+   Node_Array node_map = new Node_Array(a);
+   Node_Stack stack(a, C->unique() >> 4);
+   PhiNode *nphi = slice_memory(at);
+   igvn->register_new_node_with_optimizer( nphi );
+   node_map.map(_idx, nphi);
+   stack.push((Node *)this, 1);
+   while(!stack.is_empty()) {
+     PhiNode *ophi = stack.node()->as_Phi();
+     uint i = stack.index();
+     assert(i >= 1, "not control edge");
+     stack.pop();
+     nphi = node_map[ophi->_idx]->as_Phi();
+     for (; i < ophi->req(); i++) {
+       Node *in = ophi->in(i);
+       if (in == NULL || igvn->type(in) == Type::TOP)
+         continue;
+       Node *opt = MemNode::optimize_simple_memory_chain(in, at, igvn);
+       PhiNode *optphi = opt->is_Phi() ? opt->as_Phi() : NULL;
+       if (optphi != NULL && optphi->adr_type() == TypePtr::BOTTOM) {
+         opt = node_map[optphi->_idx];
+         if (opt == NULL) {
+           stack.push(ophi, i);
+           nphi = optphi->slice_memory(at);
+           igvn->register_new_node_with_optimizer( nphi );
+           node_map.map(optphi->_idx, nphi);
+           ophi = optphi;
+           i = 0; // will get incremented at top of loop
+           continue;
+         }
+       }
+       nphi->set_req(i, opt);
+     }
+   }
+   return nphi;
+ }
+ 
  //------------------------verify_adr_type--------------------------------------
  #ifdef ASSERT
  void PhiNode::verify_adr_type(VectorSet& visited, const TypePtr* at) const {
    if (visited.test_set(_idx)) return; //already visited
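The new split_out_instance() clones a graph of bottom-memory phis lazily: it keeps an explicit Node_Stack of (phi, next-input-index) pairs and an index-to-clone map, so cycles through other phis are handled without recursion and each phi is cloned exactly once. The standalone sketch below shows the same stack-and-resume pattern on a toy node graph; ToyNode and clone_phi_graph are invented for illustration and this is not HotSpot code, only the C++ standard library is assumed.

// Toy illustration (not HotSpot API): clone a possibly cyclic "phi" graph with
// an explicit stack and an id->clone map, resuming at the saved input index,
// which is the traversal shape used by PhiNode::split_out_instance above.
#include <cstdio>
#include <unordered_map>
#include <utility>
#include <vector>

struct ToyNode {
  int id;
  bool is_phi;
  std::vector<ToyNode*> in;   // in[0] unused here, data inputs start at 1
};

// Clone the phi-only subgraph reachable from 'root', preserving cycles.
static ToyNode* clone_phi_graph(ToyNode* root, std::vector<ToyNode*>& storage) {
  std::unordered_map<int, ToyNode*> node_map;        // original id -> clone
  std::vector<std::pair<ToyNode*, size_t>> stack;    // (original, next input index)

  storage.push_back(new ToyNode{root->id + 1000, true, {}});
  ToyNode* nroot = storage.back();
  nroot->in.resize(root->in.size(), nullptr);
  node_map[root->id] = nroot;
  stack.push_back({root, 1});

  while (!stack.empty()) {
    ToyNode* ophi = stack.back().first;
    size_t i = stack.back().second;
    stack.pop_back();
    ToyNode* nphi = node_map[ophi->id];
    for (; i < ophi->in.size(); ++i) {
      ToyNode* in = ophi->in[i];
      if (in == nullptr) continue;
      if (in->is_phi) {
        auto it = node_map.find(in->id);
        if (it == node_map.end()) {
          // Unvisited phi input: remember where to resume, then descend.
          stack.push_back({ophi, i});
          ToyNode* clone = new ToyNode{in->id + 1000, true, {}};
          clone->in.resize(in->in.size(), nullptr);
          storage.push_back(clone);
          node_map[in->id] = clone;
          ophi = in;
          nphi = clone;
          i = 0;                 // incremented to 1 at the top of the loop
          continue;
        }
        nphi->in[i] = it->second;      // already cloned (this handles cycles)
      } else {
        nphi->in[i] = in;              // non-phi inputs are shared, not cloned
      }
    }
  }
  return nroot;
}

int main() {
  // Two phis that feed each other (a cycle), plus a plain leaf input.
  ToyNode leaf{1, false, {}};
  ToyNode a{2, true, {nullptr, &leaf, nullptr}};
  ToyNode b{3, true, {nullptr, &a, &leaf}};
  a.in[2] = &b;                        // close the cycle

  std::vector<ToyNode*> storage;
  ToyNode* ca = clone_phi_graph(&a, storage);
  std::printf("clone of %d is %d; its inputs: %d %d\n",
              a.id, ca->id, ca->in[1]->id, ca->in[2]->id);
  for (ToyNode* n : storage) delete n;
  return 0;
}

When the stack pops back to a parent phi, the saved index is re-processed; by then the child's clone is in the map, so the parent's input slot is filled in on the second visit.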
*** 794,810 ****
  }
  
    // Until we have harmony between classes and interfaces in the type
    // lattice, we must tread carefully around phis which implicitly
    // convert the one to the other.
!   const TypeInstPtr* ttip = _type->isa_instptr();
    bool is_intf = false;
    if (ttip != NULL) {
      ciKlass* k = ttip->klass();
      if (k->is_loaded() && k->is_interface())
        is_intf = true;
    }
  
    // Default case: merge all inputs
    const Type *t = Type::TOP;        // Merged type starting value
    for (uint i = 1; i < req(); ++i) {// For all paths in
      // Reachable control path?
--- 857,880 ----
  }
  
    // Until we have harmony between classes and interfaces in the type
    // lattice, we must tread carefully around phis which implicitly
    // convert the one to the other.
!   const TypePtr* ttp = _type->make_ptr();
!   const TypeInstPtr* ttip = (ttp != NULL) ? ttp->isa_instptr() : NULL;
!   const TypeKlassPtr* ttkp = (ttp != NULL) ? ttp->isa_klassptr() : NULL;
    bool is_intf = false;
    if (ttip != NULL) {
      ciKlass* k = ttip->klass();
      if (k->is_loaded() && k->is_interface())
        is_intf = true;
    }
+   if (ttkp != NULL) {
+     ciKlass* k = ttkp->klass();
+     if (k->is_loaded() && k->is_interface())
+       is_intf = true;
+   }
  
    // Default case: merge all inputs
    const Type *t = Type::TOP;        // Merged type starting value
    for (uint i = 1; i < req(); ++i) {// For all paths in
      // Reachable control path?
*** 813,823 ****
      // We assume that each input of an interface-valued Phi is a true
      // subtype of that interface. This might not be true of the meet
      // of all the input types. The lattice is not distributive in
      // such cases. Ward off asserts in type.cpp by refusing to do
      // meets between interfaces and proper classes.
!     const TypeInstPtr* tiip = ti->isa_instptr();
      if (tiip) {
        bool ti_is_intf = false;
        ciKlass* k = tiip->klass();
        if (k->is_loaded() && k->is_interface())
          ti_is_intf = true;
--- 883,894 ----
      // We assume that each input of an interface-valued Phi is a true
      // subtype of that interface. This might not be true of the meet
      // of all the input types. The lattice is not distributive in
      // such cases. Ward off asserts in type.cpp by refusing to do
      // meets between interfaces and proper classes.
!     const TypePtr* tip = ti->make_ptr();
!     const TypeInstPtr* tiip = (tip != NULL) ? tip->isa_instptr() : NULL;
      if (tiip) {
        bool ti_is_intf = false;
        ciKlass* k = tiip->klass();
        if (k->is_loaded() && k->is_interface())
          ti_is_intf = true;
*** 857,866 ****
--- 928,939 ----
      // be 'I' or 'j/l/O'. Thus we'll pick 'j/l/O'. If this then flows
      // into a Phi which "knows" it's an Interface type we'll have to
      // uplift the type.
      if( !t->empty() && ttip && ttip->is_loaded() && ttip->klass()->is_interface() )
        { assert(ft == _type, ""); } // Uplift to interface
+     else if( !t->empty() && ttkp && ttkp->is_loaded() && ttkp->klass()->is_interface() )
+       { assert(ft == _type, ""); } // Uplift to interface
      // Otherwise it's something stupid like non-overlapping int ranges
      // found on dying counted loops.
      else
        { assert(ft == Type::TOP, ""); } // Canonical empty value
  }
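The "uplift to interface" branches above compensate for the fact that C2's lattice does not place interfaces under their implementing classes: the meet of two classes that merely share an interface widens to j.l.Object, losing the interface the phi was declared with. The toy model below (invented names, not the real Type/TypePtr lattice) shows that compensation: merge the inputs, then restore the declared interface when the merge degenerates to Object.

// Toy model (not C2 code): a deliberately coarse meet() that forgets interface
// information, and a simplified phi "Value" that uplifts the result back to
// the declared interface, mirroring the asserts in the hunk above.
#include <cassert>
#include <cstdio>

enum ToyType { TOP, STRING_T, INTEGER_T, COMPARABLE_IFACE, OBJECT_T };

// Least upper bound in this toy order: any two distinct non-TOP types widen
// to Object, so interface membership of the inputs is lost by the merge.
static ToyType meet(ToyType a, ToyType b) {
  if (a == TOP) return b;
  if (b == TOP) return a;
  if (a == b) return a;
  return OBJECT_T;
}

// Simplified phi value: merge all inputs, then uplift to the declared
// interface type if the merge collapsed to Object.
static ToyType phi_value(ToyType declared, const ToyType* in, int n) {
  ToyType t = TOP;
  for (int i = 0; i < n; i++) t = meet(t, in[i]);
  if (declared == COMPARABLE_IFACE && t == OBJECT_T)
    return COMPARABLE_IFACE;   // uplift: keep the interface we "know" we have
  return t;
}

int main() {
  ToyType ins[] = { STRING_T, INTEGER_T };   // both "implement" Comparable here
  ToyType merged = phi_value(COMPARABLE_IFACE, ins, 2);
  assert(merged == COMPARABLE_IFACE);
  std::printf("merged type id: %d\n", merged);
  return 0;
}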
*** 870,885 ****
    // If we have an interface-typed Phi and we narrow to a class type, the join
    // should report back the class. However, if we have a J/L/Object
    // class-typed Phi and an interface flows in, it's possible that the meet &
    // join report an interface back out. This isn't possible but happens
    // because the type system doesn't interact well with interfaces.
!   const TypeInstPtr *jtip = jt->isa_instptr();
    if( jtip && ttip ) {
      if( jtip->is_loaded() && jtip->klass()->is_interface() &&
!         ttip->is_loaded() && !ttip->klass()->is_interface() )
        // Happens in a CTW of rt.jar, 320-341, no extra flags
!       { assert(ft == ttip->cast_to_ptr_type(jtip->ptr()), ""); jt = ft; }
    }
    if (jt != ft && jt->base() == ft->base()) {
      if (jt->isa_int() &&
          jt->is_int()->_lo == ft->is_int()->_lo &&
          jt->is_int()->_hi == ft->is_int()->_hi)
--- 943,971 ----
    // If we have an interface-typed Phi and we narrow to a class type, the join
    // should report back the class. However, if we have a J/L/Object
    // class-typed Phi and an interface flows in, it's possible that the meet &
    // join report an interface back out. This isn't possible but happens
    // because the type system doesn't interact well with interfaces.
!   const TypePtr *jtp = jt->make_ptr();
!   const TypeInstPtr *jtip = (jtp != NULL) ? jtp->isa_instptr() : NULL;
!   const TypeKlassPtr *jtkp = (jtp != NULL) ? jtp->isa_klassptr() : NULL;
    if( jtip && ttip ) {
      if( jtip->is_loaded() && jtip->klass()->is_interface() &&
!         ttip->is_loaded() && !ttip->klass()->is_interface() ) {
        // Happens in a CTW of rt.jar, 320-341, no extra flags
!       assert(ft == ttip->cast_to_ptr_type(jtip->ptr()) ||
!              ft->isa_narrowoop() && ft->make_ptr() == ttip->cast_to_ptr_type(jtip->ptr()), "");
!       jt = ft;
!     }
!   }
!   if( jtkp && ttkp ) {
!     if( jtkp->is_loaded() && jtkp->klass()->is_interface() &&
!         ttkp->is_loaded() && !ttkp->klass()->is_interface() ) {
!       assert(ft == ttkp->cast_to_ptr_type(jtkp->ptr()) ||
!              ft->isa_narrowoop() && ft->make_ptr() == ttkp->cast_to_ptr_type(jtkp->ptr()), "");
!       jt = ft;
!     }
    }
    if (jt != ft && jt->base() == ft->base()) {
      if (jt->isa_int() &&
          jt->is_int()->_lo == ft->is_int()->_lo &&
          jt->is_int()->_hi == ft->is_int()->_hi)
*** 1023,1032 ****
--- 1109,1120 ----
    for (uint i = 1, cnt = req(); i < cnt; ++i) {
      Node* rc = r->in(i);
      if (rc == NULL || phase->type(rc) == Type::TOP)
        continue;                 // ignore unreachable control path
      Node* n = in(i);
+     if (n == NULL)
+       continue;
      Node* un = n->uncast();
      if (un == NULL || un == this || phase->type(un) == Type::TOP) {
        continue; // ignore if top, or in(i) and "this" are in a data cycle
      }
      // Check for a unique uncasted input
*** 1285,1295 ****
    uint i;
    for( i = 1; i < phi->req()-1; i++ ) {
      Node *n = phi->in(i);
      if( !n ) return NULL;
      if( phase->type(n) == Type::TOP ) return NULL;
!     if( n->Opcode() == Op_ConP ) break;
    }
    if( i >= phi->req() )       // Only split for constants
      return NULL;
--- 1373,1383 ----
    uint i;
    for( i = 1; i < phi->req()-1; i++ ) {
      Node *n = phi->in(i);
      if( !n ) return NULL;
      if( phase->type(n) == Type::TOP ) return NULL;
!     if( n->Opcode() == Op_ConP || n->Opcode() == Op_ConN ) break;
    }
    if( i >= phi->req() )       // Only split for constants
      return NULL;
*** 1362,1372 ****
    // itself (safe for dead loops).
    if (in != NULL && !in->is_dead_loop_safe()) {
      // Check inputs of phi's inputs also.
      // It is much less expensive then full graph walk.
      uint cnt = in->req();
!     for (uint i = 1; i < cnt; ++i) {
        Node* m = in->in(i);
        if (m == (Node*)this)
          return UnsafeLoop; // Unsafe loop
        if (m != NULL && !m->is_dead_loop_safe()) {
          // Check the most common case (about 30% of all cases):
--- 1450,1461 ----
    // itself (safe for dead loops).
    if (in != NULL && !in->is_dead_loop_safe()) {
      // Check inputs of phi's inputs also.
      // It is much less expensive then full graph walk.
      uint cnt = in->req();
!     uint i = (in->is_Proj() && !in->is_CFG()) ? 0 : 1;
!     for (; i < cnt; ++i) {
        Node* m = in->in(i);
        if (m == (Node*)this)
          return UnsafeLoop; // Unsafe loop
        if (m != NULL && !m->is_dead_loop_safe()) {
          // Check the most common case (about 30% of all cases):
*** 1410,1420 ****
    nstack.push(in); // Start with unique input.
    visited.set(in->_idx);
    while (nstack.size() != 0) {
      Node* n = nstack.pop();
      uint cnt = n->req();
!     for (uint i = 1; i < cnt; i++) { // Only data paths
        Node* m = n->in(i);
        if (m == (Node*)this) {
          return true; // Data loop
        }
        if (m != NULL && !m->is_dead_loop_safe()) { // Only look for unsafe cases.
--- 1499,1510 ----
    nstack.push(in); // Start with unique input.
    visited.set(in->_idx);
    while (nstack.size() != 0) {
      Node* n = nstack.pop();
      uint cnt = n->req();
!     uint i = (n->is_Proj() && !n->is_CFG()) ? 0 : 1;
!     for (; i < cnt; i++) {
        Node* m = n->in(i);
        if (m == (Node*)this) {
          return true; // Data loop
        }
        if (m != NULL && !m->is_dead_loop_safe()) { // Only look for unsafe cases.
*** 1594,1603 ****
--- 1684,1697 ----
          MergeMemNode* n = ii->as_MergeMem();
          // compress paths and change unreachable cycles to TOP
          // If not, we can update the input infinitely along a MergeMem cycle
          // Equivalent code is in MemNode::Ideal_common
          Node *m = phase->transform(n);
+         if (outcnt() == 0) { // Above transform() may kill us!
+           progress = phase->C->top();
+           break;
+         }
          // If tranformed to a MergeMem, get the desired slice
          // Otherwise the returned node represents memory for every slice
          Node *new_mem = (m->is_MergeMem()) ?
                           m->as_MergeMem()->memory_at(alias_idx) : m;
          // Update input if it is progress over what we have now
*** 1679,1693 ****
--- 1773,1850 ----
        }
        // Replace self with the result.
        return result;
      }
    }
+     //
+     // Other optimizations on the memory chain
+     //
+     const TypePtr* at = adr_type();
+     for( uint i=1; i<req(); ++i ) {// For all paths in
+       Node *ii = in(i);
+       Node *new_in = MemNode::optimize_memory_chain(ii, at, phase);
+       if (ii != new_in ) {
+         set_req(i, new_in);
+         progress = this;
+       }
+     }
+   }
+ 
+ #ifdef _LP64
+   // Push DecodeN down through phi.
+   // The rest of phi graph will transform by split EncodeP node though phis up.
+   if (UseCompressedOops && can_reshape && progress == NULL) {
+     bool may_push = true;
+     bool has_decodeN = false;
+     Node* in_decodeN = NULL;
+     for (uint i=1; i<req(); ++i) {// For all paths in
+       Node *ii = in(i);
+       if (ii->is_DecodeN() && ii->bottom_type() == bottom_type()) {
+         has_decodeN = true;
+         in_decodeN = ii->in(1);
+       } else if (!ii->is_Phi()) {
+         may_push = false;
+       }
+     }
+ 
+     if (has_decodeN && may_push) {
+       PhaseIterGVN *igvn = phase->is_IterGVN();
+       // Note: in_decodeN is used only to define the type of new phi here.
+       PhiNode *new_phi = PhiNode::make_blank(in(0), in_decodeN);
+       uint orig_cnt = req();
+       for (uint i=1; i<req(); ++i) {// For all paths in
+         Node *ii = in(i);
+         Node* new_ii = NULL;
+         if (ii->is_DecodeN()) {
+           assert(ii->bottom_type() == bottom_type(), "sanity");
+           new_ii = ii->in(1);
+         } else {
+           assert(ii->is_Phi(), "sanity");
+           if (ii->as_Phi() == this) {
+             new_ii = new_phi;
+           } else {
+             new_ii = new (phase->C, 2) EncodePNode(ii, in_decodeN->bottom_type());
+             igvn->register_new_node_with_optimizer(new_ii);
+           }
+         }
+         new_phi->set_req(i, new_ii);
+       }
+       igvn->register_new_node_with_optimizer(new_phi, this);
+       progress = new (phase->C, 2) DecodeNNode(new_phi, bottom_type());
+     }
+   }
+ #endif
  
    return progress;              // Return any progress
  }
  
+ //------------------------------is_tripcount-----------------------------------
+ bool PhiNode::is_tripcount() const {
+   return (in(0) != NULL && in(0)->is_CountedLoop() &&
+           in(0)->as_CountedLoop()->phi() == this);
+ }
+ 
  //------------------------------out_RegMask------------------------------------
  const RegMask &PhiNode::in_RegMask(uint i) const {
    return i ? out_RegMask() : RegMask::Empty;
  }
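The #ifdef _LP64 block above rewrites a phi whose inputs are all DecodeN nodes (or phis) into a single DecodeN over a new phi of the narrow inputs, so the decode is done once instead of on every path. The sketch below shows the same rewrite on a toy expression graph; Expr and push_decode_through_phi are invented for illustration, not HotSpot API, and only the standard library is assumed.

// Toy rewrite (not HotSpot code): phi(decode(a), decode(b)) ==> decode(phi(a, b)).
#include <cstdio>
#include <memory>
#include <vector>

struct Expr {
  enum Kind { NARROW_VAL, DECODE, PHI } kind;
  std::vector<std::shared_ptr<Expr>> in;
  const char* name;
};

static std::shared_ptr<Expr> make(Expr::Kind k, const char* name,
                                  std::vector<std::shared_ptr<Expr>> in = {}) {
  return std::make_shared<Expr>(Expr{k, std::move(in), name});
}

// If every phi input is a DECODE, push the decode below the phi.
static std::shared_ptr<Expr> push_decode_through_phi(const std::shared_ptr<Expr>& phi) {
  if (phi->kind != Expr::PHI) return phi;
  for (const auto& ii : phi->in)
    if (ii->kind != Expr::DECODE) return phi;        // not all inputs decode: no change
  std::vector<std::shared_ptr<Expr>> narrow_ins;
  for (const auto& ii : phi->in) narrow_ins.push_back(ii->in[0]);
  auto new_phi = make(Expr::PHI, "narrow_phi", narrow_ins);
  return make(Expr::DECODE, "decode", {new_phi});    // single decode above the new phi
}

int main() {
  auto a = make(Expr::NARROW_VAL, "a");
  auto b = make(Expr::NARROW_VAL, "b");
  auto phi = make(Expr::PHI, "phi",
                  {make(Expr::DECODE, "decode", {a}), make(Expr::DECODE, "decode", {b})});
  auto result = push_decode_through_phi(phi);
  std::printf("%s(%s(%s, %s))\n", result->name, result->in[0]->name,
              result->in[0]->in[0]->name, result->in[0]->in[1]->name);
  return 0;
}

The real transform additionally wraps non-decode phi inputs in EncodeP nodes and handles self-loops, as the hunk shows; the toy simply refuses to rewrite in those cases.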
*** 1699,1711 ****
  }
  
  #ifndef PRODUCT
  void PhiNode::dump_spec(outputStream *st) const {
    TypeNode::dump_spec(st);
!   if (in(0) != NULL &&
!       in(0)->is_CountedLoop() &&
!       in(0)->as_CountedLoop()->phi() == this) {
      st->print(" #tripcount");
    }
  }
  #endif
--- 1856,1866 ----
  }
  
  #ifndef PRODUCT
  void PhiNode::dump_spec(outputStream *st) const {
    TypeNode::dump_spec(st);
!   if (is_tripcount()) {
      st->print(" #tripcount");
    }
  }
  #endif
*** 1890,1899 ****
--- 2045,2076 ----
      ? this
      : call->in(TypeFunc::Parms);
  }
  
  //=============================================================================
+ //------------------------------Value------------------------------------------
+ // Check for being unreachable.
+ const Type *NeverBranchNode::Value( PhaseTransform *phase ) const {
+   if (!in(0) || in(0)->is_top()) return Type::TOP;
+   return bottom_type();
+ }
+ 
+ //------------------------------Ideal------------------------------------------
+ // Check for no longer being part of a loop
+ Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+   if (can_reshape && !in(0)->is_Loop()) {
+     // Dead code elimination can sometimes delete this projection so
+     // if it's not there, there's nothing to do.
+     Node* fallthru = proj_out(0);
+     if (fallthru != NULL) {
+       phase->is_IterGVN()->subsume_node(fallthru, in(0));
+     }
+     return phase->C->top();
+   }
+   return NULL;
+ }
+ 
  #ifndef PRODUCT
  void NeverBranchNode::format( PhaseRegAlloc *ra_, outputStream *st) const {
    st->print("%s", Name());
  }
  #endif