hotspot/src/share/vm/opto/callnode.hpp

rev 611 : Merge

*** 1,10 ****
  #ifdef USE_PRAGMA_IDENT_HDR
  #pragma ident "@(#)callnode.hpp 1.195 07/10/04 14:36:00 JVM"
  #endif
  /*
! * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
--- 1,10 ----
  #ifdef USE_PRAGMA_IDENT_HDR
  #pragma ident "@(#)callnode.hpp 1.195 07/10/04 14:36:00 JVM"
  #endif
  /*
! * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
*** 92,102 ****
  //------------------------------ParmNode---------------------------------------
  // Incoming parameters
  class ParmNode : public ProjNode {
    static const char * const names[TypeFunc::Parms+1];
  public:
!   ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {}
    virtual int Opcode() const;
    virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
    virtual uint ideal_reg() const;
  #ifndef PRODUCT
    virtual void dump_spec(outputStream *st) const;
--- 92,104 ----
  //------------------------------ParmNode---------------------------------------
  // Incoming parameters
  class ParmNode : public ProjNode {
    static const char * const names[TypeFunc::Parms+1];
  public:
!   ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
!     init_class_id(Class_Parm);
!   }
    virtual int Opcode() const;
    virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
    virtual uint ideal_reg() const;
  #ifndef PRODUCT
    virtual void dump_spec(outputStream *st) const;
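
The one functional change above is the added init_class_id(Class_Parm) call: it records the node's concrete class in the bit field that Node's fast is_X()/as_X() queries test, so a ParmNode can now be recognized without a virtual call. A minimal sketch of the pattern this enables (the visit helper below is hypothetical, not part of this file):

    // Hypothetical caller: with Class_Parm registered, is_Parm() is a cheap
    // bit test on the class-id field rather than a dynamic cast.
    void visit(Node* n) {
      if (n->is_Parm()) {
        ParmNode* parm = n->as_Parm();  // checked downcast; asserts in debug builds
        // ... inspect the incoming parameter here ...
      }
    }
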
*** 183,192 ****
--- 185,195 ----
    JVMState*         _caller;    // List pointer for forming scope chains
    uint              _depth;     // One more than caller depth, or one.
    uint              _locoff;    // Offset to locals in input edge mapping
    uint              _stkoff;    // Offset to stack in input edge mapping
    uint              _monoff;    // Offset to monitors in input edge mapping
+   uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
    uint              _endoff;    // Offset to end of input edge mapping
    uint              _sp;        // Java Expression Stack Pointer for this state
    int               _bci;       // Byte Code Index of this JVM point
    ciMethod*         _method;    // Method Pointer
    SafePointNode*    _map;       // Map node associated with this scope
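
With _scloff in place, one JVMState frame maps its debug-info inputs into four consecutive regions instead of three. A sketch of the layout the offsets delimit (reconstructed from the fields and accessors in this patch, not authoritative):

    //   _locoff .. _stkoff-1 : local variables
    //   _stkoff .. _monoff-1 : expression stack (only the first _sp slots are live)
    //   _monoff .. _scloff-1 : monitors (locked objects)
    //   _scloff .. _endoff-1 : field values of scalar-replaced objects
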
*** 206,236 ****
    // Access functions for the JVM
    uint              locoff() const { return _locoff; }
    uint              stkoff() const { return _stkoff; }
    uint              argoff() const { return _stkoff + _sp; }
    uint              monoff() const { return _monoff; }
    uint              endoff() const { return _endoff; }
    uint              oopoff() const { return debug_end(); }

    int            loc_size() const { return _stkoff - _locoff; }
    int            stk_size() const { return _monoff - _stkoff; }
!   int            mon_size() const { return _endoff - _monoff; }

    bool        is_loc(uint i) const { return i >= _locoff && i < _stkoff; }
    bool        is_stk(uint i) const { return i >= _stkoff && i < _monoff; }
!   bool        is_mon(uint i) const { return i >= _monoff && i < _endoff; }

    uint              sp()     const { return _sp; }
    int               bci()    const { return _bci; }
    bool        has_method()   const { return _method != NULL; }
    ciMethod*         method() const { assert(has_method(), ""); return _method; }
    JVMState*         caller() const { return _caller; }
    SafePointNode*    map()    const { return _map; }
    uint              depth()  const { return _depth; }
    uint        debug_start()  const; // returns locoff of root caller
    uint        debug_end()    const; // returns endoff of self
!   uint        debug_size()   const { return loc_size() + sp() + mon_size(); }
    uint        debug_depth()  const; // returns sum of debug_size values at all depths

    // Returns the JVM state at the desired depth (1 == root).
    JVMState* of_depth(int d) const;
--- 209,244 ----
    // Access functions for the JVM
    uint              locoff() const { return _locoff; }
    uint              stkoff() const { return _stkoff; }
    uint              argoff() const { return _stkoff + _sp; }
    uint              monoff() const { return _monoff; }
+   uint              scloff() const { return _scloff; }
    uint              endoff() const { return _endoff; }
    uint              oopoff() const { return debug_end(); }

    int            loc_size() const { return _stkoff - _locoff; }
    int            stk_size() const { return _monoff - _stkoff; }
!   int            mon_size() const { return _scloff - _monoff; }
!   int            scl_size() const { return _endoff - _scloff; }

    bool        is_loc(uint i) const { return i >= _locoff && i < _stkoff; }
    bool        is_stk(uint i) const { return i >= _stkoff && i < _monoff; }
!   bool        is_mon(uint i) const { return i >= _monoff && i < _scloff; }
!   bool        is_scl(uint i) const { return i >= _scloff && i < _endoff; }

    uint              sp()     const { return _sp; }
    int               bci()    const { return _bci; }
    bool        has_method()   const { return _method != NULL; }
    ciMethod*         method() const { assert(has_method(), ""); return _method; }
    JVMState*         caller() const { return _caller; }
    SafePointNode*    map()    const { return _map; }
    uint              depth()  const { return _depth; }
    uint        debug_start()  const; // returns locoff of root caller
    uint        debug_end()    const; // returns endoff of self
!   uint        debug_size()   const {
!     return loc_size() + sp() + mon_size() + scl_size();
!   }
    uint        debug_depth()  const; // returns sum of debug_size values at all depths

    // Returns the JVM state at the desired depth (1 == root).
    JVMState* of_depth(int d) const;
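
The region predicates compose into a straightforward classifier over input indices; a hedged sketch (kind_of_edge is a hypothetical helper, shown only to illustrate how the new is_scl region fits alongside the existing ones):

    // Hypothetical helper: name the debug-info region an input index falls in.
    const char* kind_of_edge(const JVMState* jvms, uint i) {
      if (jvms->is_loc(i)) return "local";
      if (jvms->is_stk(i)) return "stack";
      if (jvms->is_mon(i)) return "monitor";
      if (jvms->is_scl(i)) return "scalarized field";  // new region in this change
      return "not debug info";
    }
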
*** 253,264 ****
    // Initialization functions for the JVM
    void              set_locoff(uint off) { _locoff = off; }
    void              set_stkoff(uint off) { _stkoff = off; }
    void              set_monoff(uint off) { _monoff = off; }
    void              set_endoff(uint off) { _endoff = off; }
!   void              set_offsets(uint off) { _locoff = _stkoff = _monoff = _endoff = off; }
    void              set_map(SafePointNode *map) { _map = map; }
    void              set_sp(uint sp) { _sp = sp; }
    void              set_bci(int bci) { _bci = bci; }

    // Miscellaneous utility functions
--- 261,275 ----
    // Initialization functions for the JVM
    void              set_locoff(uint off) { _locoff = off; }
    void              set_stkoff(uint off) { _stkoff = off; }
    void              set_monoff(uint off) { _monoff = off; }
+   void              set_scloff(uint off) { _scloff = off; }
    void              set_endoff(uint off) { _endoff = off; }
!   void              set_offsets(uint off) {
!     _locoff = _stkoff = _monoff = _scloff = _endoff = off;
!   }
    void              set_map(SafePointNode *map) { _map = map; }
    void              set_sp(uint sp) { _sp = sp; }
    void              set_bci(int bci) { _bci = bci; }

    // Miscellaneous utility functions
*** 398,426 ****
  #ifndef PRODUCT
    virtual void           dump_spec(outputStream *st) const;
  #endif
  };

  //------------------------------CallNode---------------------------------------
  // Call nodes now subsume the function of debug nodes at callsites, so they
  // contain the functionality of a full scope chain of debug nodes.
  class CallNode : public SafePointNode {
  public:
    const TypeFunc *_tf;        // Function type
    address      _entry_point;  // Address of method being called
    float        _cnt;          // Estimate of number of times called
-   PointsToNode::EscapeState _escape_state;

    CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
      : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
        _tf(tf),
        _entry_point(addr),
        _cnt(COUNT_UNKNOWN)
    {
      init_class_id(Class_Call);
      init_flags(Flag_is_Call);
-     _escape_state = PointsToNode::UnknownEscape;
    }

    const TypeFunc* tf()        const { return _tf; }
    const address entry_point() const { return _entry_point; }
    const float cnt()           const { return _cnt; }
--- 409,480 ----
  #ifndef PRODUCT
    virtual void           dump_spec(outputStream *st) const;
  #endif
  };

+ //------------------------------SafePointScalarObjectNode----------------------
+ // A SafePointScalarObjectNode represents the state of a scalarized object
+ // at a safepoint.
+
+ class SafePointScalarObjectNode: public TypeNode {
+   uint _first_index; // First input edge index of a SafePoint node where
+                      // states of the scalarized object fields are collected.
+   uint _n_fields;    // Number of non-static fields of the scalarized object.
+   DEBUG_ONLY(AllocateNode* _alloc;)
+ public:
+   SafePointScalarObjectNode(const TypeOopPtr* tp,
+ #ifdef ASSERT
+                             AllocateNode* alloc,
+ #endif
+                             uint first_index, uint n_fields);
+   virtual int            Opcode() const;
+   virtual uint           ideal_reg() const;
+   virtual const RegMask &in_RegMask(uint) const;
+   virtual const RegMask &out_RegMask() const;
+   virtual uint           match_edge(uint idx) const;
+
+   uint first_index() const { return _first_index; }
+   uint n_fields()    const { return _n_fields; }
+   DEBUG_ONLY(AllocateNode* alloc() const { return _alloc; })
+
+   // SafePointScalarObject should always be pinned to the control edge
+   // of the SafePoint node for which it was generated.
+   virtual bool pinned() const; // { return true; }
+
+   virtual uint size_of() const { return sizeof(*this); }
+
+   // Assumes that "this" is an argument to a safepoint node "s", and that
+   // "new_call" is being created to correspond to "s".  But the difference
+   // between the start index of the jvmstates of "new_call" and "s" is
+   // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
+   // corresponds appropriately to "this" in "new_call".  Assumes that
+   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
+   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
+   SafePointScalarObjectNode* clone(int jvms_adj, Dict* sosn_map) const;
+
+ #ifndef PRODUCT
+   virtual void           dump_spec(outputStream *st) const;
+ #endif
+ };
+
  //------------------------------CallNode---------------------------------------
  // Call nodes now subsume the function of debug nodes at callsites, so they
  // contain the functionality of a full scope chain of debug nodes.
  class CallNode : public SafePointNode {
  public:
    const TypeFunc *_tf;        // Function type
    address      _entry_point;  // Address of method being called
    float        _cnt;          // Estimate of number of times called

    CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
      : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
        _tf(tf),
        _entry_point(addr),
        _cnt(COUNT_UNKNOWN)
    {
      init_class_id(Class_Call);
      init_flags(Flag_is_Call);
    }

    const TypeFunc* tf()        const { return _tf; }
    const address entry_point() const { return _entry_point; }
    const float cnt()           const { return _cnt; }
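
To see how a SafePointScalarObjectNode gets used: when escape analysis lets an allocation be eliminated, the object's current field values are appended as extra inputs of each safepoint, and a SafePointScalarObjectNode standing in the scope's oop slot records where those inputs start and how many there are, so deoptimization can rematerialize the object. A simplified, hedged sketch of that shape (the real rewriting lives in macro expansion; fields and alloc_type are stand-ins, and the Compile-arena allocation idiom is omitted):

    // Sketch only: record a scalarized object in safepoint 'sfpt'.
    // 'fields' holds the current value of each non-static field; 'alloc_type'
    // is the allocation's oop type (both are stand-ins for this sketch).
    uint first_ix = sfpt->req();            // appended field values start here
    for (int i = 0; i < fields.length(); i++) {
      sfpt->add_req(fields.at(i));
    }
    SafePointScalarObjectNode* sosn =
      new SafePointScalarObjectNode(alloc_type,
  #ifdef ASSERT
                                    alloc,  // debug-only back link
  #endif
                                    first_ix, fields.length());
    // 'sosn' then replaces the dead oop in the scope, and the JVMState's
    // scloff/endoff are widened to cover the appended edges.
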
*** 442,451 ****
--- 496,514 ----
    virtual bool        guaranteed_safepoint()  { return true; }
    // For macro nodes, the JVMState gets modified during expansion, so when cloning
    // the node the JVMState must be cloned.
    virtual void        clone_jvms() { }       // default is not to clone

+   // Returns true if the call may modify n
+   virtual bool        may_modify(const TypePtr *addr_t, PhaseTransform *phase);
+   // Does this node have a use of n other than in debug information?
+   bool                has_non_debug_use(Node *n);
+   // Returns the unique CheckCastPP of a call
+   // or result projection if there are several CheckCastPP
+   // or returns NULL if there is no one.
+   Node *result_cast();
+
    virtual uint match_edge(uint idx) const;

  #ifndef PRODUCT
    virtual void        dump_req()  const;
    virtual void        dump_spec(outputStream *st) const;
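
A hedged sketch of the kind of question the new may_modify hook answers: an optimization that wants to move a load across a call must prove the call cannot write the load's memory slice (the safe_to_hoist helper below is illustrative, not the patch's actual caller):

    // Hypothetical consumer of CallNode::may_modify().
    bool safe_to_hoist(LoadNode* load, CallNode* call, PhaseTransform* phase) {
      const TypePtr* addr_t = load->adr_type();  // memory slice read by the load
      return !call->may_modify(addr_t, phase);   // safe only if the call can't write it
    }
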
*** 625,634 ****
--- 688,699 ----
      const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

      return TypeFunc::make(domain, range);
    }

+   bool _is_scalar_replaceable;  // Result of Escape Analysis
+
    virtual uint size_of() const; // Size is bigger
    AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                 Node *size, Node *klass_node, Node *initial_test);
    // Expansion modifies the JVMState, so we need to clone it
    virtual void clone_jvms() {
*** 636,645 ****
--- 701,713 ----
    }
    virtual int Opcode() const;
    virtual uint ideal_reg() const { return Op_RegP; }
    virtual bool        guaranteed_safepoint()  { return false; }

+   // allocations do not modify their arguments
+   virtual bool        may_modify(const TypePtr *addr_t, PhaseTransform *phase) { return false; }
+
    // Pattern-match a possible usage of AllocateNode.
    // Return null if no allocation is recognized.
    // The operand is the pointer produced by the (possible) allocation.
    // It must be a projection of the Allocate or its subsequent CastPP.
    // (Note: This function is defined in file graphKit.cpp, near
*** 659,669 ****
      return (allo == NULL) ? NULL : allo->in(KlassNode);
    }

    // Conservatively small estimate of offset of first non-header byte.
    int minimum_header_size() {
!     return is_AllocateArray() ? sizeof(arrayOopDesc) : sizeof(oopDesc);
    }

    // Return the corresponding initialization barrier (or null if none).
    // Walks out edges to find it...
    // (Note: Both InitializeNode::allocation and AllocateNode::initialization
--- 727,738 ----
      return (allo == NULL) ? NULL : allo->in(KlassNode);
    }

    // Conservatively small estimate of offset of first non-header byte.
    int minimum_header_size() {
!     return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
!                                 instanceOopDesc::base_offset_in_bytes();
    }

    // Return the corresponding initialization barrier (or null if none).
    // Walks out edges to find it...
    // (Note: Both InitializeNode::allocation and AllocateNode::initialization
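
The rewritten minimum_header_size replaces sizeof on the oopDesc types with the explicit base offsets, which is the quantity actually wanted: the offset of the first non-header byte. A sketch of the layouts involved (comments only; exact offsets vary by platform and word size):

    // instance:  [mark][klass][fields ...]
    //            fields begin at instanceOopDesc::base_offset_in_bytes()
    // array:     [mark][klass][length][elements ...]
    //            elements begin at arrayOopDesc::base_offset_in_bytes(elem_type)
    // T_BYTE yields the smallest element base offset of any element type,
    // hence a conservative lower bound when the element type is unknown here.
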
*** 691,719 ****
      set_req(AllocateNode::ALength,        count_val);
    }
    virtual int Opcode() const;
    virtual uint size_of() const; // Size is bigger

    // Pattern-match a possible usage of AllocateArrayNode.
    // Return null if no allocation is recognized.
    static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
      AllocateNode* allo = Ideal_allocation(ptr, phase);
      return (allo == NULL || !allo->is_AllocateArray())
             ? NULL : allo->as_AllocateArray();
    }
-
-   // Dig the length operand out of a (possible) array allocation site.
-   static Node* Ideal_length(Node* ptr, PhaseTransform* phase) {
-     AllocateArrayNode* allo = Ideal_array_allocation(ptr, phase);
-     return (allo == NULL) ? NULL : allo->in(AllocateNode::ALength);
-   }
  };

  //------------------------------AbstractLockNode-----------------------------------
  class AbstractLockNode: public CallNode {
  private:
    bool _eliminate;    // indicates this lock can be safely eliminated
  #ifndef PRODUCT
    NamedCounter* _counter;
  #endif

  protected:
--- 760,792 ----
      set_req(AllocateNode::ALength,        count_val);
    }
    virtual int Opcode() const;
    virtual uint size_of() const; // Size is bigger

+   // Dig the length operand out of an array allocation site.
+   Node* Ideal_length() {
+     return in(AllocateNode::ALength);
+   }
+
+   // Dig the length operand out of an array allocation site and narrow the
+   // type with a CastII, if necessary
+   Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);
+
    // Pattern-match a possible usage of AllocateArrayNode.
    // Return null if no allocation is recognized.
    static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
      AllocateNode* allo = Ideal_allocation(ptr, phase);
      return (allo == NULL || !allo->is_AllocateArray())
             ? NULL : allo->as_AllocateArray();
    }
  };

  //------------------------------AbstractLockNode-----------------------------------
  class AbstractLockNode: public CallNode {
  private:
    bool _eliminate;    // indicates this lock can be safely eliminated
+   bool _coarsened;    // indicates this lock was coarsened
  #ifndef PRODUCT
    NamedCounter* _counter;
  #endif

  protected:
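
A hedged usage sketch of the new length helpers: once a pointer has been matched back to a visible array allocation, its length can be taken straight off the allocation, optionally narrowed by make_ideal_length (the fold_arraylength caller below is hypothetical):

    // Hypothetical use: fold an arraylength when the allocation is visible.
    Node* fold_arraylength(Node* ary_ptr, const TypeOopPtr* ary_type,
                           PhaseTransform* phase) {
      AllocateArrayNode* alloc =
          AllocateArrayNode::Ideal_array_allocation(ary_ptr, phase);
      if (alloc == NULL)  return NULL;   // no allocation recognized
      // Narrow the raw length to the range implied by ary_type (adds a
      // CastII if necessary, per make_ideal_length's contract above).
      return alloc->make_ideal_length(ary_type, phase);
    }
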
*** 730,739 ****
--- 803,813 ----

  public:
    AbstractLockNode(const TypeFunc *tf)
      : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
+       _coarsened(false),
        _eliminate(false)
    {
  #ifndef PRODUCT
      _counter = NULL;
  #endif
*** 748,757 ****
--- 822,837 ----
    bool is_eliminated()         { return _eliminate; }
    // mark node as eliminated and update the counter if there is one
    void set_eliminated();

+   bool is_coarsened()  { return _coarsened; }
+   void set_coarsened() { _coarsened = true; }
+
+   // locking does not modify its arguments
+   virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase) { return false; }
+
  #ifndef PRODUCT
    void create_lock_counter(JVMState* s);
    NamedCounter* counter() const { return _counter; }
  #endif
  };
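
The _coarsened flag records that a lock node was produced by lock coarsening rather than written by the program, which lets later elimination passes treat such locks as a matched set. A hedged sketch of the kind of check this enables (illustrative only, not the patch's actual logic):

    // Hypothetical consumer: locks merged by coarsening must be kept or
    // removed together, so a pass might refuse to eliminate one in isolation.
    bool can_eliminate_alone(AbstractLockNode* lock) {
      return !lock->is_coarsened();
    }
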