src/share/vm/opto/block.hpp
JDK-8022284: Hide internal data structure in PhaseCFG (context diff)
*** 46,62 ****
  // allocation I do not need a destructor to reclaim storage.
  class Block_Array : public ResourceObj {
    friend class VMStructs;
    uint _size;                   // allocated size, as opposed to formal limit
    debug_only(uint _limit;)      // limit to formal domain
  protected:
    Block **_blocks;
    void grow( uint i );          // Grow array node to fit

  public:
-   Arena *_arena;                // Arena to allocate in
-
    Block_Array(Arena *a) : _arena(a), _size(OptoBlockListSize) {
      debug_only(_limit=0);
      _blocks = NEW_ARENA_ARRAY( a, Block *, OptoBlockListSize );
      for( int i = 0; i < OptoBlockListSize; i++ ) {
        _blocks[i] = NULL;
--- 46,61 ----
  // allocation I do not need a destructor to reclaim storage.
  class Block_Array : public ResourceObj {
    friend class VMStructs;
    uint _size;                   // allocated size, as opposed to formal limit
    debug_only(uint _limit;)      // limit to formal domain
+   Arena *_arena;                // Arena to allocate in
  protected:
    Block **_blocks;
    void grow( uint i );          // Grow array node to fit

  public:
    Block_Array(Arena *a) : _arena(a), _size(OptoBlockListSize) {
      debug_only(_limit=0);
      _blocks = NEW_ARENA_ARRAY( a, Block *, OptoBlockListSize );
      for( int i = 0; i < OptoBlockListSize; i++ ) {
        _blocks[i] = NULL;
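For orientation, a minimal usage sketch of Block_Array after this hunk: the arena is captured privately at construction, so callers only see the mapping interface (map, lookup, operator[]). The node and block variables are hypothetical stand-ins; grow() reallocates the backing array from the stored _arena when an index exceeds the current size.

    // Hedged sketch: map and query a node's block via Block_Array.
    Arena* arena = Thread::current()->resource_area();  // any live arena
    Block_Array map(arena);                  // _arena is now a private detail
    map.map(node->_idx, block);              // grows via the stored arena if needed
    Block* b = map[node->_idx];              // asserting lookup
    Block* maybe = map.lookup(node->_idx);   // NULL if not mapped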
*** 282,300 ****
    void find_remove( const Node *n );

    // helper function that adds caller save registers to MachProjNode
    void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe);
    // Schedule a call next in the block
!   uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);

    // Perform basic-block local scheduling
    Node *select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot);
!   void set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs );
!   void needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs);
    bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray<int> &ready_cnt, VectorSet &next_call);
    // Cleanup if any code lands between a Call and his Catch
!   void call_catch_cleanup(Block_Array &bbs, Compile *C);

    // Detect implicit-null-check opportunities.  Basically, find NULL checks
    // with suitable memory ops nearby.  Use the memory op to do the NULL check.
    // I can generate a memory op if there is not one nearby.
    void implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons);
--- 281,299 ----
    void find_remove( const Node *n );

    // helper function that adds caller save registers to MachProjNode
    void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe);
    // Schedule a call next in the block
!   uint sched_call(Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);

    // Perform basic-block local scheduling
    Node *select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot);
!   void set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg);
!   void needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg);
    bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray<int> &ready_cnt, VectorSet &next_call);
    // Cleanup if any code lands between a Call and his Catch
!   void call_catch_cleanup(PhaseCFG* cfg, Compile *C);

    // Detect implicit-null-check opportunities.  Basically, find NULL checks
    // with suitable memory ops nearby.  Use the memory op to do the NULL check.
    // I can generate a memory op if there is not one nearby.
    void implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons);
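The effect at a call site: Block's scheduling helpers no longer receive the raw node-to-block map but the owning PhaseCFG. An illustrative caller, assuming block, cfg and C are in scope (hypothetical names, not from this hunk):

    // Before this change: callers passed PhaseCFG's public map around.
    block->call_catch_cleanup(cfg->_bbs, C);
    // After: the map is private; the PhaseCFG itself is handed over.
    block->call_catch_cleanup(cfg, C);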
*** 329,356 ****
    // Examine block's code shape to predict if it is not commonly executed.
    bool has_uncommon_code() const;

    // Use frequency calculations and code shape to predict if the block
    // is uncommon.
!   bool is_uncommon( Block_Array &bbs ) const;

  #ifndef PRODUCT
    // Debugging print of basic block
    void dump_bidx(const Block* orig, outputStream* st = tty) const;
!   void dump_pred(const Block_Array *bbs, Block* orig, outputStream* st = tty) const;
!   void dump_head( const Block_Array *bbs, outputStream* st = tty ) const;
    void dump() const;
!   void dump( const Block_Array *bbs ) const;
  #endif
  };

  //------------------------------PhaseCFG---------------------------------------
  // Build an array of Basic Block pointers, one per Node.
  class PhaseCFG : public Phase {
    friend class VMStructs;
   private:
    // Build a proper looking cfg.  Return count of basic blocks
    uint build_cfg();

    // Perform DFS search.
    // Setup 'vertex' as DFS to vertex mapping.
--- 328,361 ----
    // Examine block's code shape to predict if it is not commonly executed.
    bool has_uncommon_code() const;

    // Use frequency calculations and code shape to predict if the block
    // is uncommon.
!   bool is_uncommon(PhaseCFG* cfg) const;

  #ifndef PRODUCT
    // Debugging print of basic block
    void dump_bidx(const Block* orig, outputStream* st = tty) const;
!   void dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st = tty) const;
!   void dump_head(const PhaseCFG* cfg, outputStream* st = tty) const;
    void dump() const;
!   void dump(const PhaseCFG* cfg) const;
  #endif
  };

  //------------------------------PhaseCFG---------------------------------------
  // Build an array of Basic Block pointers, one per Node.
  class PhaseCFG : public Phase {
    friend class VMStructs;
   private:
+   // Arena for the blocks to be stored in
+   Arena* _block_arena;
+
+   // Map nodes to owning basic block
+   Block_Array _node_to_block_mapping;
+
    // Build a proper looking cfg.  Return count of basic blocks
    uint build_cfg();

    // Perform DFS search.
    // Setup 'vertex' as DFS to vertex mapping.
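Both new private members need the arena at construction time; a hedged sketch of how the constructor (in block.cpp, not part of this hunk) would thread it through the initializer list:

    // Sketch only; the real initializer list sets further members.
    PhaseCFG::PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher)
      : Phase(CFG)
      , _block_arena(arena)              // blocks allocated from this arena
      , _node_to_block_mapping(arena)    // Block_Array over the same arena
      , _root(root) {
    }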
*** 369,394 ****
    // I'll need a few machine-specific GotoNodes.  Clone from this one.
    MachNode *_goto;

    Block* insert_anti_dependences(Block* LCA, Node* load, bool verify = false);
    void verify_anti_dependences(Block* LCA, Node* load) {
!     assert(LCA == _bbs[load->_idx], "should already be scheduled");
      insert_anti_dependences(LCA, load, true);
    }

   public:
!   PhaseCFG( Arena *a, RootNode *r, Matcher &m );

    uint _num_blocks;             // Count of basic blocks
    Block_List _blocks;           // List of basic blocks
    RootNode *_root;              // Root of whole program
-   Block_Array _bbs;             // Map Nodes to owning Basic Block
    Block *_broot;                // Basic block of root
    uint _rpo_ctr;
    CFGLoop* _root_loop;
    float _outer_loop_freq;       // Outmost loop frequency

    // Per node latency estimation, valid only during GCM
    GrowableArray<uint> *_node_latency;

  #ifndef PRODUCT
    bool _trace_opto_pipelining;  // tracing flag
--- 374,419 ----
    // I'll need a few machine-specific GotoNodes.  Clone from this one.
    MachNode *_goto;

    Block* insert_anti_dependences(Block* LCA, Node* load, bool verify = false);
    void verify_anti_dependences(Block* LCA, Node* load) {
!     assert(LCA == get_block_for_node(load), "should already be scheduled");
      insert_anti_dependences(LCA, load, true);
    }

   public:
!   PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher);

    uint _num_blocks;             // Count of basic blocks
    Block_List _blocks;           // List of basic blocks
    RootNode *_root;              // Root of whole program
    Block *_broot;                // Basic block of root
    uint _rpo_ctr;
    CFGLoop* _root_loop;
    float _outer_loop_freq;       // Outmost loop frequency
+
+   // set which block this node should reside in
+   void map_node_to_block(const Node* node, Block* block) {
+     _node_to_block_mapping.map(node->_idx, block);
+   }
+
+   // removes the mapping from a node to a block
+   void unmap_node_from_block(const Node* node) {
+     _node_to_block_mapping.map(node->_idx, NULL);
+   }
+
+   // get the block in which this node resides
+   Block* get_block_for_node(const Node* node) const {
+     return _node_to_block_mapping[node->_idx];
+   }
+
+   // does this node reside in a block; return true if it does
+   bool has_block(const Node* node) const {
+     return (_node_to_block_mapping.lookup(node->_idx) != NULL);
+   }

    // Per node latency estimation, valid only during GCM
    GrowableArray<uint> *_node_latency;

  #ifndef PRODUCT
    bool _trace_opto_pipelining;  // tracing flag
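Taken together, the four accessors replace the direct _bbs idioms one-for-one. Illustrative call sites (node n and block b are hypothetical):

    cfg->map_node_to_block(n, b);              // was: cfg->_bbs.map(n->_idx, b)
    Block* home = cfg->get_block_for_node(n);  // was: cfg->_bbs[n->_idx]
    if (cfg->has_block(n)) {                   // was: cfg->_bbs.lookup(n->_idx) != NULL
      cfg->unmap_node_from_block(n);           // was: cfg->_bbs.map(n->_idx, NULL)
    }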
*** 403,413 ****
    // Estimate block frequencies based on IfNode probabilities
    void Estimate_Block_Frequency();

    // Global Code Motion.  See Click's PLDI95 paper.  Place Nodes in specific
!   // basic blocks; i.e. _bbs now maps _idx for all Nodes to some Block.
    void GlobalCodeMotion( Matcher &m, uint unique, Node_List &proj_list );

    // Compute the (backwards) latency of a node from the uses
    void latency_from_uses(Node *n);
--- 428,438 ----
    // Estimate block frequencies based on IfNode probabilities
    void Estimate_Block_Frequency();

    // Global Code Motion.  See Click's PLDI95 paper.  Place Nodes in specific
!   // basic blocks; i.e. _node_to_block_mapping now maps _idx for all Nodes to some Block.
    void GlobalCodeMotion( Matcher &m, uint unique, Node_List &proj_list );

    // Compute the (backwards) latency of a node from the uses
    void latency_from_uses(Node *n);
*** 452,462 ****
    CFGLoop* create_loop_tree();

    // Insert a node into a block, and update the _bbs
    void insert( Block *b, uint idx, Node *n ) {
      b->_nodes.insert( idx, n );
!     _bbs.map( n->_idx, b );
    }

  #ifndef PRODUCT
    bool trace_opto_pipelining() const { return _trace_opto_pipelining; }
--- 477,487 ----
    CFGLoop* create_loop_tree();

    // Insert a node into a block, and update the _bbs
    void insert( Block *b, uint idx, Node *n ) {
      b->_nodes.insert( idx, n );
!     map_node_to_block(n, b);
    }

  #ifndef PRODUCT
    bool trace_opto_pipelining() const { return _trace_opto_pipelining; }
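A hedged example of what insert() guarantees for callers (new_node and b are hypothetical): the node lands in the block's node list and the private mapping is updated in the same step, so the two can no longer drift apart.

    cfg->insert(b, b->_nodes.size() - 1, new_node);  // ahead of the terminator
    assert(cfg->get_block_for_node(new_node) == b, "insert keeps the mapping in sync");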
*** 541,551 ****
      _parent(NULL),
      _sibling(NULL),
      _child(NULL),
      _exit_prob(1.0f) {}
    CFGLoop* parent() { return _parent; }
!   void push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk);
    void add_member(CFGElement *s) { _members.push(s); }
    void add_nested_loop(CFGLoop* cl);
    Block* head() {
      assert(_members.at(0)->is_block(), "head must be a block");
      Block* hd = _members.at(0)->as_Block();
--- 566,576 ----
      _parent(NULL),
      _sibling(NULL),
      _child(NULL),
      _exit_prob(1.0f) {}
    CFGLoop* parent() { return _parent; }
!   void push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg);
    void add_member(CFGElement *s) { _members.push(s); }
    void add_nested_loop(CFGLoop* cl);
    Block* head() {
      assert(_members.at(0)->is_block(), "head must be a block");
      Block* hd = _members.at(0)->as_Block();
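At CFGLoop call sites the same substitution applies; an illustrative (hypothetical) use when seeding the worklist from a loop head's predecessor:

    // was: loop->push_pred(head, i, worklist, cfg->_bbs);
    loop->push_pred(head, i, worklist, cfg);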