src/share/vm/opto/matcher.hpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File 6709742 Sdiff src/share/vm/opto

src/share/vm/opto/matcher.hpp

Print this page




  92   }
  93   bool    has_new_node(const Node* n) const {   // true iff a replacement node was recorded for n via set_new_node
  94     return _nodes.at(n->_idx) != NULL;           // _nodes is keyed by the node's unique _idx
  95   }
  96   Node*       new_node(const Node* n) const {    // fetch the node previously recorded for n
  97     assert(has_new_node(n), "set before get");   // enforce the set-before-get protocol (debug builds)
  98     return _nodes.at(n->_idx);
  99   }
 100   void    set_new_node(const Node* n, Node *nn) {   // record nn as the node replacing n
 101     assert(!has_new_node(n), "set only once");      // each node may be mapped at most once
 102     _nodes.map(n->_idx, nn);                        // mapping is keyed by _idx
 103   }
 104 
 105 #ifdef ASSERT
 106   // Make sure only new nodes are reachable from this node
 107   void verify_new_nodes_only(Node* root);   // debug-build-only consistency check
 108 
 109   Node* _mem_node;   // Ideal memory node consumed by mach node
 110 #endif
 111 



 112 public:
 113   int LabelRootDepth;   // NOTE(review): purpose not evident from this chunk -- see uses in matcher.cpp
 114   static const int base2reg[];        // Map Types to machine register types
 115   // Convert ideal machine register to a register mask for spill-loads
 116   static const RegMask *idealreg2regmask[];
 117   RegMask *idealreg2spillmask[_last_machine_leaf];   // per ideal-register-class spill masks -- presumably filled by init_spill_mask; confirm
 118   RegMask *idealreg2debugmask[_last_machine_leaf];   // per ideal-register-class debug masks -- TODO confirm at definition site
 119   void init_spill_mask( Node *ret );
 120   // Convert machine register number to register mask
 121   static uint mreg2regmask_max;   // number of valid entries in mreg2regmask[] -- per name; verify
 122   static RegMask mreg2regmask[];
 123   static RegMask STACK_ONLY_mask;   // mask containing only stack slots -- per name; verify at definition
 124 


 125   bool    is_shared( Node *n ) { return _shared.test(n->_idx) != 0; }   // is n flagged in the _shared bit-vector?
 126   void   set_shared( Node *n ) {  _shared.set(n->_idx); }               // flag n as shared
 127   bool   is_visited( Node *n ) { return _visited.test(n->_idx) != 0; }  // is n flagged in the _visited bit-vector?
 128   void  set_visited( Node *n ) { _visited.set(n->_idx); }               // flag n as visited
 129   bool  is_dontcare( Node *n ) { return _dontcare.test(n->_idx) != 0; } // is n flagged in the _dontcare bit-vector?
 130   void set_dontcare( Node *n ) {  _dontcare.set(n->_idx); }             // flag n as dont-care
 131 
 132   // Mode bit to tell DFA and expand rules whether we are running after
 133   // (or during) register selection.  Usually, the matcher runs before,
 134   // but it will also get called to generate post-allocation spill code.
 135   // In this situation, it is a deadly error to attempt to allocate more
 136   // temporary registers.
 137   bool _allocation_started;
 138 
 139   // Machine register names
 140   static const char *regName[];   // indexed by machine register number -- presumably; confirm at definition
 141   // Machine register encodings
 142   static const unsigned char _regEncode[];
 143   // Machine Node names
 144   const char **_ruleName;   // rule-name table for machine nodes -- per comment above; confirm indexing




  92   }
  93   bool    has_new_node(const Node* n) const {   // true iff a replacement node was recorded for n via set_new_node
  94     return _nodes.at(n->_idx) != NULL;           // _nodes is keyed by the node's unique _idx
  95   }
  96   Node*       new_node(const Node* n) const {    // fetch the node previously recorded for n
  97     assert(has_new_node(n), "set before get");   // enforce the set-before-get protocol (debug builds)
  98     return _nodes.at(n->_idx);
  99   }
 100   void    set_new_node(const Node* n, Node *nn) {   // record nn as the node replacing n
 101     assert(!has_new_node(n), "set only once");      // each node may be mapped at most once
 102     _nodes.map(n->_idx, nn);                        // mapping is keyed by _idx
 103   }
 104 
 105 #ifdef ASSERT
 106   // Make sure only new nodes are reachable from this node
 107   void verify_new_nodes_only(Node* root);   // debug-build-only consistency check
 108 
 109   Node* _mem_node;   // Ideal memory node consumed by mach node
 110 #endif
 111 
 112   // Mach node for ConP #NULL
 113   MachNode* _mach_null;   // cached here (all builds -- outside the ASSERT block); exposed via mach_null()
 114 
 114 
 115 public:
 116   int LabelRootDepth;   // NOTE(review): purpose not evident from this chunk -- see uses in matcher.cpp
 117   static const int base2reg[];        // Map Types to machine register types
 118   // Convert ideal machine register to a register mask for spill-loads
 119   static const RegMask *idealreg2regmask[];
 120   RegMask *idealreg2spillmask[_last_machine_leaf];   // per ideal-register-class spill masks -- presumably filled by init_spill_mask; confirm
 121   RegMask *idealreg2debugmask[_last_machine_leaf];   // per ideal-register-class debug masks -- TODO confirm at definition site
 122   void init_spill_mask( Node *ret );
 123   // Convert machine register number to register mask
 124   static uint mreg2regmask_max;   // number of valid entries in mreg2regmask[] -- per name; verify
 125   static RegMask mreg2regmask[];
 126   static RegMask STACK_ONLY_mask;   // mask containing only stack slots -- per name; verify at definition
 127 
 128   MachNode* mach_null() const { return _mach_null; }   // accessor for the cached "ConP #NULL" mach node (_mach_null)
 129 
 130   bool    is_shared( Node *n ) { return _shared.test(n->_idx) != 0; }   // is n flagged in the _shared bit-vector?
 131   void   set_shared( Node *n ) {  _shared.set(n->_idx); }               // flag n as shared
 132   bool   is_visited( Node *n ) { return _visited.test(n->_idx) != 0; }  // is n flagged in the _visited bit-vector?
 133   void  set_visited( Node *n ) { _visited.set(n->_idx); }               // flag n as visited
 134   bool  is_dontcare( Node *n ) { return _dontcare.test(n->_idx) != 0; } // is n flagged in the _dontcare bit-vector?
 135   void set_dontcare( Node *n ) {  _dontcare.set(n->_idx); }             // flag n as dont-care
 136 
 137   // Mode bit to tell DFA and expand rules whether we are running after
 138   // (or during) register selection.  Usually, the matcher runs before,
 139   // but it will also get called to generate post-allocation spill code.
 140   // In this situation, it is a deadly error to attempt to allocate more
 141   // temporary registers.
 142   bool _allocation_started;
 143 
 144   // Machine register names
 145   static const char *regName[];   // indexed by machine register number -- presumably; confirm at definition
 146   // Machine register encodings
 147   static const unsigned char _regEncode[];
 148   // Machine Node names
 149   const char **_ruleName;   // rule-name table for machine nodes -- per comment above; confirm indexing


src/share/vm/opto/matcher.hpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File