< prev index next >

src/share/vm/opto/output.cpp

Print this page

        

@@ -103,12 +103,12 @@
   // Insert epilogs before every return
   for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
     Block* block = _cfg->get_block(i);
     if (!block->is_connector() && block->non_connector_successor(0) == _cfg->get_root_block()) { // Found a program exit point?
       Node* m = block->end();
-      if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
-        MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
+      if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Opcodes::Op_Halt) {
+        MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Opcodes::Op_Return);
         block->add_inst(epilog);
         _cfg->map_node_to_block(epilog, block);
       }
     }
   }

@@ -379,11 +379,11 @@
         assert(jmp_size[i] > 0 && mach->is_MachBranch(), "sanity");
         int j;
         // Find the branch; ignore trailing NOPs.
         for (j = block->number_of_nodes()-1; j>=0; j--) {
           Node* n = block->get_node(j);
-          if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con)
+          if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Opcodes::Op_Con)
             break;
         }
         assert(j >= 0 && j == idx && block->get_node(j) == (Node*)mach, "sanity");
 #endif
         int br_size = jmp_size[i];

@@ -1311,11 +1311,11 @@
               n    = replacement;
               mach = replacement;
             }
           }
           mach->as_MachBranch()->label_set( &blk_labels[block_num], block_num );
-        } else if (mach->ideal_Opcode() == Op_Jump) {
+        } else if (mach->ideal_Opcode() == Opcodes::Op_Jump) {
           for (uint h = 0; h < block->_num_succs; h++) {
             Block* succs_block = block->_succs[h];
             for (uint j = 1; j < succs_block->num_preds(); j++) {
               Node* jpn = succs_block->pred(j);
               if (jpn->is_JumpProj() && jpn->in(0) == mach) {

@@ -1326,11 +1326,11 @@
             }
           }
         }
 #ifdef ASSERT
         // Check that oop-store precedes the card-mark
-        else if (mach->ideal_Opcode() == Op_StoreCM) {
+        else if (mach->ideal_Opcode() == Opcodes::Op_StoreCM) {
           uint storeCM_idx = j;
           int count = 0;
           for (uint prec = mach->req(); prec < mach->len(); prec++) {
             Node *oop_store = mach->in(prec);  // Precedence edge
             if (oop_store == NULL) continue;

@@ -1570,11 +1570,11 @@
     int j;
 
     // Find the branch; ignore trailing NOPs.
     for (j = block->number_of_nodes() - 1; j >= 0; j--) {
       n = block->get_node(j);
-      if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con) {
+      if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Opcodes::Op_Con) {
         break;
       }
     }
 
     // If we didn't find anything, continue

@@ -1965,19 +1965,19 @@
     if (_current_latency[_available[i]->_idx] > latency)
       break;
 
   // Special Check for compares following branches
   if( n->is_Mach() && _scheduled.size() > 0 ) {
-    int op = n->as_Mach()->ideal_Opcode();
+    Opcodes op = n->as_Mach()->ideal_Opcode();
     Node *last = _scheduled[0];
     if( last->is_MachIf() && last->in(1) == n &&
-        ( op == Op_CmpI ||
-          op == Op_CmpU ||
-          op == Op_CmpP ||
-          op == Op_CmpF ||
-          op == Op_CmpD ||
-          op == Op_CmpL ) ) {
+        ( op == Opcodes::Op_CmpI ||
+          op == Opcodes::Op_CmpU ||
+          op == Opcodes::Op_CmpP ||
+          op == Opcodes::Op_CmpF ||
+          op == Opcodes::Op_CmpD ||
+          op == Opcodes::Op_CmpL ) ) {
 
       // Recalculate position, moving to front of same latency
       for ( i=0 ; i < _available.size(); i++ )
         if (_current_latency[_available[i]->_idx] >= latency)
           break;

@@ -2198,15 +2198,15 @@
 
   // It's possible to have a BoxLock in the graph and in the _bbs mapping but
   // not in the bb->_nodes array.  This happens for debug-info-only BoxLocks.
   // 'Schedule' them (basically ignore in the schedule) but do not insert them
   // into the block.  All other scheduled nodes get put in the schedule here.
-  int op = n->Opcode();
-  if( (op == Op_Node && n->req() == 0) || // anti-dependence node OR
      (op != Op_Node &&         // Not an unused antidependence node and
+  Opcodes op = n->Opcode();
+  if( (op == Opcodes::Op_Node && n->req() == 0) || // anti-dependence node OR
+      (op != Opcodes::Op_Node &&         // Not an unused antidependence node and
        // not an unallocated boxlock
-       (OptoReg::is_valid(_regalloc->get_reg_first(n)) || op != Op_BoxLock)) ) {
+       (OptoReg::is_valid(_regalloc->get_reg_first(n)) || op != Opcodes::Op_BoxLock)) ) {
 
     // Push any trailing projections
     if( bb->get_node(bb->number_of_nodes()-1) != n ) {
       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
         Node *foi = n->fast_out(i);

@@ -2353,14 +2353,14 @@
       Node *n = bb->get_node(_bb_start);
       // Things not matched, like Phinodes and ProjNodes don't get scheduled.
       // Also, MachIdealNodes do not get scheduled
       if( !n->is_Mach() ) continue;     // Skip non-machine nodes
       MachNode *mach = n->as_Mach();
-      int iop = mach->ideal_Opcode();
-      if( iop == Op_CreateEx ) continue; // CreateEx is pinned
-      if( iop == Op_Con ) continue;      // Do not schedule Top
-      if( iop == Op_Node &&     // Do not schedule PhiNodes, ProjNodes
+      Opcodes iop = mach->ideal_Opcode();
+      if( iop == Opcodes::Op_CreateEx ) continue; // CreateEx is pinned
+      if( iop == Opcodes::Op_Con ) continue;      // Do not schedule Top
+      if( iop == Opcodes::Op_Node &&     // Do not schedule PhiNodes, ProjNodes
           mach->pipeline() == MachNode::pipeline_class() &&
           !n->is_SpillCopy() && !n->is_MachMerge() )  // Breakpoints, Prolog, etc
         continue;
       break;                    // Funny loop structure to be sure...
     }

@@ -2371,17 +2371,17 @@
     // have their delay slots filled in the template expansions, so we don't
     // bother scheduling them.
     Node *last = bb->get_node(_bb_end);
     // Ignore trailing NOPs.
     while (_bb_end > 0 && last->is_Mach() &&
-           last->as_Mach()->ideal_Opcode() == Op_Con) {
+           last->as_Mach()->ideal_Opcode() == Opcodes::Op_Con) {
       last = bb->get_node(--_bb_end);
     }
-    assert(!last->is_Mach() || last->as_Mach()->ideal_Opcode() != Op_Con, "");
+    assert(!last->is_Mach() || last->as_Mach()->ideal_Opcode() != Opcodes::Op_Con, "");
     if( last->is_Catch() ||
        // Exclude unreachable path case when Halt node is in a separate block.
-       (_bb_end > 1 && last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) {
+       (_bb_end > 1 && last->is_Mach() && last->as_Mach()->ideal_Opcode() == Opcodes::Op_Halt) ) {
       // There must be a prior call.  Skip it.
       while( !bb->get_node(--_bb_end)->is_MachCall() ) {
         assert( bb->get_node(_bb_end)->is_MachProj(), "skipping projections after expected call" );
       }
     } else if( last->is_MachNullCheck() ) {

@@ -2500,20 +2500,20 @@
   // Walk over the block backwards.  Check to make sure each DEF doesn't
   // kill a live value (other than the one it's supposed to).  Add each
   // USE to the live set.
   for( uint i = b->number_of_nodes()-1; i >= _bb_start; i-- ) {
     Node *n = b->get_node(i);
-    int n_op = n->Opcode();
-    if( n_op == Op_MachProj && n->ideal_reg() == MachProjNode::fat_proj ) {
+    Opcodes n_op = n->Opcode();
+    if( n_op == Opcodes::Op_MachProj && n->ideal_reg() == static_cast<Opcodes>(MachProjNode::projType::fat_proj) ) {
       // Fat-proj kills a slew of registers
       RegMask rm = n->out_RegMask();// Make local copy
       while( rm.is_NotEmpty() ) {
         OptoReg::Name kill = rm.find_first_elem();
         rm.Remove(kill);
         verify_do_def( n, kill, msg );
       }
-    } else if( n_op != Op_Node ) { // Avoid brand new antidependence nodes
+    } else if( n_op != Opcodes::Op_Node ) { // Avoid brand new antidependence nodes
       // Get DEF'd registers the normal way
       verify_do_def( n, _regalloc->get_reg_first(n), msg );
       verify_do_def( n, _regalloc->get_reg_second(n), msg );
     }
 

@@ -2569,11 +2569,11 @@
   Node *later_def = NULL;
 
   // Finding a kill requires a real pinch-point.
   // Check for not already having a pinch-point.
   // Pinch points are Op_Node's.
-  if( pinch->Opcode() != Op_Node ) { // Or later-def/kill as pinch-point?
+  if( pinch->Opcode() != Opcodes::Op_Node ) { // Or later-def/kill as pinch-point?
     later_def = pinch;            // Must be def/kill as optimistic pinch-point
     if ( _pinch_free_list.size() > 0) {
       pinch = _pinch_free_list.pop();
     } else {
       pinch = new Node(1); // Pinch point to-be

@@ -2583,11 +2583,11 @@
       return;
     }
     _cfg->map_node_to_block(pinch, b);      // Pretend it's valid in this block (lazy init)
     _reg_node.map(def_reg,pinch); // Record pinch-point
     //_regalloc->set_bad(pinch->_idx); // Already initialized this way.
-    if( later_def->outcnt() == 0 || later_def->ideal_reg() == MachProjNode::fat_proj ) { // Distinguish def from kill
+    if( later_def->outcnt() == 0 || later_def->ideal_reg() == static_cast<Opcodes>(MachProjNode::projType::fat_proj) ) { // Distinguish def from kill
       pinch->init_req(0, _cfg->C->top());     // set not NULL for the next call
       add_prec_edge_from_to(later_def,pinch); // Add edge from kill to pinch
       later_def = NULL;           // and no later def
     }
     pinch->set_req(0,later_def);  // Hook later def so we can find it

@@ -2599,11 +2599,11 @@
   // Add output-dependence edge from later def to kill
   if( later_def )               // If there is some original def
     add_prec_edge_from_to(later_def,kill); // Add edge from def to kill
 
   // See if current kill is also a use, and so is forced to be the pinch-point.
-  if( pinch->Opcode() == Op_Node ) {
+  if( pinch->Opcode() == Opcodes::Op_Node ) {
     Node *uses = kill->is_Proj() ? kill->in(0) : kill;
     for( uint i=1; i<uses->req(); i++ ) {
       if( _regalloc->get_reg_first(uses->in(i)) == def_reg ||
           _regalloc->get_reg_second(uses->in(i)) == def_reg ) {
         // Yes, found a use/kill pinch-point

@@ -2626,11 +2626,11 @@
   Node *pinch = _reg_node[use_reg]; // Get pinch point
   // Check for no later def_reg/kill in block
   if ((pinch != NULL) && _cfg->get_block_for_node(pinch) == b &&
       // Use has to be block-local as well
       _cfg->get_block_for_node(use) == b) {
-    if( pinch->Opcode() == Op_Node && // Real pinch-point (not optimistic?)
+    if( pinch->Opcode() == Opcodes::Op_Node && // Real pinch-point (not optimistic?)
         pinch->req() == 1 ) {   // pinch not yet in block?
       pinch->del_req(0);        // yank pointer to later-def, also set flag
       // Insert the pinch-point in the block just after the last use
       b->insert_node(pinch, b->find_node(use) + 1);
       _bb_end++;                // Increase size scheduled region in block

@@ -2687,11 +2687,11 @@
   Node* end_node         = (_bb_end-1 >= _bb_start) ? b->get_node(last_safept) : NULL;
   Node* last_safept_node = end_node;
   for( uint i = _bb_end-1; i >= _bb_start; i-- ) {
     Node *n = b->get_node(i);
     int is_def = n->outcnt();   // def if some uses prior to adding precedence edges
-    if( n->is_MachProj() && n->ideal_reg() == MachProjNode::fat_proj ) {
+    if( n->is_MachProj() && n->ideal_reg() == static_cast<Opcodes>(MachProjNode::projType::fat_proj) ) {
       // Fat-proj kills a slew of registers
       // This can add edges to 'n' and obscure whether or not it was a def,
       // hence the is_def flag.
       fat_proj_seen = true;
       RegMask rm = n->out_RegMask();// Make local copy

@@ -2707,11 +2707,11 @@
     }
 
     // Kill projections on a branch should appear to occur on the
     // branch, not afterwards, so grab the masks from the projections
     // and process them.
-    if (n->is_MachBranch() || n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_Jump) {
+    if (n->is_MachBranch() || n->is_Mach() && n->as_Mach()->ideal_Opcode() == Opcodes::Op_Jump) {
       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
         Node* use = n->fast_out(i);
         if (use->is_Proj()) {
           RegMask rm = use->out_RegMask();// Make local copy
           while( rm.is_NotEmpty() ) {

@@ -2726,11 +2726,11 @@
     // Check each register used by this instruction for a following DEF/KILL
     // that must occur afterward and requires an anti-dependence edge.
     for( uint j=0; j<n->req(); j++ ) {
       Node *def = n->in(j);
       if( def ) {
-        assert( !def->is_MachProj() || def->ideal_reg() != MachProjNode::fat_proj, "" );
+        assert( !def->is_MachProj() || def->ideal_reg() != static_cast<Opcodes>(MachProjNode::projType::fat_proj), "" );
         anti_do_use( b, n, _regalloc->get_reg_first(def) );
         anti_do_use( b, n, _regalloc->get_reg_second(def) );
       }
     }
     // Do not allow defs of new derived values to float above GC

@@ -2756,11 +2756,11 @@
       if( b->get_node(last_safept) != last_safept_node ) {
         last_safept = b->find_node(last_safept_node);
       }
       for( uint j=last_safept; j > i; j-- ) {
         Node *mach = b->get_node(j);
-        if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
+        if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Opcodes::Op_AddP )
           mach->add_prec( n );
       }
       last_safept = i;
       last_safept_node = m;
     }

@@ -2802,11 +2802,11 @@
     if (_cfg->C->trace_opto_output()) tty->print("Reclaimed pinch nodes:");
 #endif
     int trace_cnt = 0;
     for (uint k = 0; k < _reg_node.Size(); k++) {
       Node* pinch = _reg_node[k];
-      if ((pinch != NULL) && pinch->Opcode() == Op_Node &&
+      if ((pinch != NULL) && pinch->Opcode() == Opcodes::Op_Node &&
          // no precedence input edges
           (pinch->req() == pinch->len() || pinch->in(pinch->req()) == NULL) ) {
         cleanup_pinch(pinch);
         _pinch_free_list.push(pinch);
         _reg_node.map(k, NULL);

@@ -2827,11 +2827,11 @@
 #endif
 }
 
 // Clean up a pinch node for reuse.
 void Scheduling::cleanup_pinch( Node *pinch ) {
-  assert (pinch && pinch->Opcode() == Op_Node && pinch->req() == 1, "just checking");
+  assert (pinch && pinch->Opcode() == Opcodes::Op_Node && pinch->req() == 1, "just checking");
 
   for (DUIterator_Last imin, i = pinch->last_outs(imin); i >= imin; ) {
     Node* use = pinch->last_out(i);
     uint uses_found = 0;
     for (uint j = use->req(); j < use->len(); j++) {
< prev index next >