src/share/vm/opto/reg_split.cpp

@@ -58,23 +58,23 @@
 // not cover the input (or output), use the input (or output) mask instead.
 Node *PhaseChaitin::get_spillcopy_wide(MachSpillCopyNode::SpillType spill_type, Node *def, Node *use, uint uidx) {
   // If ideal reg doesn't exist we've got a bad schedule happening
   // that is forcing us to spill something that isn't spillable.
   // Bail rather than abort
-  int ireg = def->ideal_reg();
-  if (ireg == 0 || ireg == Op_RegFlags) {
-    assert(false, "attempted to spill a non-spillable item: %d: %s <- %d: %s, ireg = %d, spill_type: %s",
-           def->_idx, def->Name(), use->_idx, use->Name(), ireg,
+  Opcodes ireg = def->ideal_reg();
+  if (ireg == Opcodes::Op_Node || ireg == Opcodes::Op_RegFlags) {
+    assert(false, "attempted to spill a non-spillable item: %d: %s <- %d: %s, ireg = %u, spill_type: %s",
+           def->_idx, def->Name(), use->_idx, use->Name(), static_cast<uint>(ireg),
            MachSpillCopyNode::spill_type(spill_type));
     C->record_method_not_compilable("attempted to spill a non-spillable item");
     return NULL;
   }
   if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
     return NULL;
   }
   const RegMask *i_mask = &def->out_RegMask();
-  const RegMask *w_mask = C->matcher()->idealreg2spillmask[ireg];
+  const RegMask *w_mask = C->matcher()->idealreg2spillmask[static_cast<uint>(ireg)];
   const RegMask *o_mask = use ? &use->in_RegMask(uidx) : w_mask;
   const RegMask *w_i_mask = w_mask->overlap( *i_mask ) ? w_mask : i_mask;
   const RegMask *w_o_mask;
 
   int num_regs = RegMask::num_registers(ireg);

@@ -90,11 +90,11 @@
     // Mis-aligned doubles come here and XMM->FPR moves on x86.
     w_o_mask = o_mask;          // Must target desired registers
     // Does the ideal-reg-mask overlap with o_mask?  I.e., can I use
     // a reg-reg move or do I need a trip across register classes
     // (and thus through memory)?
-    if( !C->matcher()->idealreg2regmask[ireg]->overlap( *o_mask) && o_mask->is_UP() )
+    if( !C->matcher()->idealreg2regmask[static_cast<uint>(ireg)]->overlap( *o_mask) && o_mask->is_UP() )
       // Here we assume a trip through memory is required.
       w_i_mask = &C->FIRST_STACK_mask();
   }
   return new MachSpillCopyNode(spill_type, def, *w_i_mask, *w_o_mask );
 }
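
The mechanical pattern applied throughout this change is the same: ideal_reg() now returns the scoped Opcodes enum instead of a plain int, so comparisons spell out the qualified enumerators (Opcodes::Op_Node instead of 0) and every use as an array index needs an explicit static_cast<uint>. A minimal standalone sketch of that pattern follows; the enum values and the lookup table here are illustrative stand-ins, not the real HotSpot declarations.

  #include <cstdio>

  // Illustrative stand-ins only -- not the HotSpot Opcodes enum or matcher tables.
  enum class Opcodes : unsigned int {
    Op_Node = 0,     // "no ideal register", previously compared as ireg == 0
    Op_RegI,
    Op_RegFlags
  };

  static const char* idealreg2name[] = { "Node", "RegI", "RegFlags" };

  int main() {
    Opcodes ireg = Opcodes::Op_RegI;
    // A scoped enum does not convert implicitly to an integral type,
    // so indexing a table requires the cast seen in each hunk of this change.
    std::printf("%s\n", idealreg2name[static_cast<unsigned int>(ireg)]);
    // Comparisons use the fully qualified enumerators.
    if (ireg == Opcodes::Op_RegFlags || ireg == Opcodes::Op_Node) {
      std::printf("not spillable\n");
    }
    return 0;
  }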

@@ -154,11 +154,11 @@
   // (The implicit_null_check function ensures the use is also dominated
   // by the branch-not-taken block.)
   Node *be = b->end();
   if( be->is_MachNullCheck() && be->in(1) == def && def == b->get_node(loc)) {
     // Spill goes in the branch-not-taken block
-    b = b->_succs[b->get_node(b->end_idx()+1)->Opcode() == Op_IfTrue];
+    b = b->_succs[b->get_node(b->end_idx()+1)->Opcode() == Opcodes::Op_IfTrue];
     loc = 0;                    // Just past the Region
   }
   assert( loc >= 0, "must insert past block head" );
 
   // Get a def-side SpillCopy

@@ -332,11 +332,11 @@
 
       Block *b_def = _cfg.get_block_for_node(def);
       int idx_def = b_def->find_node(def);
       // Cannot spill Op_RegFlags.
       Node *in_spill;
-      if (in->ideal_reg() != Op_RegFlags) {
+      if (in->ideal_reg() != Opcodes::Op_RegFlags) {
         in_spill = get_spillcopy_wide(MachSpillCopyNode::InputToRematerialization, in, def, i);
         if (!in_spill) { return 0; } // Bailed out
         insert_proj(b_def, idx_def, in_spill, maxlrg++);
         if (b_def == b) {
           insidx++;

@@ -949,11 +949,11 @@
             }
 
             MachNode *mach = n->is_Mach() ? n->as_Mach() : NULL;
             // Base pointers and oopmap references do not care where they live.
             if ((inpidx >= oopoff) ||
-                (mach && mach->ideal_Opcode() == Op_AddP && inpidx == AddPNode::Base)) {
+                (mach && mach->ideal_Opcode() == Opcodes::Op_AddP && inpidx == AddPNode::Base)) {
               if (def->rematerialize() && lrgs(useidx)._was_spilled2) {
                 // This def has been rematerialized a couple of times without
                 // progress. It doesn't care if it lives UP or DOWN, so
                 // spill it down now.
                 maxlrg = split_USE(MachSpillCopyNode::BasePointerToMem, def,b,n,inpidx,maxlrg,false,false,splits,slidx);

@@ -973,11 +973,11 @@
                 // derived value is spilling and we have a copy both in Reachblock
                 // (called here 'def') and debug_defs[slidx] we need to mention
                 // both in derived/base pairs or kill one.
                 Node *derived_debug = debug_defs[slidx];
                 if( ((inpidx - oopoff) & 1) == DERIVED && // derived vs base?
-                    mach && mach->ideal_Opcode() != Op_Halt &&
+                    mach && mach->ideal_Opcode() != Opcodes::Op_Halt &&
                     derived_debug != NULL &&
                     derived_debug != def ) { // Actual 2nd value appears
                   // We have already set 'def' as a derived value.
                   // Also set debug_defs[slidx] as a derived value.
                   uint k;

@@ -1092,12 +1092,12 @@
                   insidx++;  // Reset iterator to skip USE side split
                 }
                 else {       // DOWN, mem->mem copy
                   // COPY UP & DOWN HERE - NO DEF - NO CISC SPILL
                   // First Split-UP to move value into Register
-                  uint def_ideal = def->ideal_reg();
-                  const RegMask* tmp_rm = Matcher::idealreg2regmask[def_ideal];
+                  Opcodes def_ideal = def->ideal_reg();
+                  const RegMask* tmp_rm = Matcher::idealreg2regmask[static_cast<uint>(def_ideal)];
                   Node *spill = new MachSpillCopyNode(MachSpillCopyNode::MemToReg, def, dmask, *tmp_rm);
                   insert_proj( b, insidx, spill, maxlrg );
                   // Then Split-DOWN as if previous Split was DEF
                   maxlrg = split_USE(MachSpillCopyNode::RegToMem, spill,b,n,inpidx,maxlrg,false,false, splits,slidx);
                   // If it wasn't split bail
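
For the DOWN, mem->mem case above, typical targets have no direct stack-to-stack move, so the value takes a round trip through a register: one MemToReg copy followed by one RegToMem copy. A trivial standalone sketch of that round trip; the slot array and helper names are hypothetical, not HotSpot code.

  #include <cstdio>

  static int spill_slots[16];                                              // stand-in for stack spill slots

  static int  load_slot(int slot)           { return spill_slots[slot]; }  // "Split-UP": mem -> reg
  static void store_slot(int slot, int val) { spill_slots[slot] = val; }   // "Split-DOWN": reg -> mem

  int main() {
    spill_slots[2] = 42;                   // value currently spilled DOWN in slot 2
    int reg = load_slot(2);                // first copy it UP into a register
    store_slot(5, reg);                    // then copy it DOWN into the new slot
    std::printf("%d\n", spill_slots[5]);   // prints 42
    return 0;
  }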

@@ -1182,11 +1182,11 @@
           set_was_spilled(n);
         assert(!n->is_Phi(),"Cannot insert Phi into DEFS list");
         // Grab UP info for DEF
         const RegMask &dmask = n->out_RegMask();
         bool defup = dmask.is_UP();
-        int ireg = n->ideal_reg();
+        Opcodes ireg = n->ideal_reg();
         bool is_vect = RegMask::is_vector(ireg);
         // Only split at Def if this is a HRP block or bound (and spilled once)
         if( !n->rematerialize() &&
             (((dmask.is_bound(ireg) || !is_vect && dmask.is_misaligned_pair()) &&
               (deflrg._direct_conflict || deflrg._must_spill)) ||

@@ -1241,12 +1241,12 @@
             deflrg.reg() < LRG::SPILL_REG ) { // And DEF is from stack
           LRG &uselrg = lrgs(useidx);
           if( OptoReg::is_stack(uselrg.reg()) &&
               uselrg.reg() < LRG::SPILL_REG && // USE is from stack
               deflrg.reg() != uselrg.reg() ) { // Not trivially removed
-            uint def_ideal_reg = n->bottom_type()->ideal_reg();
-            const RegMask &def_rm = *Matcher::idealreg2regmask[def_ideal_reg];
+            Opcodes def_ideal_reg = n->bottom_type()->ideal_reg();
+            const RegMask &def_rm = *Matcher::idealreg2regmask[static_cast<uint>(def_ideal_reg)];
             const RegMask &use_rm = n->in_RegMask(copyidx);
             if( def_rm.overlap(use_rm) && n->is_SpillCopy() ) {  // Bug 4707800, 'n' may be a storeSSL
               if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {  // Check when generating nodes
                 return 0;
               }