--- old/src/share/vm/opto/output.cpp 2016-07-11 22:46:48.718369666 +0900 +++ new/src/share/vm/opto/output.cpp 2016-07-11 22:46:48.580370150 +0900 @@ -105,8 +105,8 @@ Block* block = _cfg->get_block(i); if (!block->is_connector() && block->non_connector_successor(0) == _cfg->get_root_block()) { // Found a program exit point? Node* m = block->end(); - if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) { - MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return); + if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Opcodes::Op_Halt) { + MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Opcodes::Op_Return); block->add_inst(epilog); _cfg->map_node_to_block(epilog, block); } @@ -381,7 +381,7 @@ // Find the branch; ignore trailing NOPs. for (j = block->number_of_nodes()-1; j>=0; j--) { Node* n = block->get_node(j); - if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con) + if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Opcodes::Op_Con) break; } assert(j >= 0 && j == idx && block->get_node(j) == (Node*)mach, "sanity"); @@ -1313,7 +1313,7 @@ } } mach->as_MachBranch()->label_set( &blk_labels[block_num], block_num ); - } else if (mach->ideal_Opcode() == Op_Jump) { + } else if (mach->ideal_Opcode() == Opcodes::Op_Jump) { for (uint h = 0; h < block->_num_succs; h++) { Block* succs_block = block->_succs[h]; for (uint j = 1; j < succs_block->num_preds(); j++) { @@ -1328,7 +1328,7 @@ } #ifdef ASSERT // Check that oop-store precedes the card-mark - else if (mach->ideal_Opcode() == Op_StoreCM) { + else if (mach->ideal_Opcode() == Opcodes::Op_StoreCM) { uint storeCM_idx = j; int count = 0; for (uint prec = mach->req(); prec < mach->len(); prec++) { @@ -1572,7 +1572,7 @@ // Find the branch; ignore trailing NOPs. 
for (j = block->number_of_nodes() - 1; j >= 0; j--) { n = block->get_node(j); - if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con) { + if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Opcodes::Op_Con) { break; } } @@ -1967,15 +1967,15 @@ // Special Check for compares following branches if( n->is_Mach() && _scheduled.size() > 0 ) { - int op = n->as_Mach()->ideal_Opcode(); + Opcodes op = n->as_Mach()->ideal_Opcode(); Node *last = _scheduled[0]; if( last->is_MachIf() && last->in(1) == n && - ( op == Op_CmpI || - op == Op_CmpU || - op == Op_CmpP || - op == Op_CmpF || - op == Op_CmpD || - op == Op_CmpL ) ) { + ( op == Opcodes::Op_CmpI || + op == Opcodes::Op_CmpU || + op == Opcodes::Op_CmpP || + op == Opcodes::Op_CmpF || + op == Opcodes::Op_CmpD || + op == Opcodes::Op_CmpL ) ) { // Recalculate position, moving to front of same latency for ( i=0 ; i < _available.size(); i++ ) @@ -2200,11 +2200,11 @@ // not in the bb->_nodes array. This happens for debug-info-only BoxLocks. // 'Schedule' them (basically ignore in the schedule) but do not insert them // into the block. All other scheduled nodes get put in the schedule here. 
- int op = n->Opcode(); - if( (op == Op_Node && n->req() == 0) || // anti-dependence node OR - (op != Op_Node && // Not an unused antidepedence node and + Opcodes op = n->Opcode(); + if( (op == Opcodes::Op_Node && n->req() == 0) || // anti-dependence node OR + (op != Opcodes::Op_Node && // Not an unused antidepedence node and // not an unallocated boxlock - (OptoReg::is_valid(_regalloc->get_reg_first(n)) || op != Op_BoxLock)) ) { + (OptoReg::is_valid(_regalloc->get_reg_first(n)) || op != Opcodes::Op_BoxLock)) ) { // Push any trailing projections if( bb->get_node(bb->number_of_nodes()-1) != n ) { @@ -2355,10 +2355,10 @@ // Also, MachIdealNodes do not get scheduled if( !n->is_Mach() ) continue; // Skip non-machine nodes MachNode *mach = n->as_Mach(); - int iop = mach->ideal_Opcode(); - if( iop == Op_CreateEx ) continue; // CreateEx is pinned - if( iop == Op_Con ) continue; // Do not schedule Top - if( iop == Op_Node && // Do not schedule PhiNodes, ProjNodes + Opcodes iop = mach->ideal_Opcode(); + if( iop == Opcodes::Op_CreateEx ) continue; // CreateEx is pinned + if( iop == Opcodes::Op_Con ) continue; // Do not schedule Top + if( iop == Opcodes::Op_Node && // Do not schedule PhiNodes, ProjNodes mach->pipeline() == MachNode::pipeline_class() && !n->is_SpillCopy() && !n->is_MachMerge() ) // Breakpoints, Prolog, etc continue; @@ -2373,13 +2373,13 @@ Node *last = bb->get_node(_bb_end); // Ignore trailing NOPs. while (_bb_end > 0 && last->is_Mach() && - last->as_Mach()->ideal_Opcode() == Op_Con) { + last->as_Mach()->ideal_Opcode() == Opcodes::Op_Con) { last = bb->get_node(--_bb_end); } - assert(!last->is_Mach() || last->as_Mach()->ideal_Opcode() != Op_Con, ""); + assert(!last->is_Mach() || last->as_Mach()->ideal_Opcode() != Opcodes::Op_Con, ""); if( last->is_Catch() || // Exclude unreachable path case when Halt node is in a separate block. 
- (_bb_end > 1 && last->is_Mach() && last->as_Mach()->ideal_Opcode() == Opcodes::Op_Halt) ) { // There must be a prior call. Skip it. while( !bb->get_node(--_bb_end)->is_MachCall() ) { assert( bb->get_node(_bb_end)->is_MachProj(), "skipping projections after expected call" ); @@ -2502,8 +2502,8 @@ // USE to the live set. for( uint i = b->number_of_nodes()-1; i >= _bb_start; i-- ) { Node *n = b->get_node(i); - int n_op = n->Opcode(); - if( n_op == Op_MachProj && n->ideal_reg() == MachProjNode::fat_proj ) { + Opcodes n_op = n->Opcode(); + if( n_op == Opcodes::Op_MachProj && n->ideal_reg() == static_cast<uint>(MachProjNode::projType::fat_proj) ) { // Fat-proj kills a slew of registers RegMask rm = n->out_RegMask();// Make local copy while( rm.is_NotEmpty() ) { @@ -2511,7 +2511,7 @@ rm.Remove(kill); verify_do_def( n, kill, msg ); } - } else if( n_op != Op_Node ) { // Avoid brand new antidependence nodes + } else if( n_op != Opcodes::Op_Node ) { // Avoid brand new antidependence nodes // Get DEF'd registers the normal way verify_do_def( n, _regalloc->get_reg_first(n), msg ); verify_do_def( n, _regalloc->get_reg_second(n), msg ); @@ -2571,7 +2571,7 @@ // Finding a kill requires a real pinch-point. // Check for not already having a pinch-point. // Pinch points are Op_Node's. - if( pinch->Opcode() != Op_Node ) { // Or later-def/kill as pinch-point? + if( pinch->Opcode() != Opcodes::Op_Node ) { // Or later-def/kill as pinch-point? later_def = pinch; // Must be def/kill as optimistic pinch-point if ( _pinch_free_list.size() > 0) { pinch = _pinch_free_list.pop(); @@ -2585,7 +2585,7 @@ _cfg->map_node_to_block(pinch, b); // Pretend it's valid in this block (lazy init) _reg_node.map(def_reg,pinch); // Record pinch-point //_regalloc->set_bad(pinch->_idx); // Already initialized this way.
- if( later_def->outcnt() == 0 || later_def->ideal_reg() == static_cast<uint>(MachProjNode::projType::fat_proj) ) { // Distinguish def from kill + pinch->init_req(0, _cfg->C->top()); // set not NULL for the next call add_prec_edge_from_to(later_def,pinch); // Add edge from kill to pinch later_def = NULL; // and no later def @@ -2601,7 +2601,7 @@ add_prec_edge_from_to(later_def,kill); // Add edge from def to kill // See if current kill is also a use, and so is forced to be the pinch-point. - if( pinch->Opcode() == Op_Node ) { + if( pinch->Opcode() == Opcodes::Op_Node ) { Node *uses = kill->is_Proj() ? kill->in(0) : kill; for( uint i=1; i<uses->req(); i++ ) { if( _regalloc->get_reg_first(uses->in(i)) == def_reg || @@ -2628,7 +2628,7 @@ if ((pinch != NULL) && _cfg->get_block_for_node(pinch) == b && // Use has to be block-local as well _cfg->get_block_for_node(use) == b) { - if( pinch->Opcode() == Op_Node && // Real pinch-point (not optimistic?) pinch->req() == 1 ) { // pinch not yet in block? pinch->del_req(0); // yank pointer to later-def, also set flag // Insert the pinch-point in the block just after the last use @@ -2689,7 +2689,7 @@ for( uint i = _bb_end-1; i >= _bb_start; i-- ) { Node *n = b->get_node(i); int is_def = n->outcnt(); // def if some uses prior to adding precedence edges - if( n->is_MachProj() && n->ideal_reg() == static_cast<uint>(MachProjNode::projType::fat_proj) ) { // Fat-proj kills a slew of registers // This can add edges to 'n' and obscure whether or not it was a def, // hence the is_def flag. @@ -2709,7 +2709,7 @@ // Kill projections on a branch should appear to occur on the // branch, not afterwards, so grab the masks from the projections // and process them.
- if (n->is_MachBranch() || n->is_Mach() && n->as_Mach()->ideal_Opcode() == Opcodes::Op_Jump) { for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { Node* use = n->fast_out(i); if (use->is_Proj()) { @@ -2728,7 +2728,7 @@ for( uint j=0; j<n->req(); j++ ) { Node *def = n->in(j); if( def ) { - assert( !def->is_MachProj() || def->ideal_reg() != static_cast<uint>(MachProjNode::projType::fat_proj), "" ); anti_do_use( b, n, _regalloc->get_reg_first(def) ); anti_do_use( b, n, _regalloc->get_reg_second(def) ); } @@ -2758,7 +2758,7 @@ } for( uint j=last_safept; j > i; j-- ) { Node *mach = b->get_node(j); - if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP ) + if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Opcodes::Op_AddP ) mach->add_prec( n ); } last_safept = i; @@ -2804,7 +2804,7 @@ int trace_cnt = 0; for (uint k = 0; k < _reg_node.Size(); k++) { Node* pinch = _reg_node[k]; - if ((pinch != NULL) && pinch->Opcode() == Op_Node && + if ((pinch != NULL) && pinch->Opcode() == Opcodes::Op_Node && // no predecence input edges (pinch->req() == pinch->len() || pinch->in(pinch->req()) == NULL) ) { cleanup_pinch(pinch); @@ -2829,7 +2829,7 @@ // Clean up a pinch node for reuse. void Scheduling::cleanup_pinch( Node *pinch ) { - assert (pinch && pinch->Opcode() == Op_Node && pinch->req() == 1, "just checking"); + assert (pinch && pinch->Opcode() == Opcodes::Op_Node && pinch->req() == 1, "just checking"); for (DUIterator_Last imin, i = pinch->last_outs(imin); i >= imin; ) { Node* use = pinch->last_out(i);