--- old/src/share/vm/opto/lcm.cpp	2016-07-11 22:46:29.459437096 +0900
+++ new/src/share/vm/opto/lcm.cpp	2016-07-11 22:46:29.323437572 +0900
@@ -41,7 +41,7 @@
 static bool accesses_heap_base_zone(Node *val) {
   if (Universe::narrow_oop_base() > 0) { // Implies UseCompressedOops.
     if (val && val->is_Mach()) {
-      if (val->as_Mach()->ideal_Opcode() == Op_DecodeN) {
+      if (val->as_Mach()->ideal_Opcode() == Opcodes::Op_DecodeN) {
         // This assumes all Decodes with TypePtr::NotNull are matched to nodes that
         // decode NULL to point to the heap base (Decode_NN).
         if (val->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull) {
@@ -88,7 +88,7 @@
 
   // Make sure the ptr-is-null path appears to be uncommon!
   float f = block->end()->as_MachIf()->_prob;
-  if( proj->Opcode() == Op_IfTrue ) f = 1.0f - f;
+  if( proj->Opcode() == Opcodes::Op_IfTrue ) f = 1.0f - f;
   if( f > PROB_UNLIKELY_MAG(4) ) return;
 
   uint bidx = 0;                 // Capture index of value into memop
@@ -147,7 +147,7 @@
   val = (Node*)(((intptr_t)val) & ~1);
 
   assert(!is_decoden || (val->in(0) == NULL) && val->is_Mach() &&
-         (val->as_Mach()->ideal_Opcode() == Op_DecodeN), "sanity");
+         (val->as_Mach()->ideal_Opcode() == Opcodes::Op_DecodeN), "sanity");
 
   // Search the successor block for a load or store who's base value is also
   // the tested value.  There may be several.
@@ -158,57 +158,57 @@
     if( !m->is_Mach() ) continue;
     MachNode *mach = m->as_Mach();
     was_store = false;
-    int iop = mach->ideal_Opcode();
+    Opcodes iop = mach->ideal_Opcode();
     switch( iop ) {
-    case Op_LoadB:
-    case Op_LoadUB:
-    case Op_LoadUS:
-    case Op_LoadD:
-    case Op_LoadF:
-    case Op_LoadI:
-    case Op_LoadL:
-    case Op_LoadP:
-    case Op_LoadN:
-    case Op_LoadS:
-    case Op_LoadKlass:
-    case Op_LoadNKlass:
-    case Op_LoadRange:
-    case Op_LoadD_unaligned:
-    case Op_LoadL_unaligned:
+    case Opcodes::Op_LoadB:
+    case Opcodes::Op_LoadUB:
+    case Opcodes::Op_LoadUS:
+    case Opcodes::Op_LoadD:
+    case Opcodes::Op_LoadF:
+    case Opcodes::Op_LoadI:
+    case Opcodes::Op_LoadL:
+    case Opcodes::Op_LoadP:
+    case Opcodes::Op_LoadN:
+    case Opcodes::Op_LoadS:
+    case Opcodes::Op_LoadKlass:
+    case Opcodes::Op_LoadNKlass:
+    case Opcodes::Op_LoadRange:
+    case Opcodes::Op_LoadD_unaligned:
+    case Opcodes::Op_LoadL_unaligned:
       assert(mach->in(2) == val, "should be address");
       break;
-    case Op_StoreB:
-    case Op_StoreC:
-    case Op_StoreCM:
-    case Op_StoreD:
-    case Op_StoreF:
-    case Op_StoreI:
-    case Op_StoreL:
-    case Op_StoreP:
-    case Op_StoreN:
-    case Op_StoreNKlass:
+    case Opcodes::Op_StoreB:
+    case Opcodes::Op_StoreC:
+    case Opcodes::Op_StoreCM:
+    case Opcodes::Op_StoreD:
+    case Opcodes::Op_StoreF:
+    case Opcodes::Op_StoreI:
+    case Opcodes::Op_StoreL:
+    case Opcodes::Op_StoreP:
+    case Opcodes::Op_StoreN:
+    case Opcodes::Op_StoreNKlass:
       was_store = true;         // Memory op is a store op
       // Stores will have their address in slot 2 (memory in slot 1).
       // If the value being nul-checked is in another slot, it means we
      // are storing the checked value, which does NOT check the value!
       if( mach->in(2) != val ) continue;
       break;                    // Found a memory op?
-    case Op_StrComp:
-    case Op_StrEquals:
-    case Op_StrIndexOf:
-    case Op_StrIndexOfChar:
-    case Op_AryEq:
-    case Op_StrInflatedCopy:
-    case Op_StrCompressedCopy:
-    case Op_EncodeISOArray:
-    case Op_HasNegatives:
+    case Opcodes::Op_StrComp:
+    case Opcodes::Op_StrEquals:
+    case Opcodes::Op_StrIndexOf:
+    case Opcodes::Op_StrIndexOfChar:
+    case Opcodes::Op_AryEq:
+    case Opcodes::Op_StrInflatedCopy:
+    case Opcodes::Op_StrCompressedCopy:
+    case Opcodes::Op_EncodeISOArray:
+    case Opcodes::Op_HasNegatives:
       // Not a legit memory op for implicit null check regardless of
       // embedded loads
       continue;
     default:                    // Also check for embedded loads
       if( !mach->needs_anti_dependence_check() )
         continue;               // Not an memory op; skip it
-      if( must_clone[iop] ) {
+      if( must_clone[static_cast<uint>(iop)] ) {
        // Do not move nodes which produce flags because
        // RA will try to clone it to place near branch and
        // it will cause recompilation, see clone_node().
@@ -401,7 +401,7 @@
   // NULL checks are always branch-if-eq.  If we see a IfTrue projection
   // then we are replacing a 'ne' test with a 'eq' NULL check test.
   // We need to flip the projections to keep the same semantics.
-  if( proj->Opcode() == Op_IfTrue ) {
+  if( proj->Opcode() == Opcodes::Op_IfTrue ) {
     // Swap order of projections in basic block to swap branch targets
     Node *tmp1 = block->get_node(block->end_idx()+1);
     Node *tmp2 = block->get_node(block->end_idx()+2);
@@ -480,11 +480,11 @@
     // uses of the phi are scheduled.
     Node *n = worklist[i];      // Get Node on worklist
 
-    int iop = n->is_Mach() ? n->as_Mach()->ideal_Opcode() : 0;
+    Opcodes iop = n->is_Mach() ? n->as_Mach()->ideal_Opcode() : Opcodes::Op_Node;
     if( n->is_Proj() ||         // Projections always win
-        n->Opcode()== Op_Con || // So does constant 'Top'
-        iop == Op_CreateEx ||   // Create-exception must start block
-        iop == Op_CheckCastPP
+        n->Opcode()== Opcodes::Op_Con || // So does constant 'Top'
+        iop == Opcodes::Op_CreateEx ||   // Create-exception must start block
+        iop == Opcodes::Op_CheckCastPP
       ) {
       worklist.map(i,worklist.pop());
       return n;
@@ -500,9 +500,9 @@
       continue;
 
     // Schedule IV increment last.
-    if (e->is_Mach() && e->as_Mach()->ideal_Opcode() == Op_CountedLoopEnd) {
+    if (e->is_Mach() && e->as_Mach()->ideal_Opcode() == Opcodes::Op_CountedLoopEnd) {
       // Cmp might be matched into CountedLoopEnd node.
-      Node *cmp = (e->in(1)->ideal_reg() == Op_RegFlags) ? e->in(1) : e;
+      Node *cmp = (e->in(1)->ideal_reg() == Opcodes::Op_RegFlags) ? e->in(1) : e;
       if (cmp->req() > 1 && cmp->in(1) == n && n->is_iteratively_computed()) {
         continue;
       }
@@ -513,7 +513,7 @@
     // See if this instruction is consumed by a branch. If so, then (as the
     // branch is the last instruction in the basic block) force it to the
     // end of the basic block
-    if ( must_clone[iop] ) {
+    if ( must_clone[static_cast<uint>(iop)] ) {
       // See if any use is a branch
       bool found_machif = false;
 
@@ -542,7 +542,7 @@
     for (uint j = 0; j < n->req() ; j++) {
       Node *inn = n->in(j);
       if (inn) {
-        if (inn->is_Mach() && must_clone[inn->as_Mach()->ideal_Opcode()] ) {
+        if (inn->is_Mach() && must_clone[static_cast<uint>(inn->as_Mach()->ideal_Opcode())] ) {
           n_choice = 3;
           break;
         }
@@ -642,20 +642,20 @@
         if (!m->is_Mach()) continue;
         MachNode *mach = m->as_Mach();
         bool src_matches = false;
-        int iop = mach->ideal_Opcode();
+        Opcodes iop = mach->ideal_Opcode();
 
         switch (iop) {
-        case Op_StoreB:
-        case Op_StoreC:
-        case Op_StoreCM:
-        case Op_StoreD:
-        case Op_StoreF:
-        case Op_StoreI:
-        case Op_StoreL:
-        case Op_StoreP:
-        case Op_StoreN:
-        case Op_StoreVector:
-        case Op_StoreNKlass:
+        case Opcodes::Op_StoreB:
+        case Opcodes::Op_StoreC:
+        case Opcodes::Op_StoreCM:
+        case Opcodes::Op_StoreD:
+        case Opcodes::Op_StoreF:
+        case Opcodes::Op_StoreI:
+        case Opcodes::Op_StoreL:
+        case Opcodes::Op_StoreP:
+        case Opcodes::Op_StoreN:
+        case Opcodes::Op_StoreVector:
+        case Opcodes::Op_StoreNKlass:
          for (uint k = 1; k < m->req(); k++) {
            Node *in = m->in(k);
            if (in == src_n) {
@@ -810,23 +810,23 @@
 
   // Set all registers killed and not already defined by the call.
   uint r_cnt = mcall->tf()->range()->cnt();
-  int op = mcall->ideal_Opcode();
-  MachProjNode *proj = new MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
+  Opcodes op = mcall->ideal_Opcode();
+  MachProjNode *proj = new MachProjNode( mcall, r_cnt+1, RegMask::Empty, static_cast<Opcodes>(MachProjNode::projType::fat_proj) );
   map_node_to_block(proj, block);
   block->insert_node(proj, node_cnt++);
 
   // Select the right register save policy.
   const char *save_policy = NULL;
   switch (op) {
-    case Op_CallRuntime:
-    case Op_CallLeaf:
-    case Op_CallLeafNoFP:
+    case Opcodes::Op_CallRuntime:
+    case Opcodes::Op_CallLeaf:
+    case Opcodes::Op_CallLeafNoFP:
       // Calling C code so use C calling convention
       save_policy = _matcher._c_reg_save_policy;
       break;
 
-    case Op_CallStaticJava:
-    case Op_CallDynamicJava:
+    case Opcodes::Op_CallStaticJava:
+    case Opcodes::Op_CallDynamicJava:
       // Calling Java code so use Java calling convention
       save_policy = _matcher._register_save_policy;
       break;
@@ -843,13 +843,13 @@
   // done for oops since idealreg2debugmask takes care of debug info
   // references but there no way to handle oops differently than other
   // pointers as far as the kill mask goes.
-  bool exclude_soe = op == Op_CallRuntime;
+  bool exclude_soe = op == Opcodes::Op_CallRuntime;
 
   // If the call is a MethodHandle invoke, we need to exclude the
   // register which is used to save the SP value over MH invokes from
   // the mask.  Otherwise this register could be used for
   // deoptimization information.
-  if (op == Op_CallStaticJava) {
+  if (op == Opcodes::Op_CallStaticJava) {
     MachCallStaticJavaNode* mcallstaticjava = (MachCallStaticJavaNode*) mcall;
     if (mcallstaticjava->_method_handle_invoke)
       proj->_rout.OR(Matcher::method_handle_invoke_SP_save_mask());
@@ -927,7 +927,7 @@
 
 #ifdef ASSERT
     if( UseConcMarkSweepGC || UseG1GC ) {
-      if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_StoreCM ) {
+      if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Opcodes::Op_StoreCM ) {
         // Check the precedence edges
         for (uint prec = n->req(); prec < n->len(); prec++) {
           Node* oop_store = n->in(prec);
@@ -942,8 +942,8 @@
     // A few node types require changing a required edge to a precedence edge
     // before allocation.
     if( n->is_Mach() && n->req() > TypeFunc::Parms &&
-        (n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire ||
-         n->as_Mach()->ideal_Opcode() == Op_MemBarVolatile) ) {
+        (n->as_Mach()->ideal_Opcode() == Opcodes::Op_MemBarAcquire ||
+         n->as_Mach()->ideal_Opcode() == Opcodes::Op_MemBarVolatile) ) {
       // MemBarAcquire could be created without Precedent edge.
       // del_req() replaces the specified edge with the last input edge
      // and then removes the last edge. If the specified edge > number of
@@ -996,7 +996,7 @@
         // of the phi to be scheduled first. The select() method breaks
         // ties in scheduling by worklist order.
         delay.push(m);
-      } else if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CreateEx) {
+      } else if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Opcodes::Op_CreateEx) {
         // Force the CreateEx to the top of the list so it's processed
         // first and ends up at the start of the block.
         worklist.insert(0, m);
@@ -1092,7 +1092,7 @@
       regs.Insert(_matcher.c_frame_pointer());
       regs.OR(n->out_RegMask());
 
-      MachProjNode *proj = new MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
+      MachProjNode *proj = new MachProjNode( n, 1, RegMask::Empty, static_cast<Opcodes>(MachProjNode::projType::fat_proj) );
       map_node_to_block(proj, block);
       block->insert_node(proj, phi_cnt++);
 
@@ -1356,7 +1356,7 @@
     // If any newly created nodes remain, move the CreateEx node to the top
     if (new_cnt > 0) {
       Node *cex = sb->get_node(1+new_cnt);
-      if( cex->is_Mach() && cex->as_Mach()->ideal_Opcode() == Op_CreateEx ) {
+      if( cex->is_Mach() && cex->as_Mach()->ideal_Opcode() == Opcodes::Op_CreateEx ) {
        sb->remove_node(1+new_cnt);
        sb->insert_node(cex, 1);
      }
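
Note (not part of the patch): the mechanical pattern applied throughout this change is replacing the plain integer opcode constants (Op_*) with members of a scoped enum class Opcodes, whose values no longer convert implicitly to integers. The standalone sketch below illustrates why table lookups such as must_clone[...] then need an explicit cast back to an unsigned index; the enumerator list and table contents are simplified placeholders, not the real definitions from opcodes.hpp or the matcher-generated tables.

    // Illustrative sketch only; real enumerators and table contents differ.
    typedef unsigned int uint;

    enum class Opcodes : uint {   // scoped enum: no implicit conversion to int
      Op_Node = 0,
      Op_Con,
      Op_DecodeN,
      Op_CountedLoopEnd
      // ... the generated opcode list continues in the real opcodes.hpp
    };

    // Table indexed by opcode value; values here are made up for the example.
    static const bool must_clone[] = { false, false, false, true };

    static bool needs_clone(Opcodes iop) {
      // With the old 'int iop' the lookup was simply must_clone[iop]; a scoped
      // enum requires an explicit cast back to an integral index.
      return must_clone[static_cast<uint>(iop)];
    }

    int main() {
      return needs_clone(Opcodes::Op_CountedLoopEnd) ? 0 : 1;
    }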