--- old/src/share/vm/opto/escape.cpp 2016-07-11 22:46:24.163455638 +0900 +++ new/src/share/vm/opto/escape.cpp 2016-07-11 22:46:24.020456139 +0900 @@ -160,7 +160,7 @@ // scalar replaceable objects in split_unique_types(). _mergemem_worklist.append(n->as_MergeMem()); } else if (OptimizePtrCompare && n->is_Cmp() && - (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) { + (n->Opcode() == Opcodes::Op_CmpP || n->Opcode() == Opcodes::Op_CmpN)) { // Collect compare pointers nodes. ptr_cmp_worklist.append(n); } else if (n->is_MemBarStoreStore()) { @@ -168,7 +168,7 @@ // escape status of the associated Allocate node some of them // may be eliminated. storestore_worklist.append(n); - } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) && + } else if (n->is_MemBar() && (n->Opcode() == Opcodes::Op_MemBarRelease) && (n->req() > MemBarNode::Precedent)) { record_for_optimizer(n); #ifdef ASSERT @@ -379,9 +379,9 @@ if (n_ptn == phantom_obj || n_ptn == null_obj) return; // Skip predefined nodes. - int opcode = n->Opcode(); + Opcodes opcode = n->Opcode(); switch (opcode) { - case Op_AddP: { + case Opcodes::Op_AddP: { Node* base = get_addp_base(n); PointsToNode* ptn_base = ptnode_adr(base->_idx); // Field nodes are created for all field types. They are used in @@ -398,30 +398,30 @@ } break; } - case Op_CastX2P: { + case Opcodes::Op_CastX2P: { map_ideal_node(n, phantom_obj); break; } - case Op_CastPP: - case Op_CheckCastPP: - case Op_EncodeP: - case Op_DecodeN: - case Op_EncodePKlass: - case Op_DecodeNKlass: { + case Opcodes::Op_CastPP: + case Opcodes::Op_CheckCastPP: + case Opcodes::Op_EncodeP: + case Opcodes::Op_DecodeN: + case Opcodes::Op_EncodePKlass: + case Opcodes::Op_DecodeNKlass: { add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist); break; } - case Op_CMoveP: { + case Opcodes::Op_CMoveP: { add_local_var(n, PointsToNode::NoEscape); // Do not add edges during first iteration because some could be // not defined yet. delayed_worklist->push(n); break; } - case Op_ConP: - case Op_ConN: - case Op_ConNKlass: { + case Opcodes::Op_ConP: + case Opcodes::Op_ConN: + case Opcodes::Op_ConNKlass: { // assume all oop constants globally escape except for null PointsToNode::EscapeState es; const Type* t = igvn->type(n); @@ -433,34 +433,34 @@ add_java_object(n, es); break; } - case Op_CreateEx: { + case Opcodes::Op_CreateEx: { // assume that all exception objects globally escape map_ideal_node(n, phantom_obj); break; } - case Op_LoadKlass: - case Op_LoadNKlass: { + case Opcodes::Op_LoadKlass: + case Opcodes::Op_LoadNKlass: { // Unknown class is loaded map_ideal_node(n, phantom_obj); break; } - case Op_LoadP: - case Op_LoadN: - case Op_LoadPLocked: { + case Opcodes::Op_LoadP: + case Opcodes::Op_LoadN: + case Opcodes::Op_LoadPLocked: { add_objload_to_connection_graph(n, delayed_worklist); break; } - case Op_Parm: { + case Opcodes::Op_Parm: { map_ideal_node(n, phantom_obj); break; } - case Op_PartialSubtypeCheck: { + case Opcodes::Op_PartialSubtypeCheck: { // Produces Null or notNull and is used in only in CmpP so // phantom_obj could be used. map_ideal_node(n, phantom_obj); // Result is unknown break; } - case Op_Phi: { + case Opcodes::Op_Phi: { // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because // ThreadLocal has RawPtr type. 
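// ---------------------------------------------------------------------------
// Illustrative sketch (not HotSpot code, not part of the patch): the pattern
// this change applies throughout escape.cpp. Once the ideal-node opcode is a
// scoped enumeration ("enum class Opcodes"), it no longer converts implicitly
// to int, so locals such as "int opcode = n->Opcode();" become
// "Opcodes opcode = n->Opcode();" and every case label is qualified with
// "Opcodes::". The enumerators and function below are hypothetical stand-ins.
// ---------------------------------------------------------------------------
#include <cstdio>

enum class Opcodes { Op_AddP, Op_CastPP, Op_CheckCastPP, Op_Phi, Op_ConP };

static const char* classify(Opcodes opcode) {       // was: int opcode
  switch (opcode) {
    case Opcodes::Op_AddP:                           // was: case Op_AddP:
      return "field address";
    case Opcodes::Op_CastPP:
    case Opcodes::Op_CheckCastPP:
      return "pointer cast";
    case Opcodes::Op_Phi:
      return "control-flow merge";
    case Opcodes::Op_ConP:
      return "pointer constant";
  }
  return "other";
}

int main() {
  std::printf("%s\n", classify(Opcodes::Op_CastPP)); // prints "pointer cast"
  return 0;
}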
const Type* t = n->as_Phi()->type(); @@ -472,7 +472,7 @@ } break; } - case Op_Proj: { + case Opcodes::Op_Proj: { // we are only interested in the oop result projection from a call if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() && n->in(0)->as_Call()->returns_pointer()) { @@ -481,8 +481,8 @@ } break; } - case Op_Rethrow: // Exception object escapes - case Op_Return: { + case Opcodes::Op_Rethrow: // Exception object escapes + case Opcodes::Op_Return: { if (n->req() > TypeFunc::Parms && igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) { // Treat Return value as LocalVar with GlobalEscape escape state. @@ -491,21 +491,21 @@ } break; } - case Op_CompareAndExchangeP: - case Op_CompareAndExchangeN: - case Op_GetAndSetP: - case Op_GetAndSetN: { + case Opcodes::Op_CompareAndExchangeP: + case Opcodes::Op_CompareAndExchangeN: + case Opcodes::Op_GetAndSetP: + case Opcodes::Op_GetAndSetN: { add_objload_to_connection_graph(n, delayed_worklist); // fallthrough } - case Op_StoreP: - case Op_StoreN: - case Op_StoreNKlass: - case Op_StorePConditional: - case Op_WeakCompareAndSwapP: - case Op_WeakCompareAndSwapN: - case Op_CompareAndSwapP: - case Op_CompareAndSwapN: { + case Opcodes::Op_StoreP: + case Opcodes::Op_StoreN: + case Opcodes::Op_StoreNKlass: + case Opcodes::Op_StorePConditional: + case Opcodes::Op_WeakCompareAndSwapP: + case Opcodes::Op_WeakCompareAndSwapN: + case Opcodes::Op_CompareAndSwapP: + case Opcodes::Op_CompareAndSwapN: { Node* adr = n->in(MemNode::Address); const Type *adr_type = igvn->type(adr); adr_type = adr_type->make_ptr(); @@ -513,7 +513,7 @@ break; // skip dead nodes } if (adr_type->isa_oopptr() || - (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) && + (opcode == Opcodes::Op_StoreP || opcode == Opcodes::Op_StoreN || opcode == Opcodes::Op_StoreNKlass) && (adr_type == TypeRawPtr::NOTNULL && adr->in(AddPNode::Address)->is_Proj() && adr->in(AddPNode::Address)->in(0)->is_Allocate())) { @@ -531,17 +531,17 @@ if (adr->is_BoxLock()) break; // Stored value escapes in unsafe access. - if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) { + if ((opcode == Opcodes::Op_StoreP) && adr_type->isa_rawptr()) { // Pointer stores in G1 barriers looks like unsafe access. // Ignore such stores to be able scalar replace non-escaping // allocations. if (UseG1GC && adr->is_AddP()) { Node* base = get_addp_base(adr); - if (base->Opcode() == Op_LoadP && + if (base->Opcode() == Opcodes::Op_LoadP && base->in(MemNode::Address)->is_AddP()) { adr = base->in(MemNode::Address); Node* tls = get_addp_base(adr); - if (tls->Opcode() == Op_ThreadLocal) { + if (tls->Opcode() == Opcodes::Op_ThreadLocal) { int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); if (offs == in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_buf())) { @@ -564,20 +564,20 @@ } break; } - case Op_AryEq: - case Op_HasNegatives: - case Op_StrComp: - case Op_StrEquals: - case Op_StrIndexOf: - case Op_StrIndexOfChar: - case Op_StrInflatedCopy: - case Op_StrCompressedCopy: - case Op_EncodeISOArray: { + case Opcodes::Op_AryEq: + case Opcodes::Op_HasNegatives: + case Opcodes::Op_StrComp: + case Opcodes::Op_StrEquals: + case Opcodes::Op_StrIndexOf: + case Opcodes::Op_StrIndexOfChar: + case Opcodes::Op_StrInflatedCopy: + case Opcodes::Op_StrCompressedCopy: + case Opcodes::Op_EncodeISOArray: { add_local_var(n, PointsToNode::ArgEscape); delayed_worklist->push(n); // Process it later. 
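// ---------------------------------------------------------------------------
// Illustrative sketch (not HotSpot code): the "delayed worklist" idiom visible
// above. During the first pass a node is only registered (add_local_var) and
// pushed for later ("Process it later"), because the nodes it points to may
// not have connection-graph entries yet; edges are added in a second pass.
// The toy graph below is hypothetical and shows only the two-pass shape.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <map>
#include <utility>
#include <vector>

struct Entry { std::vector<int> edges_to; };

int main() {
  std::map<int, Entry> graph;                       // node id -> graph entry
  std::vector<std::pair<int,int>> delayed;          // edges seen before both ends exist
  int inputs[][2] = { {2, 5}, {5, 7}, {7, 2} };
  // Pass 1: register each source node, defer its edges.
  for (auto& in : inputs) {
    graph[in[0]];
    delayed.push_back({in[0], in[1]});              // process it later
  }
  // Pass 2: every node is registered now, so edges can be added safely.
  for (auto& e : delayed) {
    graph[e.second];
    graph[e.first].edges_to.push_back(e.second);
  }
  std::printf("node 2 has %zu edge(s)\n", graph[2].edges_to.size());
  return 0;
}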
break; } - case Op_ThreadLocal: { + case Opcodes::Op_ThreadLocal: { add_java_object(n, PointsToNode::ArgEscape); break; } @@ -613,26 +613,26 @@ assert(n->is_Store() || n->is_LoadStore() || (n_ptn != NULL) && (n_ptn->ideal_node() != NULL), "node should be registered already"); - int opcode = n->Opcode(); + Opcodes opcode = n->Opcode(); switch (opcode) { - case Op_AddP: { + case Opcodes::Op_AddP: { Node* base = get_addp_base(n); PointsToNode* ptn_base = ptnode_adr(base->_idx); assert(ptn_base != NULL, "field's base should be registered"); add_base(n_ptn->as_Field(), ptn_base); break; } - case Op_CastPP: - case Op_CheckCastPP: - case Op_EncodeP: - case Op_DecodeN: - case Op_EncodePKlass: - case Op_DecodeNKlass: { + case Opcodes::Op_CastPP: + case Opcodes::Op_CheckCastPP: + case Opcodes::Op_EncodeP: + case Opcodes::Op_DecodeN: + case Opcodes::Op_EncodePKlass: + case Opcodes::Op_DecodeNKlass: { add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL); break; } - case Op_CMoveP: { + case Opcodes::Op_CMoveP: { for (uint i = CMoveNode::IfFalse; i < n->req(); i++) { Node* in = n->in(i); if (in == NULL) @@ -646,9 +646,9 @@ } break; } - case Op_LoadP: - case Op_LoadN: - case Op_LoadPLocked: { + case Opcodes::Op_LoadP: + case Opcodes::Op_LoadN: + case Opcodes::Op_LoadPLocked: { // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because // ThreadLocal has RawPtr type. const Type* t = _igvn->type(n); @@ -659,7 +659,7 @@ } ELSE_FAIL("Op_LoadP"); } - case Op_Phi: { + case Opcodes::Op_Phi: { // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because // ThreadLocal has RawPtr type. const Type* t = n->as_Phi()->type(); @@ -679,7 +679,7 @@ } ELSE_FAIL("Op_Phi"); } - case Op_Proj: { + case Opcodes::Op_Proj: { // we are only interested in the oop result projection from a call if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() && n->in(0)->as_Call()->returns_pointer()) { @@ -688,8 +688,8 @@ } ELSE_FAIL("Op_Proj"); } - case Op_Rethrow: // Exception object escapes - case Op_Return: { + case Opcodes::Op_Rethrow: // Exception object escapes + case Opcodes::Op_Return: { if (n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) { // Treat Return value as LocalVar with GlobalEscape escape state. 
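// ---------------------------------------------------------------------------
// Illustrative sketch (not HotSpot code): in the second pass above, cast-like
// nodes (CastPP, CheckCastPP, EncodeP/DecodeN, ...) call
// add_local_var_and_edge(n, NoEscape, n->in(1), ...), i.e. they behave as
// plain copies of their input in the connection graph. The toy points-to map
// below shows that general idea with hypothetical node ids.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <map>
#include <set>

int main() {
  std::map<int, std::set<int>> points_to;   // node id -> allocations it may reference
  points_to[10] = {100};                    // node 10 points to allocation 100

  // A "cast" of node 10: the result node simply inherits its input's targets.
  int cast_node = 11, cast_input = 10;
  points_to[cast_node] = points_to[cast_input];

  std::printf("cast node points to %zu object(s)\n", points_to[cast_node].size());
  return 0;
}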
@@ -699,18 +699,18 @@ } ELSE_FAIL("Op_Return"); } - case Op_StoreP: - case Op_StoreN: - case Op_StoreNKlass: - case Op_StorePConditional: - case Op_CompareAndExchangeP: - case Op_CompareAndExchangeN: - case Op_CompareAndSwapP: - case Op_CompareAndSwapN: - case Op_WeakCompareAndSwapP: - case Op_WeakCompareAndSwapN: - case Op_GetAndSetP: - case Op_GetAndSetN: { + case Opcodes::Op_StoreP: + case Opcodes::Op_StoreN: + case Opcodes::Op_StoreNKlass: + case Opcodes::Op_StorePConditional: + case Opcodes::Op_CompareAndExchangeP: + case Opcodes::Op_CompareAndExchangeN: + case Opcodes::Op_CompareAndSwapP: + case Opcodes::Op_CompareAndSwapN: + case Opcodes::Op_WeakCompareAndSwapP: + case Opcodes::Op_WeakCompareAndSwapN: + case Opcodes::Op_GetAndSetP: + case Opcodes::Op_GetAndSetN: { Node* adr = n->in(MemNode::Address); const Type *adr_type = _igvn->type(adr); adr_type = adr_type->make_ptr(); @@ -721,12 +721,12 @@ break; } #endif - if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN || - opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) { + if (opcode == Opcodes::Op_GetAndSetP || opcode == Opcodes::Op_GetAndSetN || + opcode == Opcodes::Op_CompareAndExchangeN || opcode == Opcodes::Op_CompareAndExchangeP) { add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL); } if (adr_type->isa_oopptr() || - (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) && + (opcode == Opcodes::Op_StoreP || opcode == Opcodes::Op_StoreN || opcode == Opcodes::Op_StoreNKlass) && (adr_type == TypeRawPtr::NOTNULL && adr->in(AddPNode::Address)->is_Proj() && adr->in(AddPNode::Address)->in(0)->is_Allocate())) { @@ -739,7 +739,7 @@ assert(ptn != NULL, "node should be registered"); add_edge(adr_ptn, ptn); break; - } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) { + } else if ((opcode == Opcodes::Op_StoreP) && adr_type->isa_rawptr()) { // Stored value escapes in unsafe access. Node *val = n->in(MemNode::ValueIn); PointsToNode* ptn = ptnode_adr(val->_idx); @@ -756,15 +756,15 @@ } ELSE_FAIL("Op_StoreP"); } - case Op_AryEq: - case Op_HasNegatives: - case Op_StrComp: - case Op_StrEquals: - case Op_StrIndexOf: - case Op_StrIndexOfChar: - case Op_StrInflatedCopy: - case Op_StrCompressedCopy: - case Op_EncodeISOArray: { + case Opcodes::Op_AryEq: + case Opcodes::Op_HasNegatives: + case Opcodes::Op_StrComp: + case Opcodes::Op_StrEquals: + case Opcodes::Op_StrIndexOf: + case Opcodes::Op_StrIndexOfChar: + case Opcodes::Op_StrInflatedCopy: + case Opcodes::Op_StrCompressedCopy: + case Opcodes::Op_EncodeISOArray: { // char[]/byte[] arrays passed to string intrinsic do not escape but // they are not scalar replaceable. Adjust escape state for them. // Start from in(2) edge since in(1) is memory edge. @@ -904,7 +904,7 @@ } else { // An other type of call, assume the worst case: // returned value is unknown and globally escapes. 
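// ---------------------------------------------------------------------------
// Illustrative sketch (not HotSpot code): for the StoreP/CAS family handled
// above, the final-edges pass looks up the connection-graph node for the
// store address and the node for the stored value and links them with
// add_edge(), so the field may afterwards point to whatever the value points
// to. The structures below are hypothetical simplifications of PointsToNode.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <vector>

struct PtNode { std::vector<PtNode*> edges; };

static void add_edge(PtNode* from, PtNode* to) { from->edges.push_back(to); }

int main() {
  PtNode field;                 // graph node for the store address ("base.f")
  PtNode value;                 // graph node for the stored oop
  add_edge(&field, &value);     // store: base.f = value
  std::printf("field has %zu outgoing edge(s)\n", field.edges.size());
  return 0;
}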
- assert(call->Opcode() == Op_CallDynamicJava, "add failed case check"); + assert(call->Opcode() == Opcodes::Op_CallDynamicJava, "add failed case check"); map_ideal_node(call, phantom_obj); } } @@ -913,22 +913,22 @@ bool is_arraycopy = false; switch (call->Opcode()) { #ifdef ASSERT - case Op_Allocate: - case Op_AllocateArray: - case Op_Lock: - case Op_Unlock: + case Opcodes::Op_Allocate: + case Opcodes::Op_AllocateArray: + case Opcodes::Op_Lock: + case Opcodes::Op_Unlock: assert(false, "should be done already"); break; #endif - case Op_ArrayCopy: - case Op_CallLeafNoFP: + case Opcodes::Op_ArrayCopy: + case Opcodes::Op_CallLeafNoFP: // Most array copies are ArrayCopy nodes at this point but there // are still a few direct calls to the copy subroutines (See // PhaseStringOpts::copy_string()) - is_arraycopy = (call->Opcode() == Op_ArrayCopy) || + is_arraycopy = (call->Opcode() == Opcodes::Op_ArrayCopy) || call->as_CallLeaf()->is_call_to_arraycopystub(); // fall through - case Op_CallLeaf: { + case Opcodes::Op_CallLeaf: { // Stub calls, objects do not escape but they are not scale replaceable. // Adjust escape state for outgoing arguments. const TypeTuple * d = call->tf()->domain(); @@ -1044,7 +1044,7 @@ } break; } - case Op_CallStaticJava: { + case Opcodes::Op_CallStaticJava: { // For a static call, we know exactly what method is being called. // Use bytecode estimator to record the call's escape affects #ifdef ASSERT @@ -1872,7 +1872,7 @@ if (res != NULL) { #ifndef PRODUCT if (PrintOptimizePtrCompare) { - tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ")); + tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Opcodes::Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? 
"EQ" : "NotEQ")); if (Verbose) { n->dump(1); } @@ -1897,7 +1897,7 @@ Node *alloc = storestore->in(MemBarNode::Precedent)->in(0); assert (alloc->is_Allocate(), "storestore should point to AllocateNode"); if (not_global_escape(alloc)) { - MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot); + MemBarNode* mb = MemBarNode::make(C, Opcodes::Op_MemBarCPUOrder, Compile::AliasIdxBot); mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory)); mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control)); igvn->register_new_node_with_optimizer(mb); @@ -2067,7 +2067,7 @@ bt = field->layout_type(); } else { // Check for unsafe oop field access - if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN)) { + if (n->has_out_with(Opcodes::Op_StoreP, Opcodes::Op_LoadP, Opcodes::Op_StoreN, Opcodes::Op_LoadN)) { bt = T_OBJECT; (*unsafe) = true; } @@ -2083,7 +2083,7 @@ } } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) { // Allocation initialization, ThreadLocal field access, unsafe access - if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN)) { + if (n->has_out_with(Opcodes::Op_StoreP, Opcodes::Op_LoadP, Opcodes::Op_StoreN, Opcodes::Op_LoadN)) { bt = T_OBJECT; } } @@ -2307,9 +2307,9 @@ base = base->in(AddPNode::Address); } Node* uncast_base = base->uncast(); - int opcode = uncast_base->Opcode(); - assert(opcode == Op_ConP || opcode == Op_ThreadLocal || - opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() || + Opcodes opcode = uncast_base->Opcode(); + assert(opcode == Opcodes::Op_ConP || opcode == Opcodes::Op_ThreadLocal || + opcode == Opcodes::Op_CastX2P || uncast_base->is_DecodeNarrowPtr() || (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) || (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity"); } @@ -2621,7 +2621,7 @@ --i; #ifdef ASSERT } else if (use->is_Mem()) { - if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) { + if (use->Opcode() == Opcodes::Op_StoreCM && use->in(MemNode::OopStore) == n) { // Don't move related cardmark. continue; } @@ -2737,14 +2737,14 @@ break; } // Otherwise skip it (the call updated 'result' value). 
- } else if (result->Opcode() == Op_SCMemProj) { + } else if (result->Opcode() == Opcodes::Op_SCMemProj) { Node* mem = result->in(0); Node* adr = NULL; if (mem->is_LoadStore()) { adr = mem->in(MemNode::Address); } else { - assert(mem->Opcode() == Op_EncodeISOArray || - mem->Opcode() == Op_StrCompressedCopy, "sanity"); + assert(mem->Opcode() == Opcodes::Op_EncodeISOArray || + mem->Opcode() == Opcodes::Op_StrCompressedCopy, "sanity"); adr = mem->in(3); // Memory edge corresponds to destination array } const Type *at = igvn->type(adr); @@ -2758,7 +2758,7 @@ } } result = mem->in(MemNode::Memory); - } else if (result->Opcode() == Op_StrInflatedCopy) { + } else if (result->Opcode() == Opcodes::Op_StrInflatedCopy) { Node* adr = result->in(3); // Memory edge corresponds to destination array const Type *at = igvn->type(adr); if (at != Type::TOP) { @@ -3043,7 +3043,7 @@ n->is_CheckCastPP() || n->is_EncodeP() || n->is_DecodeN() || - (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) { + (n->is_ConstraintCast() && n->Opcode() == Opcodes::Op_CastPP)) { if (visited.test_set(n->_idx)) { assert(n->is_Phi(), "loops only through Phi's"); continue; // already processed @@ -3113,7 +3113,7 @@ use->is_CheckCastPP() || use->is_EncodeNarrowPtr() || use->is_DecodeNarrowPtr() || - (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) { + (use->is_ConstraintCast() && use->Opcode() == Opcodes::Op_CastPP)) { alloc_worklist.append_if_missing(use); #ifdef ASSERT } else if (use->is_Mem()) { @@ -3127,22 +3127,22 @@ if (m->is_MergeMem()) { assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist"); } - } else if (use->Opcode() == Op_EncodeISOArray) { + } else if (use->Opcode() == Opcodes::Op_EncodeISOArray) { if (use->in(MemNode::Memory) == n || use->in(3) == n) { // EncodeISOArray overwrites destination array memnode_worklist.append_if_missing(use); } } else { - uint op = use->Opcode(); + Opcodes op = use->Opcode(); if ((use->in(MemNode::Memory) == n) && - (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) { + (op == Opcodes::Op_StrCompressedCopy || op == Opcodes::Op_StrInflatedCopy)) { // They overwrite memory edge corresponding to destination array, memnode_worklist.append_if_missing(use); - } else if (!(op == Op_CmpP || op == Op_Conv2B || - op == Op_CastP2X || op == Op_StoreCM || - op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives || - op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || - op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) { + } else if (!(op == Opcodes::Op_CmpP || op == Opcodes::Op_Conv2B || + op == Opcodes::Op_CastP2X || op == Opcodes::Op_StoreCM || + op == Opcodes::Op_FastLock || op == Opcodes::Op_AryEq || op == Opcodes::Op_StrComp || op == Opcodes::Op_HasNegatives || + op == Opcodes::Op_StrCompressedCopy || op == Opcodes::Op_StrInflatedCopy || + op == Opcodes::Op_StrEquals || op == Opcodes::Op_StrIndexOf || op == Opcodes::Op_StrIndexOfChar)) { n->dump(); use->dump(); assert(false, "EA: missing allocation reference path"); @@ -3204,11 +3204,11 @@ n = n->as_MemBar()->proj_out(TypeFunc::Memory); if (n == NULL) continue; - } else if (n->Opcode() == Op_StrCompressedCopy || - n->Opcode() == Op_EncodeISOArray) { + } else if (n->Opcode() == Opcodes::Op_StrCompressedCopy || + n->Opcode() == Opcodes::Op_EncodeISOArray) { // get the memory projection - n = n->find_out_with(Op_SCMemProj); - assert(n->Opcode() == Op_SCMemProj, "memory projection required"); + n = n->find_out_with(Opcodes::Op_SCMemProj); 
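// ---------------------------------------------------------------------------
// Illustrative sketch (not HotSpot code): the hunks above change
// "uint op = use->Opcode();" to "Opcodes op = use->Opcode();". A scoped enum
// has no implicit conversion to an integral type, so keeping the old uint
// local would not compile; where an integer really is needed (printing,
// indexing), an explicit static_cast is the usual idiom. Names are
// hypothetical.
// ---------------------------------------------------------------------------
#include <cstdio>

enum class Opcodes : unsigned { Op_StrCompressedCopy = 1, Op_StrInflatedCopy = 2 };

static Opcodes current_opcode() { return Opcodes::Op_StrInflatedCopy; }

int main() {
  // uint op = current_opcode();           // error: no implicit conversion
  Opcodes op = current_opcode();           // what the patch does: keep the enum type
  if (op == Opcodes::Op_StrCompressedCopy || op == Opcodes::Op_StrInflatedCopy) {
    std::printf("string copy node, raw value %u\n",
                static_cast<unsigned>(op)); // explicit cast when an integer is required
  }
  return 0;
}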
+ assert(n->Opcode() == Opcodes::Op_SCMemProj, "memory projection required"); } else { assert(n->is_Mem(), "memory node required."); Node *addr = n->in(MemNode::Address); @@ -3231,8 +3231,8 @@ continue; // don't push users } else if (n->is_LoadStore()) { // get the memory projection - n = n->find_out_with(Op_SCMemProj); - assert(n->Opcode() == Op_SCMemProj, "memory projection required"); + n = n->find_out_with(Opcodes::Op_SCMemProj); + assert(n->Opcode() == Opcodes::Op_SCMemProj, "memory projection required"); } } // push user on appropriate worklist @@ -3241,7 +3241,7 @@ if (use->is_Phi() || use->is_ClearArray()) { memnode_worklist.append_if_missing(use); } else if (use->is_Mem() && use->in(MemNode::Memory) == n) { - if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores + if (use->Opcode() == Opcodes::Op_StoreCM) // Ignore cardmark stores continue; memnode_worklist.append_if_missing(use); } else if (use->is_MemBar()) { @@ -3253,23 +3253,23 @@ assert(use->in(MemNode::Memory) != n, "EA: missing memory path"); } else if (use->is_MergeMem()) { assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist"); - } else if (use->Opcode() == Op_EncodeISOArray) { + } else if (use->Opcode() == Opcodes::Op_EncodeISOArray) { if (use->in(MemNode::Memory) == n || use->in(3) == n) { // EncodeISOArray overwrites destination array memnode_worklist.append_if_missing(use); } } else { - uint op = use->Opcode(); + Opcodes op = use->Opcode(); if ((use->in(MemNode::Memory) == n) && - (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) { + (op == Opcodes::Op_StrCompressedCopy || op == Opcodes::Op_StrInflatedCopy)) { // They overwrite memory edge corresponding to destination array, memnode_worklist.append_if_missing(use); - } else if (!(op == Op_StoreCM || - (op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL && + } else if (!(op == Opcodes::Op_StoreCM || + (op == Opcodes::Op_CallLeaf && use->as_CallLeaf()->_name != NULL && strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) || - op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives || - op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || - op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) { + op == Opcodes::Op_AryEq || op == Opcodes::Op_StrComp || op == Opcodes::Op_HasNegatives || + op == Opcodes::Op_StrCompressedCopy || op == Opcodes::Op_StrInflatedCopy || + op == Opcodes::Op_StrEquals || op == Opcodes::Op_StrIndexOf || op == Opcodes::Op_StrIndexOfChar)) { n->dump(); use->dump(); assert(false, "EA: missing memory path");
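// ---------------------------------------------------------------------------
// Illustrative sketch (not HotSpot code): the worklist processing above (part
// of split_unique_types()) sorts each use by opcode -- cardmark stores
// (StoreCM) are skipped, string/array intrinsics that overwrite a memory edge
// go back onto the memory-node worklist, and anything unexpected trips an
// assert ("EA: missing memory path"). The loop below mirrors that shape with
// a hypothetical use list.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdio>
#include <vector>

enum class Opcodes { Op_StoreCM, Op_StrInflatedCopy, Op_EncodeISOArray, Op_CmpP };

int main() {
  std::vector<Opcodes> uses = { Opcodes::Op_StoreCM, Opcodes::Op_StrInflatedCopy,
                                Opcodes::Op_CmpP };
  std::vector<Opcodes> memnode_worklist;

  for (Opcodes op : uses) {
    if (op == Opcodes::Op_StoreCM) {
      continue;                                   // ignore cardmark stores
    } else if (op == Opcodes::Op_StrInflatedCopy ||
               op == Opcodes::Op_EncodeISOArray) {
      memnode_worklist.push_back(op);             // overwrites a memory edge: revisit it
    } else if (op == Opcodes::Op_CmpP) {
      // comparisons carry no memory state; nothing to do
    } else {
      assert(false && "unexpected use");          // analogous to "EA: missing memory path"
    }
  }
  std::printf("%zu node(s) queued for memory rework\n", memnode_worklist.size());
  return 0;
}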