--- old/src/share/vm/opto/memnode.cpp	2016-07-11 22:46:41.213395943 +0900
+++ new/src/share/vm/opto/memnode.cpp	2016-07-11 22:46:41.069396447 +0900
@@ -494,8 +494,8 @@
 // Find an arraycopy that must have set (can_see_stored_value=true) or
 // could have set (can_see_stored_value=false) the value for this load
 Node* LoadNode::find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const {
-  if (mem->is_Proj() && mem->in(0) != NULL && (mem->in(0)->Opcode() == Op_MemBarStoreStore ||
-                                               mem->in(0)->Opcode() == Op_MemBarCPUOrder)) {
+  if (mem->is_Proj() && mem->in(0) != NULL && (mem->in(0)->Opcode() == Opcodes::Op_MemBarStoreStore ||
+                                               mem->in(0)->Opcode() == Opcodes::Op_MemBarCPUOrder)) {
     Node* mb = mem->in(0);
     if (mb->in(0) != NULL && mb->in(0)->is_Proj() &&
         mb->in(0)->in(0) != NULL && mb->in(0)->in(0)->is_ArrayCopy()) {
@@ -734,7 +734,7 @@
 
 uint LoadNode::cmp( const Node &n ) const
 { return !Type::cmp( _type, ((LoadNode&)n)._type ); }
 const Type *LoadNode::bottom_type() const { return _type; }
-uint LoadNode::ideal_reg() const {
+Opcodes LoadNode::ideal_reg() const {
   return _type->ideal_reg();
 }
@@ -756,7 +756,7 @@
 // Helper function to allow a raw load without control edge for some cases
 bool LoadNode::is_immutable_value(Node* adr) {
   return (adr->is_AddP() && adr->in(AddPNode::Base)->is_top() &&
-          adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
+          adr->in(AddPNode::Address)->Opcode() == Opcodes::Op_ThreadLocal &&
           (adr->in(AddPNode::Offset)->find_intptr_t_con(-1) ==
            in_bytes(JavaThread::osthread_offset())));
 }
@@ -810,7 +810,7 @@
   if (mismatched) {
     load->set_mismatched_access();
   }
-  if (load->Opcode() == Op_LoadN) {
+  if (load->Opcode() == Opcodes::Op_LoadN) {
     Node* ld = gvn.transform(load);
     return new DecodeNNode(ld, ld->bottom_type()->make_ptr());
   }
@@ -950,15 +950,15 @@
   // through MemBarAcquire since the could allow them to move out of
   // a synchronized region.
   while (current->is_Proj()) {
-    int opc = current->in(0)->Opcode();
-    if ((final && (opc == Op_MemBarAcquire ||
-                   opc == Op_MemBarAcquireLock ||
-                   opc == Op_LoadFence)) ||
-        opc == Op_MemBarRelease ||
-        opc == Op_StoreFence ||
-        opc == Op_MemBarReleaseLock ||
-        opc == Op_MemBarStoreStore ||
-        opc == Op_MemBarCPUOrder) {
+    Opcodes opc = current->in(0)->Opcode();
+    if ((final && (opc == Opcodes::Op_MemBarAcquire ||
+                   opc == Opcodes::Op_MemBarAcquireLock ||
+                   opc == Opcodes::Op_LoadFence)) ||
+        opc == Opcodes::Op_MemBarRelease ||
+        opc == Opcodes::Op_StoreFence ||
+        opc == Opcodes::Op_MemBarReleaseLock ||
+        opc == Opcodes::Op_MemBarStoreStore ||
+        opc == Opcodes::Op_MemBarCPUOrder) {
       Node* mem = current->in(0)->in(TypeFunc::Memory);
       if (mem->is_MergeMem()) {
         MergeMemNode* merge = mem->as_MergeMem();
@@ -1126,10 +1126,10 @@
   BasicType bt = T_ILLEGAL;
   const Type* rt = NULL;
   switch (Opcode()) {
-    case Op_LoadUB: return this;
-    case Op_LoadUS: return this;
-    case Op_LoadB: bt = T_BOOLEAN; rt = TypeInt::UBYTE; break;
-    case Op_LoadS: bt = T_CHAR;    rt = TypeInt::CHAR;  break;
+    case Opcodes::Op_LoadUB: return this;
+    case Opcodes::Op_LoadUS: return this;
+    case Opcodes::Op_LoadB: bt = T_BOOLEAN; rt = TypeInt::UBYTE; break;
+    case Opcodes::Op_LoadS: bt = T_CHAR;    rt = TypeInt::CHAR;  break;
     default:
       assert(false, "no unsigned variant: %s", Name());
       return NULL;
@@ -1144,12 +1144,12 @@
   BasicType bt = T_ILLEGAL;
   const Type* rt = NULL;
   switch (Opcode()) {
-    case Op_LoadUB: bt = T_BYTE;  rt = TypeInt::BYTE;  break;
-    case Op_LoadUS: bt = T_SHORT; rt = TypeInt::SHORT; break;
-    case Op_LoadB: // fall through
-    case Op_LoadS: // fall through
-    case Op_LoadI: // fall through
-    case Op_LoadL: return this;
+    case Opcodes::Op_LoadUB: bt = T_BYTE;  rt = TypeInt::BYTE;  break;
+    case Opcodes::Op_LoadUS: bt = T_SHORT; rt = TypeInt::SHORT; break;
+    case Opcodes::Op_LoadB: // fall through
+    case Opcodes::Op_LoadS: // fall through
+    case Opcodes::Op_LoadI: // fall through
+    case Opcodes::Op_LoadL: return this;
     default:
       assert(false, "no signed variant: %s", Name());
       return NULL;
@@ -1200,7 +1200,7 @@
     int count = address->unpack_offsets(elements, ARRAY_SIZE(elements));
     if ((count > 0) && elements[0]->is_Con() &&
         ((count == 1) ||
-         (count == 2) && elements[1]->Opcode() == Op_LShiftX &&
+         (count == 2) && elements[1]->Opcode() == Opcodes::Op_LShiftX &&
                          elements[1]->in(2) == phase->intcon(shift))) {
       ciObjArray* array = base_type->const_oop()->as_obj_array();
       // Fetch the box object cache[0] at the base of the array and get its value
@@ -1233,12 +1233,12 @@
       // Remove the constant offset from the address and then
       result = phase->transform(new AddXNode(result, phase->MakeConX(-(int)offset)));
       // remove the scaling of the offset to recover the original index.
-      if (result->Opcode() == Op_LShiftX && result->in(2) == phase->intcon(shift)) {
+      if (result->Opcode() == Opcodes::Op_LShiftX && result->in(2) == phase->intcon(shift)) {
         // Peel the shift off directly but wrap it in a dummy node
         // since Ideal can't return existing nodes
         result = new RShiftXNode(result->in(1), phase->intcon(0));
       } else if (result->is_Add() && result->in(2)->is_Con() &&
-                 result->in(1)->Opcode() == Op_LShiftX &&
+                 result->in(1)->Opcode() == Opcodes::Op_LShiftX &&
                  result->in(1)->in(2) == phase->intcon(shift)) {
         // We can't do general optimization: ((X<<Z) + Y) >> Z ==> X + (Y>>Z)
         // but for boxing cache access we know that X<<Z will not overflow
       // Boxing/unboxing can be done from signed & unsigned loads (e.g. LoadUB -> ... -> LoadB pair).
       // Need to preserve unboxing load type if it is unsigned.
       switch(this->Opcode()) {
-        case Op_LoadUB:
+        case Opcodes::Op_LoadUB:
           result = new AndINode(phase->transform(result), phase->intcon(0xFF));
           break;
-        case Op_LoadUS:
+        case Opcodes::Op_LoadUS:
           result = new AndINode(phase->transform(result), phase->intcon(0xFFFF));
           break;
       }
@@ -1485,7 +1485,7 @@
 
   // Skip up past a SafePoint control. Cannot do this for Stores because
   // pointer stores & cardmarks must stay on the same side of a SafePoint.
-  if( ctrl != NULL && ctrl->Opcode() == Op_SafePoint &&
+  if( ctrl != NULL && ctrl->Opcode() == Opcodes::Op_SafePoint &&
       phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw ) {
     ctrl = ctrl->in(0);
     set_req(MemNode::Control,ctrl);
@@ -1604,18 +1604,18 @@
     if (tkls->offset() == in_bytes(Klass::modifier_flags_offset())) {
       // The field is Klass::_modifier_flags. Return its (constant) value.
       // (Folds up the 2nd indirection in aClassConstant.getModifiers().)
-      assert(this->Opcode() == Op_LoadI, "must load an int from _modifier_flags");
+      assert(this->Opcode() == Opcodes::Op_LoadI, "must load an int from _modifier_flags");
       return TypeInt::make(klass->modifier_flags());
     }
     if (tkls->offset() == in_bytes(Klass::access_flags_offset())) {
       // The field is Klass::_access_flags. Return its (constant) value.
       // (Folds up the 2nd indirection in Reflection.getClassAccessFlags(aClassConstant).)
-      assert(this->Opcode() == Op_LoadI, "must load an int from _access_flags");
+      assert(this->Opcode() == Opcodes::Op_LoadI, "must load an int from _access_flags");
       return TypeInt::make(klass->access_flags());
     }
     if (tkls->offset() == in_bytes(Klass::layout_helper_offset())) {
       // The field is Klass::_layout_helper. Return its constant value if known.
-      assert(this->Opcode() == Op_LoadI, "must load an int from _layout_helper");
+      assert(this->Opcode() == Opcodes::Op_LoadI, "must load an int from _layout_helper");
       return TypeInt::make(klass->layout_helper());
     }
@@ -1682,7 +1682,7 @@
   // expression (LShiftL quux 3) independently optimized to the constant 8.
   if ((t->isa_int() == NULL) && (t->isa_long() == NULL)
       && (_type->isa_vect() == NULL)
-      && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
+      && Opcode() != Opcodes::Op_LoadKlass && Opcode() != Opcodes::Op_LoadNKlass) {
     // t might actually be lower than _type, if _type is a unique
     // concrete subclass of abstract class t.
     if (off_beyond_header) {  // is the offset beyond the header?
@@ -1737,7 +1737,7 @@
             tp->is_klassptr()->klass()->is_java_lang_Object() ||
             // also allow array-loading from the primary supertype
             // array during subtype checks
-            Opcode() == Op_LoadKlass,
+            Opcode() == Opcodes::Op_LoadKlass,
             "Field accesses must be precise" );
     // For klass/static loads, we expect the _type to be precise
   }
@@ -1752,7 +1752,7 @@
     if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
       // The field is Klass::_super_check_offset. Return its (constant) value.
       // (Folds up type checking code.)
-      assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
+      assert(Opcode() == Opcodes::Op_LoadI, "must load an int from _super_check_offset");
       return TypeInt::make(klass->super_check_offset());
     }
     // Compute index into primary_supers array
@@ -1761,7 +1761,7 @@
     if( depth < ciKlass::primary_super_limit() ) {
       // The field is an element of Klass::_primary_supers. Return its (constant) value.
       // (Folds up type checking code.)
-      assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
+      assert(Opcode() == Opcodes::Op_LoadKlass, "must load a klass from _primary_supers");
       ciKlass *ss = klass->super_of_depth(depth);
       return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
     }
@@ -1770,7 +1770,7 @@
     if (tkls->offset() == in_bytes(Klass::java_mirror_offset())) {
       // The field is Klass::_java_mirror. Return its (constant) value.
       // (Folds up the 2nd indirection in anObjConstant.getClass().)
-      assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
+      assert(Opcode() == Opcodes::Op_LoadP, "must load an oop from _java_mirror");
       return TypeInstPtr::make(klass->java_mirror());
     }
   }
@@ -1791,7 +1791,7 @@
       depth <= klass->super_depth() ) { // allow self-depth checks to handle self-check case
     // The field is an element of Klass::_primary_supers. Return its (constant) value.
     // (Folds up type checking code.)
-    assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
+    assert(Opcode() == Opcodes::Op_LoadKlass, "must load a klass from _primary_supers");
     ciKlass *ss = klass->super_of_depth(depth);
     return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
   }
@@ -1808,7 +1808,7 @@
       ) {
     // Note: When interfaces are reliable, we can narrow the interface
     // test to (klass != Serializable && klass != Cloneable).
-    assert(Opcode() == Op_LoadI, "must load an int from _layout_helper");
+    assert(Opcode() == Opcodes::Op_LoadI, "must load an int from _layout_helper");
     jint min_size = Klass::instance_layout_helper(oopDesc::header_size(), false);
     // The key property of this type is that it folds up tests
     // for array-ness, since it proves that the layout_helper is positive.
@@ -2389,16 +2389,16 @@
   // require exactly ONE user until such time as we clone 'mem' for
   // each of 'mem's uses (thus making the exactly-1-user-rule hold
   // true).
-  while (st->is_Store() && st->outcnt() == 1 && st->Opcode() != Op_StoreCM) {
+  while (st->is_Store() && st->outcnt() == 1 && st->Opcode() != Opcodes::Op_StoreCM) {
     // Looking at a dead closed cycle of memory?
     assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
     assert(Opcode() == st->Opcode() ||
-           st->Opcode() == Op_StoreVector ||
-           Opcode() == Op_StoreVector ||
+           st->Opcode() == Opcodes::Op_StoreVector ||
+           Opcode() == Opcodes::Op_StoreVector ||
            phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
-           (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
+           (Opcode() == Opcodes::Op_StoreL && st->Opcode() == Opcodes::Op_StoreI) || // expanded ClearArrayNode
            (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
-           "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
+           "no mismatched stores, except on raw memory: %s %s", NodeClassNames[static_cast<uint>(Opcode())], NodeClassNames[static_cast<uint>(st->Opcode())]);
 
     if (st->in(MemNode::Address)->eqv_uncast(address) &&
         st->as_Store()->memory_size() <= this->memory_size()) {
@@ -2521,7 +2521,7 @@
 // (StoreB ... (valIn) )
 Node *StoreNode::Ideal_masked_input(PhaseGVN *phase, uint mask) {
   Node *val = in(MemNode::ValueIn);
-  if( val->Opcode() == Op_AndI ) {
+  if( val->Opcode() == Opcodes::Op_AndI ) {
     const TypeInt *t = phase->type( val->in(2) )->isa_int();
     if( t && t->is_con() && (t->get_con() & mask) == mask ) {
       set_req(MemNode::ValueIn, val->in(1));
@@ -2539,11 +2539,11 @@
 // (StoreB ... (valIn) )
 Node *StoreNode::Ideal_sign_extended_input(PhaseGVN *phase, int num_bits) {
   Node *val = in(MemNode::ValueIn);
-  if( val->Opcode() == Op_RShiftI ) {
+  if( val->Opcode() == Opcodes::Op_RShiftI ) {
     const TypeInt *t = phase->type( val->in(2) )->isa_int();
     if( t && t->is_con() && (t->get_con() <= num_bits) ) {
       Node *shl = val->in(1);
-      if( shl->Opcode() == Op_LShiftI ) {
+      if( shl->Opcode() == Opcodes::Op_LShiftI ) {
         const TypeInt *t2 = phase->type( shl->in(2) )->isa_int();
         if( t2 && t2->is_con() && (t2->get_con() == t->get_con()) ) {
           set_req(MemNode::ValueIn, shl->in(1));
@@ -2675,14 +2675,14 @@
   init_class_id(Class_LoadStore);
 }
 
-uint LoadStoreNode::ideal_reg() const {
+Opcodes LoadStoreNode::ideal_reg() const {
   return _type->ideal_reg();
 }
 
 bool LoadStoreNode::result_not_used() const {
   for( DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++ ) {
     Node *x = fast_out(i);
-    if (x->Opcode() == Op_SCMemProj) continue;
+    if (x->Opcode() == Opcodes::Op_SCMemProj) continue;
     return false;
   }
   return true;
 }
@@ -2749,7 +2749,7 @@
   if (atp == NULL)  atp = TypePtr::BOTTOM;
   else              atp = atp->add_offset(Type::OffsetBot);
   // Get base for derived pointer purposes
-  if( adr->Opcode() != Op_AddP ) Unimplemented();
+  if( adr->Opcode() != Opcodes::Op_AddP ) Unimplemented();
   Node *base = adr->in(1);
 
   Node *zero = phase->makecon(TypeLong::ZERO);
@@ -2891,19 +2891,19 @@
 }
 
 //------------------------------make-------------------------------------------
-MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) {
+MemBarNode* MemBarNode::make(Compile* C, Opcodes opcode, int atp, Node* pn) {
   switch (opcode) {
-  case Op_MemBarAcquire:     return new MemBarAcquireNode(C, atp, pn);
-  case Op_LoadFence:         return new LoadFenceNode(C, atp, pn);
-  case Op_MemBarRelease:     return new MemBarReleaseNode(C, atp, pn);
-  case Op_StoreFence:        return new StoreFenceNode(C, atp, pn);
-  case Op_MemBarAcquireLock: return new MemBarAcquireLockNode(C, atp, pn);
-  case Op_MemBarReleaseLock: return new MemBarReleaseLockNode(C, atp, pn);
-  case Op_MemBarVolatile:    return new MemBarVolatileNode(C, atp, pn);
-  case Op_MemBarCPUOrder:    return new MemBarCPUOrderNode(C, atp, pn);
-  case Op_OnSpinWait:        return new OnSpinWaitNode(C, atp, pn);
-  case Op_Initialize:        return new InitializeNode(C, atp, pn);
-  case Op_MemBarStoreStore:  return new MemBarStoreStoreNode(C, atp, pn);
+  case Opcodes::Op_MemBarAcquire:     return new MemBarAcquireNode(C, atp, pn);
+  case Opcodes::Op_LoadFence:         return new LoadFenceNode(C, atp, pn);
+  case Opcodes::Op_MemBarRelease:     return new MemBarReleaseNode(C, atp, pn);
+  case Opcodes::Op_StoreFence:        return new StoreFenceNode(C, atp, pn);
+  case Opcodes::Op_MemBarAcquireLock: return new MemBarAcquireLockNode(C, atp, pn);
+  case Opcodes::Op_MemBarReleaseLock: return new MemBarReleaseLockNode(C, atp, pn);
+  case Opcodes::Op_MemBarVolatile:    return new MemBarVolatileNode(C, atp, pn);
+  case Opcodes::Op_MemBarCPUOrder:    return new MemBarCPUOrderNode(C, atp, pn);
+  case Opcodes::Op_OnSpinWait:        return new OnSpinWaitNode(C, atp, pn);
+  case Opcodes::Op_Initialize:        return new InitializeNode(C, atp, pn);
+  case Opcodes::Op_MemBarStoreStore:  return new MemBarStoreStoreNode(C, atp, pn);
   default: ShouldNotReachHere(); return NULL;
   }
 }
@@ -2922,15 +2922,15 @@
   // Eliminate volatile MemBars for scalar replaced objects.
   if (can_reshape && req() == (Precedent+1)) {
     bool eliminate = false;
-    int opc = Opcode();
-    if ((opc == Op_MemBarAcquire || opc == Op_MemBarVolatile)) {
+    Opcodes opc = Opcode();
+    if ((opc == Opcodes::Op_MemBarAcquire || opc == Opcodes::Op_MemBarVolatile)) {
       // Volatile field loads and stores.
       Node* my_mem = in(MemBarNode::Precedent);
       // The MembarAquire may keep an unused LoadNode alive through the Precedent edge
-      if ((my_mem != NULL) && (opc == Op_MemBarAcquire) && (my_mem->outcnt() == 1)) {
+      if ((my_mem != NULL) && (opc == Opcodes::Op_MemBarAcquire) && (my_mem->outcnt() == 1)) {
        // if the Precedent is a decodeN and its input (a Load) is used at more than one place,
        // replace this Precedent (decodeN) with the Load instead.
-        if ((my_mem->Opcode() == Op_DecodeN) && (my_mem->in(1)->outcnt() > 1)) {
+        if ((my_mem->Opcode() == Opcodes::Op_DecodeN) && (my_mem->in(1)->outcnt() > 1)) {
           Node* load_node = my_mem->in(1);
           set_req(MemBarNode::Precedent, load_node);
           phase->is_IterGVN()->_worklist.push(my_mem);
@@ -2952,7 +2952,7 @@
           eliminate = true;
         }
       }
-    } else if (opc == Op_MemBarRelease) {
+    } else if (opc == Opcodes::Op_MemBarRelease) {
       // Final field stores.
       Node* alloc = AllocateNode::Ideal_allocation(in(MemBarNode::Precedent), phase);
       if ((alloc != NULL) && alloc->is_Allocate() &&
@@ -2988,7 +2988,7 @@
   switch (proj->_con) {
   case TypeFunc::Control:
   case TypeFunc::Memory:
-    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
+    return new MachProjNode(this,proj->_con,RegMask::Empty,static_cast<Opcodes>(MachProjNode::projType::unmatched_proj));
   }
   ShouldNotReachHere();
   return NULL;
@@ -3103,7 +3103,7 @@
 const RegMask &InitializeNode::in_RegMask(uint idx) const {
   // This edge should be set to top, by the set_complete. But be conservative.
   if (idx == InitializeNode::RawAddress)
-    return *(Compile::current()->matcher()->idealreg2spillmask[in(idx)->ideal_reg()]);
+    return *(Compile::current()->matcher()->idealreg2spillmask[static_cast<uint>(in(idx)->ideal_reg())]);
   return RegMask::Empty;
 }
@@ -3585,7 +3585,7 @@
     }
 
     if (type == T_LONG && Matcher::isSimpleConstant64(con) &&
-        st->Opcode() == Op_StoreL) {
+        st->Opcode() == Opcodes::Op_StoreL) {
       continue;                 // This StoreL is already optimal.
     }
@@ -3598,7 +3598,7 @@
         && (st_off & BytesPerInt) == BytesPerInt) {
       jlong lcon = tiles[j];
       if (!Matcher::isSimpleConstant64(lcon) &&
-          st->Opcode() == Op_StoreI) {
+          st->Opcode() == Opcodes::Op_StoreI) {
         // This StoreI is already optimal by itself.
         jint* intcon = (jint*) &tiles[j];
         intcon[1] = 0;  // undo the store_constant()
@@ -3609,7 +3609,7 @@
         st = nodes[j];
         st_off -= BytesPerInt;
         con = intcon[0];
-        if (con != 0 && st != NULL && st->Opcode() == Op_StoreI) {
+        if (con != 0 && st != NULL && st->Opcode() == Opcodes::Op_StoreI) {
          assert(st_off >= header_size, "still ignoring header");
           assert(get_store_offset(st, phase) == st_off, "must be");
           assert(in(i-1) == zmem, "must be");
@@ -3932,7 +3932,7 @@
     intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
     if (zeroes_done + BytesPerLong >= size_limit) {
       assert(allocation() != NULL, "");
-      if (allocation()->Opcode() == Op_Allocate) {
+      if (allocation()->Opcode() == Opcodes::Op_Allocate) {
         Node* klass_node = allocation()->in(AllocateNode::KlassNode);
         ciKlass* k = phase->type(klass_node)->is_klassptr()->klass();
         if (zeroes_done == k->layout_helper())
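Taken together, the hunks above follow one mechanical pattern: opcode values move from plain `int`/`uint` constants (`Op_*`) into a scoped `enum class Opcodes`, so every comparison, `switch`, and `ideal_reg()` result gains an `Opcodes::` qualifier, and every place that used an opcode as an array index (`NodeClassNames[...]`, `idealreg2spillmask[...]`) now needs an explicit `static_cast`. The stand-alone sketch below is only an illustration of why those casts appear; the names (`Opcodes`, `kNames`, `name_of`) mirror the patch but are hypothetical and the snippet is not HotSpot code.

```cpp
#include <cstdint>
#include <iostream>

// Illustrative scoped enum standing in for the patch's Opcodes type (values invented).
enum class Opcodes : std::uint32_t {
  Op_LoadB,
  Op_LoadI,
  Op_StoreI,
  Op_last
};

// With a plain enum or int, kNames[op] compiles as-is; with an enum class the
// implicit conversion to an integer is gone, so indexing needs static_cast,
// just like NodeClassNames[static_cast<uint>(Opcode())] in the patched asserts.
static const char* const kNames[] = { "LoadB", "LoadI", "StoreI" };

static const char* name_of(Opcodes op) {
  return kNames[static_cast<std::uint32_t>(op)];
}

int main() {
  Opcodes op = Opcodes::Op_LoadI;

  // Comparisons and switch labels must spell out the enum scope, as in the hunks above.
  switch (op) {
    case Opcodes::Op_LoadB: std::cout << "byte load\n"; break;
    case Opcodes::Op_LoadI: std::cout << "int load\n";  break;
    default:                std::cout << "other\n";     break;
  }

  std::cout << name_of(op) << "\n";
  return 0;
}
```

The trade-off shown here matches the diff: the scoped enum buys type safety (no accidental mixing of opcodes with unrelated integers) at the cost of explicit casts wherever an integral value is genuinely required.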