--- old/src/share/vm/opto/library_call.cpp	2016-07-11 22:46:30.049435030 +0900
+++ new/src/share/vm/opto/library_call.cpp	2016-07-11 22:46:29.899435555 +0900
@@ -207,7 +207,7 @@
   Node * load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString, bool is_exact, bool is_static, ciInstanceKlass * fromKls);
   Node * field_address_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString, bool is_exact, bool is_static, ciInstanceKlass * fromKls);
-  Node* make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae);
+  Node* make_string_method_node(Opcodes opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae);
   bool inline_string_compareTo(StrIntrinsicNode::ArgEnc ae);
   bool inline_string_indexOf(StrIntrinsicNode::ArgEnc ae);
   bool inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae);
@@ -480,7 +480,7 @@
   if (!jvms()->has_method()) {
     // Root JVMState has a null method.
-    assert(map()->memory()->Opcode() == Op_Parm, "");
+    assert(map()->memory()->Opcode() == Opcodes::Op_Parm, "");
     // Insert the memory aliasing node
     set_all_memory(reset_memory());
   }
@@ -842,7 +842,7 @@
 Node* LibraryCallKit::try_to_predicate(int predicate) {
   if (!jvms()->has_method()) {
     // Root JVMState has a null method.
-    assert(map()->memory()->Opcode() == Op_Parm, "");
+    assert(map()->memory()->Opcode() == Opcodes::Op_Parm, "");
     // Insert the memory aliasing node
     set_all_memory(reset_memory());
   }
@@ -1022,18 +1022,18 @@
 // str1 and str2 pointing to byte[] nodes containing Latin1 or UTF16 encoded
 // characters (depending on 'is_byte'). cnt1 and cnt2 are pointing to Int nodes
 // containing the lengths of str1 and str2.
-Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae) {
+Node* LibraryCallKit::make_string_method_node(Opcodes opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae) {
   Node* result = NULL;
   switch (opcode) {
-  case Op_StrIndexOf:
+  case Opcodes::Op_StrIndexOf:
     result = new StrIndexOfNode(control(), memory(TypeAryPtr::BYTES),
                                 str1_start, cnt1, str2_start, cnt2, ae);
     break;
-  case Op_StrComp:
+  case Opcodes::Op_StrComp:
     result = new StrCompNode(control(), memory(TypeAryPtr::BYTES),
                              str1_start, cnt1, str2_start, cnt2, ae);
     break;
-  case Op_StrEquals:
+  case Opcodes::Op_StrEquals:
     // We already know that cnt1 == cnt2 here (checked in 'inline_string_equals').
     // Use the constant length if there is one because optimized match rule may exist.
     result = new StrEqualsNode(control(), memory(TypeAryPtr::BYTES),
@@ -1063,7 +1063,7 @@
   Node* arg2_start = array_element_address(arg2, intcon(0), T_BYTE);
   Node* arg2_cnt = load_array_length(arg2);
 
-  Node* result = make_string_method_node(Op_StrComp, arg1_start, arg1_cnt, arg2_start, arg2_cnt, ae);
+  Node* result = make_string_method_node(Opcodes::Op_StrComp, arg1_start, arg1_cnt, arg2_start, arg2_cnt, ae);
   set_result(result);
   return true;
 }
@@ -1098,7 +1098,7 @@
   // Check for count == 0 is done by assembler code for StrEquals.
 
   if (!stopped()) {
-    Node* equals = make_string_method_node(Op_StrEquals, arg1_start, arg1_cnt, arg2_start, arg2_cnt, ae);
+    Node* equals = make_string_method_node(Opcodes::Op_StrEquals, arg1_start, arg1_cnt, arg2_start, arg2_cnt, ae);
     phi->init_req(1, equals);
     region->init_req(1, control());
   }
@@ -1196,7 +1196,7 @@
 
 //------------------------------inline_string_indexOf------------------------
 bool LibraryCallKit::inline_string_indexOf(StrIntrinsicNode::ArgEnc ae) {
-  if (!Matcher::match_rule_supported(Op_StrIndexOf)) {
+  if (!Matcher::match_rule_supported(Opcodes::Op_StrIndexOf)) {
     return false;
   }
   Node* src = argument(0);
@@ -1240,7 +1240,7 @@
   if (too_many_traps(Deoptimization::Reason_intrinsic)) {
     return false;
   }
-  if (!Matcher::match_rule_supported(Op_StrIndexOf)) {
+  if (!Matcher::match_rule_supported(Opcodes::Op_StrIndexOf)) {
     return false;
   }
   assert(callee()->signature()->size() == 5, "String.indexOf() has 5 arguments");
@@ -1315,7 +1315,7 @@
     }
   }
   if (!stopped()) {
-    return make_string_method_node(Op_StrIndexOf, src_start, src_count, tgt_start, tgt_count, ae);
+    return make_string_method_node(Opcodes::Op_StrIndexOf, src_start, src_count, tgt_start, tgt_count, ae);
   }
   return NULL;
 }
@@ -1325,7 +1325,7 @@
   if (too_many_traps(Deoptimization::Reason_intrinsic)) {
     return false;
   }
-  if (!Matcher::match_rule_supported(Op_StrIndexOfChar)) {
+  if (!Matcher::match_rule_supported(Opcodes::Op_StrIndexOfChar)) {
     return false;
   }
   assert(callee()->signature()->size() == 4, "String.indexOfChar() has 4 arguments");
@@ -1445,7 +1445,7 @@
       // escape analysis can go from the MemBarStoreStoreNode to the
       // AllocateNode and eliminate the MemBarStoreStoreNode if possible
       // based on the escape status of the AllocateNode.
-      insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
+      insert_mem_bar(Opcodes::Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
     }
     if (compress) {
       set_result(_gvn.transform(count));
     }
@@ -1537,9 +1537,9 @@
       // escape analysis can go from the MemBarStoreStoreNode to the
       // AllocateNode and eliminate the MemBarStoreStoreNode if possible
       // based on the escape status of the AllocateNode.
-      insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
+      insert_mem_bar(Opcodes::Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
     } else {
-      insert_mem_bar(Op_MemBarCPUOrder);
+      insert_mem_bar(Opcodes::Op_MemBarCPUOrder);
     }
   } // original reexecute is set back here
@@ -1621,9 +1621,9 @@
       // escape analysis can go from the MemBarStoreStoreNode to the
       // AllocateNode and eliminate the MemBarStoreStoreNode if possible
       // based on the escape status of the AllocateNode.
-      insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
+      insert_mem_bar(Opcodes::Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
     } else {
-      insert_mem_bar(Op_MemBarCPUOrder);
+      insert_mem_bar(Opcodes::Op_MemBarCPUOrder);
     }
   }
@@ -1829,8 +1829,8 @@
     runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog10), "LOG10");
 
     // These intrinsics are supported on all hardware
-  case vmIntrinsics::_dsqrt: return Matcher::match_rule_supported(Op_SqrtD) ? inline_math(id) : false;
-  case vmIntrinsics::_dabs:  return Matcher::has_match_rule(Op_AbsD) ? inline_math(id) : false;
+  case vmIntrinsics::_dsqrt: return Matcher::match_rule_supported(Opcodes::Op_SqrtD) ? inline_math(id) : false;
+  case vmIntrinsics::_dabs:  return Matcher::has_match_rule(Opcodes::Op_AbsD) ? inline_math(id) : false;
 
   case vmIntrinsics::_dexp:
     return StubRoutines::dexp() != NULL ?
@@ -1974,7 +1974,7 @@
   // It can simplify the index computation for Arrays.copyOf
   // and similar uses of System.arraycopy.
   // First, compute the normalized version of CmpI(x, y).
-  int cmp_op = Op_CmpI;
+  Opcodes cmp_op = Opcodes::Op_CmpI;
   Node* xkey = xvalue;
   Node* ykey = yvalue;
   Node* ideal_cmpxy = _gvn.transform(new CmpINode(xkey, ykey));
@@ -2287,7 +2287,7 @@
     if (need_mem_bar) {
       // Add memory barrier to prevent commoning reads from this field
       // across safepoint since GC can change its value.
-      insert_mem_bar(Op_MemBarCPUOrder);
+      insert_mem_bar(Opcodes::Op_MemBarCPUOrder);
     }
     // Update IdealKit from graphKit.
     __ sync_kit(this);
@@ -2512,10 +2512,10 @@
     case Release:
     case Volatile:
       if (is_store) {
-        insert_mem_bar(Op_MemBarRelease);
+        insert_mem_bar(Opcodes::Op_MemBarRelease);
       } else {
         if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
-          insert_mem_bar(Op_MemBarVolatile);
+          insert_mem_bar(Opcodes::Op_MemBarVolatile);
         }
       }
       break;
@@ -2528,7 +2528,7 @@
   // exception paths do not take memory state from the memory barrier,
   // so there's no problems making a strong assert about mixing users
   // of safe & unsafe memory.
-  if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
+  if (need_mem_bar) insert_mem_bar(Opcodes::Op_MemBarCPUOrder);
 
   if (!is_store) {
     Node* p = NULL;
@@ -2627,10 +2627,10 @@
     case Acquire:
     case Volatile:
       if (!is_store) {
-        insert_mem_bar(Op_MemBarAcquire);
+        insert_mem_bar(Opcodes::Op_MemBarAcquire);
       } else {
         if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
-          insert_mem_bar(Op_MemBarVolatile);
+          insert_mem_bar(Opcodes::Op_MemBarVolatile);
         }
       }
       break;
@@ -2638,7 +2638,7 @@
       ShouldNotReachHere();
   }
 
-  if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
+  if (need_mem_bar) insert_mem_bar(Opcodes::Op_MemBarCPUOrder);
 
   return true;
 }
@@ -2844,19 +2844,19 @@
     case Acquire:
       break;
     case Release:
-      insert_mem_bar(Op_MemBarRelease);
+      insert_mem_bar(Opcodes::Op_MemBarRelease);
       break;
     case Volatile:
       if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
-        insert_mem_bar(Op_MemBarVolatile);
+        insert_mem_bar(Opcodes::Op_MemBarVolatile);
       } else {
-        insert_mem_bar(Op_MemBarRelease);
+        insert_mem_bar(Opcodes::Op_MemBarRelease);
       }
       break;
     default:
      ShouldNotReachHere();
   }
-  insert_mem_bar(Op_MemBarCPUOrder);
+  insert_mem_bar(Opcodes::Op_MemBarCPUOrder);
 
   // Figure out the memory ordering.
   MemNode::MemOrd mo = access_kind_to_memord(access_kind);
@@ -3096,7 +3096,7 @@
   }
 
   // Add the trailing membar surrounding the access
-  insert_mem_bar(Op_MemBarCPUOrder);
+  insert_mem_bar(Opcodes::Op_MemBarCPUOrder);
 
   switch (access_kind) {
     case Relaxed:
@@ -3104,7 +3104,7 @@
       break; // do nothing
     case Acquire:
     case Volatile:
-      insert_mem_bar(Op_MemBarAcquire);
+      insert_mem_bar(Opcodes::Op_MemBarAcquire);
       // !support_IRIW_for_not_multiple_copy_atomic_cpu handled in platform code
       break;
     default:
@@ -3149,16 +3149,16 @@
 bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
   // Regardless of form, don't allow previous ld/st to move down,
   // then issue acquire, release, or volatile mem_bar.
-  insert_mem_bar(Op_MemBarCPUOrder);
+  insert_mem_bar(Opcodes::Op_MemBarCPUOrder);
   switch(id) {
     case vmIntrinsics::_loadFence:
-      insert_mem_bar(Op_LoadFence);
+      insert_mem_bar(Opcodes::Op_LoadFence);
       return true;
     case vmIntrinsics::_storeFence:
-      insert_mem_bar(Op_StoreFence);
+      insert_mem_bar(Opcodes::Op_StoreFence);
       return true;
     case vmIntrinsics::_fullFence:
-      insert_mem_bar(Op_MemBarVolatile);
+      insert_mem_bar(Opcodes::Op_MemBarVolatile);
       return true;
     default:
       fatal_unexpected_iid(id);
@@ -3167,7 +3167,7 @@
 }
 
 bool LibraryCallKit::inline_onspinwait() {
-  insert_mem_bar(Op_OnSpinWait);
+  insert_mem_bar(Opcodes::Op_OnSpinWait);
   return true;
 }
@@ -3264,7 +3264,7 @@
 
   // Ensure that it's not possible to move the load of TLS._osthread._interrupted flag
   // out of the function.
-  insert_mem_bar(Op_MemBarCPUOrder);
+  insert_mem_bar(Opcodes::Op_MemBarCPUOrder);
 
   RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
   PhiNode* result_val = new PhiNode(result_rgn, TypeInt::BOOL);
@@ -4488,7 +4488,7 @@
   // Conservatively insert a memory barrier on all memory slices.
   // Do not let writes of the copy source or destination float below the copy.
-  insert_mem_bar(Op_MemBarCPUOrder);
+  insert_mem_bar(Opcodes::Op_MemBarCPUOrder);
 
   // Call it.  Note that the length argument is not scaled.
   make_runtime_call(RC_LEAF|RC_NO_FP,
@@ -4499,7 +4499,7 @@
                     src, dst, size XTOP);
 
   // Do not let reads of the copy destination float above the copy.
-  insert_mem_bar(Op_MemBarCPUOrder);
+  insert_mem_bar(Opcodes::Op_MemBarCPUOrder);
 
   return true;
 }
@@ -4596,9 +4596,9 @@
       // escape analysis can go from the MemBarStoreStoreNode to the
      // AllocateNode and eliminate the MemBarStoreStoreNode if possible
       // based on the escape status of the AllocateNode.
-      insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
+      insert_mem_bar(Opcodes::Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
     } else {
-      insert_mem_bar(Op_MemBarCPUOrder);
+      insert_mem_bar(Opcodes::Op_MemBarCPUOrder);
     }
   }
@@ -4659,7 +4659,7 @@
 
   // Conservatively insert a memory barrier on all memory slices.
   // Do not let writes into the original float below the clone.
-  insert_mem_bar(Op_MemBarCPUOrder);
+  insert_mem_bar(Opcodes::Op_MemBarCPUOrder);
 
   // paths into result_reg:
   enum {
@@ -5966,7 +5966,7 @@
 
   // Add memory barrier to prevent commoning reads from this field
   // across safepoint since GC can change its value.
-  insert_mem_bar(Op_MemBarCPUOrder);
+  insert_mem_bar(Opcodes::Op_MemBarCPUOrder);
 
   set_result(result);
   return true;
@@ -6017,7 +6017,7 @@
   }
 
   if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
-    insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
+    insert_mem_bar(Opcodes::Op_MemBarVolatile);   // StoreLoad barrier
   }
   // Build the load.
   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
@@ -6027,7 +6027,7 @@
   // another volatile read.
   if (is_vol) {
     // Memory barrier includes bogus read of value to force load BEFORE membar
-    insert_mem_bar(Op_MemBarAcquire, loadedField);
+    insert_mem_bar(Opcodes::Op_MemBarAcquire, loadedField);
   }
   return loadedField;
 }
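
Note on the pattern: every hunk above is the same mechanical change. The plain-int
opcode constants (Op_Parm, Op_StrComp, Op_MemBarCPUOrder, ...) become enumerators
of a scoped enum named Opcodes, so every use gains an Opcodes:: qualifier and
variables holding opcodes change type from int to Opcodes (see the cmp_op hunk at
line 1974). The sketch below shows why the scoped enum is the stronger type. It is
a minimal, hypothetical illustration: the real Opcodes definition lives elsewhere
in this patch series (presumably opto/opcodes.hpp), and OldOpcodes,
insert_mem_bar_old and insert_mem_bar_new are stand-in names, not HotSpot code.

    // Sketch of the unscoped-enum -> enum class refactor (C++11).
    // Only the Op_* enumerator names mirror the diff above.

    // Before: an unscoped enum. Enumerators leak into the enclosing
    // scope and convert implicitly to int, so callers may pass any int.
    enum OldOpcodes { Op_Parm = 1, Op_StrComp, Op_MemBarCPUOrder };
    void insert_mem_bar_old(int opcode) { (void)opcode; }

    // After: a scoped enum. Enumerators must be qualified with Opcodes::
    // and do not convert implicitly to or from int, so a mistaken integer
    // argument is rejected at compile time.
    enum class Opcodes { Op_Parm = 1, Op_StrComp, Op_MemBarCPUOrder };
    void insert_mem_bar_new(Opcodes opcode) { (void)opcode; }

    int main() {
      insert_mem_bar_old(Op_MemBarCPUOrder);          // old-style call site
      insert_mem_bar_old(12345);                      // compiles, silently wrong
      insert_mem_bar_new(Opcodes::Op_MemBarCPUOrder); // new-style call site
      // insert_mem_bar_new(12345);                   // error: no int conversion
      return 0;
    }

This also explains why the patch touches every switch and call site: C++ offers no
implicit bridge between the old int values and the new enum class, so the compiler
itself enumerates the remaining unconverted uses.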