src/share/vm/opto/library_call.cpp

*** 205,215 ****
    return generate_method_call(method_id, true, false);
  }
  Node * load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString, bool is_exact, bool is_static, ciInstanceKlass * fromKls);
  Node * field_address_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString, bool is_exact, bool is_static, ciInstanceKlass * fromKls);

! Node* make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae);
  bool inline_string_compareTo(StrIntrinsicNode::ArgEnc ae);
  bool inline_string_indexOf(StrIntrinsicNode::ArgEnc ae);
  bool inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae);
  Node* make_indexOf_node(Node* src_start, Node* src_count, Node* tgt_start, Node* tgt_count, RegionNode* region, Node* phi, StrIntrinsicNode::ArgEnc ae);
--- 205,215 ----
    return generate_method_call(method_id, true, false);
  }
  Node * load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString, bool is_exact, bool is_static, ciInstanceKlass * fromKls);
  Node * field_address_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString, bool is_exact, bool is_static, ciInstanceKlass * fromKls);

! Node* make_string_method_node(Opcodes opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae);
  bool inline_string_compareTo(StrIntrinsicNode::ArgEnc ae);
  bool inline_string_indexOf(StrIntrinsicNode::ArgEnc ae);
  bool inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae);
  Node* make_indexOf_node(Node* src_start, Node* src_count, Node* tgt_start, Node* tgt_count, RegionNode* region, Node* phi, StrIntrinsicNode::ArgEnc ae);
*** 478,488 ****
  const bool is_static = true;
  const bool is_volatile = true;

  if (!jvms()->has_method()) {
    // Root JVMState has a null method.
!   assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");
--- 478,488 ----
  const bool is_static = true;
  const bool is_volatile = true;

  if (!jvms()->has_method()) {
    // Root JVMState has a null method.
!   assert(map()->memory()->Opcode() == Opcodes::Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");
*** 840,850 ****
  }

  Node* LibraryCallKit::try_to_predicate(int predicate) {
    if (!jvms()->has_method()) {
      // Root JVMState has a null method.
!     assert(map()->memory()->Opcode() == Op_Parm, "");
      // Insert the memory aliasing node
      set_all_memory(reset_memory());
    }
    assert(merged_memory(), "");
--- 840,850 ----
  }

  Node* LibraryCallKit::try_to_predicate(int predicate) {
    if (!jvms()->has_method()) {
      // Root JVMState has a null method.
!     assert(map()->memory()->Opcode() == Opcodes::Op_Parm, "");
      // Insert the memory aliasing node
      set_all_memory(reset_memory());
    }
    assert(merged_memory(), "");
*** 1020,1041 ****
  //------------------------------make_string_method_node------------------------
  // Helper method for String intrinsic functions. This version is called with
  // str1 and str2 pointing to byte[] nodes containing Latin1 or UTF16 encoded
  // characters (depending on 'is_byte'). cnt1 and cnt2 are pointing to Int nodes
  // containing the lengths of str1 and str2.
! Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae) {
    Node* result = NULL;
    switch (opcode) {
!   case Op_StrIndexOf:
      result = new StrIndexOfNode(control(), memory(TypeAryPtr::BYTES), str1_start, cnt1, str2_start, cnt2, ae);
      break;
!   case Op_StrComp:
      result = new StrCompNode(control(), memory(TypeAryPtr::BYTES), str1_start, cnt1, str2_start, cnt2, ae);
      break;
!   case Op_StrEquals:
      // We already know that cnt1 == cnt2 here (checked in 'inline_string_equals').
      // Use the constant length if there is one because optimized match rule may exist.
      result = new StrEqualsNode(control(), memory(TypeAryPtr::BYTES), str1_start, str2_start, cnt2->is_Con() ? cnt2 : cnt1, ae);
      break;
--- 1020,1041 ----
  //------------------------------make_string_method_node------------------------
  // Helper method for String intrinsic functions. This version is called with
  // str1 and str2 pointing to byte[] nodes containing Latin1 or UTF16 encoded
  // characters (depending on 'is_byte'). cnt1 and cnt2 are pointing to Int nodes
  // containing the lengths of str1 and str2.
! Node* LibraryCallKit::make_string_method_node(Opcodes opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae) {
    Node* result = NULL;
    switch (opcode) {
!   case Opcodes::Op_StrIndexOf:
      result = new StrIndexOfNode(control(), memory(TypeAryPtr::BYTES), str1_start, cnt1, str2_start, cnt2, ae);
      break;
!   case Opcodes::Op_StrComp:
      result = new StrCompNode(control(), memory(TypeAryPtr::BYTES), str1_start, cnt1, str2_start, cnt2, ae);
      break;
!   case Opcodes::Op_StrEquals:
      // We already know that cnt1 == cnt2 here (checked in 'inline_string_equals').
      // Use the constant length if there is one because optimized match rule may exist.
      result = new StrEqualsNode(control(), memory(TypeAryPtr::BYTES), str1_start, str2_start, cnt2->is_Con() ? cnt2 : cnt1, ae);
      break;
*** 1061,1071 ****
  // Get start addr and length of second argument
  Node* arg2_start = array_element_address(arg2, intcon(0), T_BYTE);
  Node* arg2_cnt = load_array_length(arg2);

! Node* result = make_string_method_node(Op_StrComp, arg1_start, arg1_cnt, arg2_start, arg2_cnt, ae);
  set_result(result);
  return true;
  }

  //------------------------------inline_string_equals------------------------
--- 1061,1071 ----
  // Get start addr and length of second argument
  Node* arg2_start = array_element_address(arg2, intcon(0), T_BYTE);
  Node* arg2_cnt = load_array_length(arg2);

! Node* result = make_string_method_node(Opcodes::Op_StrComp, arg1_start, arg1_cnt, arg2_start, arg2_cnt, ae);
  set_result(result);
  return true;
  }

  //------------------------------inline_string_equals------------------------
*** 1096,1106 ****
    }

    // Check for count == 0 is done by assembler code for StrEquals.

    if (!stopped()) {
!     Node* equals = make_string_method_node(Op_StrEquals, arg1_start, arg1_cnt, arg2_start, arg2_cnt, ae);
      phi->init_req(1, equals);
      region->init_req(1, control());
    }
  }
--- 1096,1106 ----
    }

    // Check for count == 0 is done by assembler code for StrEquals.

    if (!stopped()) {
!     Node* equals = make_string_method_node(Opcodes::Op_StrEquals, arg1_start, arg1_cnt, arg2_start, arg2_cnt, ae);
      phi->init_req(1, equals);
      region->init_req(1, control());
    }
  }
*** 1194,1204 ****
    return true;
  }

  //------------------------------inline_string_indexOf------------------------
  bool LibraryCallKit::inline_string_indexOf(StrIntrinsicNode::ArgEnc ae) {
!   if (!Matcher::match_rule_supported(Op_StrIndexOf)) {
      return false;
    }
    Node* src = argument(0);
    Node* tgt = argument(1);
--- 1194,1204 ----
    return true;
  }

  //------------------------------inline_string_indexOf------------------------
  bool LibraryCallKit::inline_string_indexOf(StrIntrinsicNode::ArgEnc ae) {
!   if (!Matcher::match_rule_supported(Opcodes::Op_StrIndexOf)) {
      return false;
    }
    Node* src = argument(0);
    Node* tgt = argument(1);
*** 1238,1248 ****
  //-----------------------------inline_string_indexOf-----------------------
  bool LibraryCallKit::inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae) {
    if (too_many_traps(Deoptimization::Reason_intrinsic)) {
      return false;
    }
!   if (!Matcher::match_rule_supported(Op_StrIndexOf)) {
      return false;
    }
    assert(callee()->signature()->size() == 5, "String.indexOf() has 5 arguments");
    Node* src = argument(0); // byte[]
    Node* src_count = argument(1); // char count
--- 1238,1248 ----
  //-----------------------------inline_string_indexOf-----------------------
  bool LibraryCallKit::inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae) {
    if (too_many_traps(Deoptimization::Reason_intrinsic)) {
      return false;
    }
!   if (!Matcher::match_rule_supported(Opcodes::Op_StrIndexOf)) {
      return false;
    }
    assert(callee()->signature()->size() == 5, "String.indexOf() has 5 arguments");
    Node* src = argument(0); // byte[]
    Node* src_count = argument(1); // char count
*** 1313,1333 ****
        phi->init_req(2, intcon(0));
        region->init_req(2, if_zero);
      }
    }
    if (!stopped()) {
!     return make_string_method_node(Op_StrIndexOf, src_start, src_count, tgt_start, tgt_count, ae);
    }
    return NULL;
  }

  //-----------------------------inline_string_indexOfChar-----------------------
  bool LibraryCallKit::inline_string_indexOfChar() {
    if (too_many_traps(Deoptimization::Reason_intrinsic)) {
      return false;
    }
!   if (!Matcher::match_rule_supported(Op_StrIndexOfChar)) {
      return false;
    }
    assert(callee()->signature()->size() == 4, "String.indexOfChar() has 4 arguments");
    Node* src = argument(0); // byte[]
    Node* tgt = argument(1); // tgt is int ch
--- 1313,1333 ----
        phi->init_req(2, intcon(0));
        region->init_req(2, if_zero);
      }
    }
    if (!stopped()) {
!     return make_string_method_node(Opcodes::Op_StrIndexOf, src_start, src_count, tgt_start, tgt_count, ae);
    }
    return NULL;
  }

  //-----------------------------inline_string_indexOfChar-----------------------
  bool LibraryCallKit::inline_string_indexOfChar() {
    if (too_many_traps(Deoptimization::Reason_intrinsic)) {
      return false;
    }
!   if (!Matcher::match_rule_supported(Opcodes::Op_StrIndexOfChar)) {
      return false;
    }
    assert(callee()->signature()->size() == 4, "String.indexOfChar() has 4 arguments");
    Node* src = argument(0); // byte[]
    Node* tgt = argument(1); // tgt is int ch
*** 1443,1453 ****
      // other threads.
      // Record what AllocateNode this StoreStore protects so that
      // escape analysis can go from the MemBarStoreStoreNode to the
      // AllocateNode and eliminate the MemBarStoreStoreNode if possible
      // based on the escape status of the AllocateNode.
!     insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
    }
    if (compress) {
      set_result(_gvn.transform(count));
    }
    return true;
--- 1443,1453 ----
      // other threads.
      // Record what AllocateNode this StoreStore protects so that
      // escape analysis can go from the MemBarStoreStoreNode to the
      // AllocateNode and eliminate the MemBarStoreStoreNode if possible
      // based on the escape status of the AllocateNode.
!     insert_mem_bar(Opcodes::Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
    }
    if (compress) {
      set_result(_gvn.transform(count));
    }
    return true;
*** 1535,1547 ****
        // other threads.
        // Record what AllocateNode this StoreStore protects so that
        // escape analysis can go from the MemBarStoreStoreNode to the
        // AllocateNode and eliminate the MemBarStoreStoreNode if possible
        // based on the escape status of the AllocateNode.
!       insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
      } else {
!       insert_mem_bar(Op_MemBarCPUOrder);
      }
    } // original reexecute is set back here

    C->set_has_split_ifs(true); // Has chance for split-if optimization
    if (!stopped()) {
--- 1535,1547 ----
        // other threads.
        // Record what AllocateNode this StoreStore protects so that
        // escape analysis can go from the MemBarStoreStoreNode to the
        // AllocateNode and eliminate the MemBarStoreStoreNode if possible
        // based on the escape status of the AllocateNode.
!       insert_mem_bar(Opcodes::Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
      } else {
!       insert_mem_bar(Opcodes::Op_MemBarCPUOrder);
      }
    } // original reexecute is set back here

    C->set_has_split_ifs(true); // Has chance for split-if optimization
    if (!stopped()) {
*** 1619,1631 ****
        // other threads.
        // Record what AllocateNode this StoreStore protects so that
        // escape analysis can go from the MemBarStoreStoreNode to the
        // AllocateNode and eliminate the MemBarStoreStoreNode if possible
        // based on the escape status of the AllocateNode.
!       insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
      } else {
!       insert_mem_bar(Op_MemBarCPUOrder);
      }
    }

    C->set_has_split_ifs(true); // Has chance for split-if optimization
    return true;
--- 1619,1631 ----
        // other threads.
        // Record what AllocateNode this StoreStore protects so that
        // escape analysis can go from the MemBarStoreStoreNode to the
        // AllocateNode and eliminate the MemBarStoreStoreNode if possible
        // based on the escape status of the AllocateNode.
!       insert_mem_bar(Opcodes::Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
      } else {
!       insert_mem_bar(Opcodes::Op_MemBarCPUOrder);
      }
    }

    C->set_has_split_ifs(true); // Has chance for split-if optimization
    return true;
*** 1827,1838 ****
      return StubRoutines::dlog10() != NULL ?
        runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dlog10(), "dlog10") :
        runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog10), "LOG10");

      // These intrinsics are supported on all hardware
!   case vmIntrinsics::_dsqrt: return Matcher::match_rule_supported(Op_SqrtD) ? inline_math(id) : false;
!   case vmIntrinsics::_dabs:  return Matcher::has_match_rule(Op_AbsD) ? inline_math(id) : false;

    case vmIntrinsics::_dexp:
      return StubRoutines::dexp() != NULL ?
        runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dexp(), "dexp") :
        runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dexp), "EXP");
--- 1827,1838 ----
      return StubRoutines::dlog10() != NULL ?
        runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dlog10(), "dlog10") :
        runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog10), "LOG10");

      // These intrinsics are supported on all hardware
!   case vmIntrinsics::_dsqrt: return Matcher::match_rule_supported(Opcodes::Op_SqrtD) ? inline_math(id) : false;
!   case vmIntrinsics::_dabs:  return Matcher::has_match_rule(Opcodes::Op_AbsD) ? inline_math(id) : false;

    case vmIntrinsics::_dexp:
      return StubRoutines::dexp() != NULL ?
        runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dexp(), "dexp") :
        runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dexp), "EXP");
*** 1972,1982 ****
    // Try to find a dominating comparison of these guys.
    // It can simplify the index computation for Arrays.copyOf
    // and similar uses of System.arraycopy.
    // First, compute the normalized version of CmpI(x, y).
!   int cmp_op = Op_CmpI;
    Node* xkey = xvalue;
    Node* ykey = yvalue;
    Node* ideal_cmpxy = _gvn.transform(new CmpINode(xkey, ykey));
    if (ideal_cmpxy->is_Cmp()) {
      // E.g., if we have CmpI(length - offset, count),
--- 1972,1982 ----
    // Try to find a dominating comparison of these guys.
    // It can simplify the index computation for Arrays.copyOf
    // and similar uses of System.arraycopy.
    // First, compute the normalized version of CmpI(x, y).
!   Opcodes cmp_op = Opcodes::Op_CmpI;
    Node* xkey = xvalue;
    Node* ykey = yvalue;
    Node* ideal_cmpxy = _gvn.transform(new CmpINode(xkey, ykey));
    if (ideal_cmpxy->is_Cmp()) {
      // E.g., if we have CmpI(length - offset, count),
*** 2285,2295 ****
                          pre_val /* pre_val */, T_OBJECT);
      if (need_mem_bar) {
        // Add memory barrier to prevent commoning reads from this field
        // across safepoint since GC can change its value.
!       insert_mem_bar(Op_MemBarCPUOrder);
      }
      // Update IdealKit from graphKit.
      __ sync_kit(this);

    } __ end_if(); // _ref_type != ref_none
--- 2285,2295 ----
                          pre_val /* pre_val */, T_OBJECT);
      if (need_mem_bar) {
        // Add memory barrier to prevent commoning reads from this field
        // across safepoint since GC can change its value.
!       insert_mem_bar(Opcodes::Op_MemBarCPUOrder);
      }
      // Update IdealKit from graphKit.
      __ sync_kit(this);

    } __ end_if(); // _ref_type != ref_none
*** 2510,2523 ****
      case Acquire:
        break;
      case Release:
      case Volatile:
        if (is_store) {
!         insert_mem_bar(Op_MemBarRelease);
        } else {
          if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
!           insert_mem_bar(Op_MemBarVolatile);
          }
        }
        break;
      default:
        ShouldNotReachHere();
--- 2510,2523 ----
      case Acquire:
        break;
      case Release:
      case Volatile:
        if (is_store) {
!         insert_mem_bar(Opcodes::Op_MemBarRelease);
        } else {
          if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
!           insert_mem_bar(Opcodes::Op_MemBarVolatile);
          }
        }
        break;
      default:
        ShouldNotReachHere();
*** 2526,2536 ****
    // Memory barrier to prevent normal and 'unsafe' accesses from
    // bypassing each other. Happens after null checks, so the
    // exception paths do not take memory state from the memory barrier,
    // so there's no problems making a strong assert about mixing users
    // of safe & unsafe memory.
!   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);

    if (!is_store) {
      Node* p = NULL;
      // Try to constant fold a load from a constant field
      ciField* field = alias_type->field();
--- 2526,2536 ----
    // Memory barrier to prevent normal and 'unsafe' accesses from
    // bypassing each other. Happens after null checks, so the
    // exception paths do not take memory state from the memory barrier,
    // so there's no problems making a strong assert about mixing users
    // of safe & unsafe memory.
!   if (need_mem_bar) insert_mem_bar(Opcodes::Op_MemBarCPUOrder);

    if (!is_store) {
      Node* p = NULL;
      // Try to constant fold a load from a constant field
      ciField* field = alias_type->field();
*** 2625,2646 ****
      case Release:
        break;
      case Acquire:
      case Volatile:
        if (!is_store) {
!         insert_mem_bar(Op_MemBarAcquire);
        } else {
          if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
!           insert_mem_bar(Op_MemBarVolatile);
          }
        }
        break;
      default:
        ShouldNotReachHere();
    }

!   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);

    return true;
  }

  //----------------------------inline_unsafe_load_store----------------------------
--- 2625,2646 ----
      case Release:
        break;
      case Acquire:
      case Volatile:
        if (!is_store) {
!         insert_mem_bar(Opcodes::Op_MemBarAcquire);
        } else {
          if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
!           insert_mem_bar(Opcodes::Op_MemBarVolatile);
          }
        }
        break;
      default:
        ShouldNotReachHere();
    }

!   if (need_mem_bar) insert_mem_bar(Opcodes::Op_MemBarCPUOrder);

    return true;
  }

  //----------------------------inline_unsafe_load_store----------------------------
*** 2842,2864 ****
    switch (access_kind) {
      case Relaxed:
      case Acquire:
        break;
      case Release:
!       insert_mem_bar(Op_MemBarRelease);
        break;
      case Volatile:
        if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
!         insert_mem_bar(Op_MemBarVolatile);
        } else {
!         insert_mem_bar(Op_MemBarRelease);
        }
        break;
      default:
        ShouldNotReachHere();
    }
!   insert_mem_bar(Op_MemBarCPUOrder);

    // Figure out the memory ordering.
    MemNode::MemOrd mo = access_kind_to_memord(access_kind);

    // 4984716: MemBars must be inserted before this
--- 2842,2864 ----
    switch (access_kind) {
      case Relaxed:
      case Acquire:
        break;
      case Release:
!       insert_mem_bar(Opcodes::Op_MemBarRelease);
        break;
      case Volatile:
        if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
!         insert_mem_bar(Opcodes::Op_MemBarVolatile);
        } else {
!         insert_mem_bar(Opcodes::Op_MemBarRelease);
        }
        break;
      default:
        ShouldNotReachHere();
    }
!   insert_mem_bar(Opcodes::Op_MemBarCPUOrder);

    // Figure out the memory ordering.
    MemNode::MemOrd mo = access_kind_to_memord(access_kind);

    // 4984716: MemBars must be inserted before this
*** 3094,3112 ****
                         T_OBJECT);
      }
    }

    // Add the trailing membar surrounding the access
!   insert_mem_bar(Op_MemBarCPUOrder);

    switch (access_kind) {
      case Relaxed:
      case Release:
        break; // do nothing
      case Acquire:
      case Volatile:
!       insert_mem_bar(Op_MemBarAcquire);
        // !support_IRIW_for_not_multiple_copy_atomic_cpu handled in platform code
        break;
      default:
        ShouldNotReachHere();
    }
--- 3094,3112 ----
                         T_OBJECT);
      }
    }

    // Add the trailing membar surrounding the access
!   insert_mem_bar(Opcodes::Op_MemBarCPUOrder);

    switch (access_kind) {
      case Relaxed:
      case Release:
        break; // do nothing
      case Acquire:
      case Volatile:
!       insert_mem_bar(Opcodes::Op_MemBarAcquire);
        // !support_IRIW_for_not_multiple_copy_atomic_cpu handled in platform code
        break;
      default:
        ShouldNotReachHere();
    }
*** 3147,3175 ****
  }

  bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
    // Regardless of form, don't allow previous ld/st to move down,
    // then issue acquire, release, or volatile mem_bar.
!   insert_mem_bar(Op_MemBarCPUOrder);
    switch(id) {
      case vmIntrinsics::_loadFence:
!       insert_mem_bar(Op_LoadFence);
        return true;
      case vmIntrinsics::_storeFence:
!       insert_mem_bar(Op_StoreFence);
        return true;
      case vmIntrinsics::_fullFence:
!       insert_mem_bar(Op_MemBarVolatile);
        return true;
      default:
        fatal_unexpected_iid(id);
        return false;
    }
  }

  bool LibraryCallKit::inline_onspinwait() {
!   insert_mem_bar(Op_OnSpinWait);
    return true;
  }

  bool LibraryCallKit::klass_needs_init_guard(Node* kls) {
    if (!kls->is_Con()) {
--- 3147,3175 ----
  }

  bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
    // Regardless of form, don't allow previous ld/st to move down,
    // then issue acquire, release, or volatile mem_bar.
!   insert_mem_bar(Opcodes::Op_MemBarCPUOrder);
    switch(id) {
      case vmIntrinsics::_loadFence:
!       insert_mem_bar(Opcodes::Op_LoadFence);
        return true;
      case vmIntrinsics::_storeFence:
!       insert_mem_bar(Opcodes::Op_StoreFence);
        return true;
      case vmIntrinsics::_fullFence:
!       insert_mem_bar(Opcodes::Op_MemBarVolatile);
        return true;
      default:
        fatal_unexpected_iid(id);
        return false;
    }
  }

  bool LibraryCallKit::inline_onspinwait() {
!   insert_mem_bar(Opcodes::Op_OnSpinWait);
    return true;
  }

  bool LibraryCallKit::klass_needs_init_guard(Node* kls) {
    if (!kls->is_Con()) {
*** 3262,3272 ****
      PATH_LIMIT
    };

    // Ensure that it's not possible to move the load of TLS._osthread._interrupted flag
    // out of the function.
!   insert_mem_bar(Op_MemBarCPUOrder);

    RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
    PhiNode* result_val = new PhiNode(result_rgn, TypeInt::BOOL);

    RegionNode* slow_region = new RegionNode(1);
--- 3262,3272 ----
      PATH_LIMIT
    };

    // Ensure that it's not possible to move the load of TLS._osthread._interrupted flag
    // out of the function.
!   insert_mem_bar(Opcodes::Op_MemBarCPUOrder);

    RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
    PhiNode* result_val = new PhiNode(result_rgn, TypeInt::BOOL);

    RegionNode* slow_region = new RegionNode(1);
*** 4486,4507 ****
    Node* src = make_unsafe_address(src_ptr, src_off);
    Node* dst = make_unsafe_address(dst_ptr, dst_off);

    // Conservatively insert a memory barrier on all memory slices.
    // Do not let writes of the copy source or destination float below the copy.
!   insert_mem_bar(Op_MemBarCPUOrder);

    // Call it. Note that the length argument is not scaled.
    make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::fast_arraycopy_Type(), StubRoutines::unsafe_arraycopy(), "unsafe_arraycopy", TypeRawPtr::BOTTOM, src, dst, size XTOP);

    // Do not let reads of the copy destination float above the copy.
!   insert_mem_bar(Op_MemBarCPUOrder);

    return true;
  }

  //------------------------clone_coping-----------------------------------
--- 4486,4507 ----
    Node* src = make_unsafe_address(src_ptr, src_off);
    Node* dst = make_unsafe_address(dst_ptr, dst_off);

    // Conservatively insert a memory barrier on all memory slices.
    // Do not let writes of the copy source or destination float below the copy.
!   insert_mem_bar(Opcodes::Op_MemBarCPUOrder);

    // Call it. Note that the length argument is not scaled.
    make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::fast_arraycopy_Type(), StubRoutines::unsafe_arraycopy(), "unsafe_arraycopy", TypeRawPtr::BOTTOM, src, dst, size XTOP);

    // Do not let reads of the copy destination float above the copy.
!   insert_mem_bar(Opcodes::Op_MemBarCPUOrder);

    return true;
  }

  //------------------------clone_coping-----------------------------------
*** 4594,4606 ****
        // other threads.
        // Record what AllocateNode this StoreStore protects so that
        // escape analysis can go from the MemBarStoreStoreNode to the
        // AllocateNode and eliminate the MemBarStoreStoreNode if possible
        // based on the escape status of the AllocateNode.
!       insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
      } else {
!       insert_mem_bar(Op_MemBarCPUOrder);
      }
    }

  //------------------------inline_native_clone----------------------------
  // protected native Object java.lang.Object.clone();
--- 4594,4606 ----
        // other threads.
        // Record what AllocateNode this StoreStore protects so that
        // escape analysis can go from the MemBarStoreStoreNode to the
        // AllocateNode and eliminate the MemBarStoreStoreNode if possible
        // based on the escape status of the AllocateNode.
!       insert_mem_bar(Opcodes::Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
      } else {
!       insert_mem_bar(Opcodes::Op_MemBarCPUOrder);
      }
    }

  //------------------------inline_native_clone----------------------------
  // protected native Object java.lang.Object.clone();
*** 4657,4667 ****
                                     ? tklass->as_instance_type() : TypeInstPtr::NOTNULL);

    // Conservatively insert a memory barrier on all memory slices.
    // Do not let writes into the original float below the clone.
!   insert_mem_bar(Op_MemBarCPUOrder);

    // paths into result_reg:
    enum {
      _slow_path = 1, // out-of-line call to clone method (virtual or not)
      _objArray_path, // plain array allocation, plus arrayof_oop_arraycopy
--- 4657,4667 ----
                                     ? tklass->as_instance_type() : TypeInstPtr::NOTNULL);

    // Conservatively insert a memory barrier on all memory slices.
    // Do not let writes into the original float below the clone.
!   insert_mem_bar(Opcodes::Op_MemBarCPUOrder);

    // paths into result_reg:
    enum {
      _slow_path = 1, // out-of-line call to clone method (virtual or not)
      _objArray_path, // plain array allocation, plus arrayof_oop_arraycopy
*** 5964,5974 ****
                    result /* pre_val */, T_OBJECT);

    // Add memory barrier to prevent commoning reads from this field
    // across safepoint since GC can change its value.
!   insert_mem_bar(Op_MemBarCPUOrder);

    set_result(result);
    return true;
  }
--- 5964,5974 ----
                    result /* pre_val */, T_OBJECT);

    // Add memory barrier to prevent commoning reads from this field
    // across safepoint since GC can change its value.
!   insert_mem_bar(Opcodes::Op_MemBarCPUOrder);

    set_result(result);
    return true;
  }
*** 6015,6035 ****
    } else {
      type = Type::get_const_basic_type(bt);
    }
    if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
!     insert_mem_bar(Op_MemBarVolatile); // StoreLoad barrier
    }
    // Build the load.
    MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
    Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
    // If reference is volatile, prevent following memory ops from
    // floating up past the volatile read. Also prevents commoning
    // another volatile read.
    if (is_vol) {
      // Memory barrier includes bogus read of value to force load BEFORE membar
!     insert_mem_bar(Op_MemBarAcquire, loadedField);
    }
    return loadedField;
  }

  Node * LibraryCallKit::field_address_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
--- 6015,6035 ----
    } else {
      type = Type::get_const_basic_type(bt);
    }
    if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
!     insert_mem_bar(Opcodes::Op_MemBarVolatile); // StoreLoad barrier
    }
    // Build the load.
    MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
    Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
    // If reference is volatile, prevent following memory ops from
    // floating up past the volatile read. Also prevents commoning
    // another volatile read.
    if (is_vol) {
      // Memory barrier includes bogus read of value to force load BEFORE membar
!     insert_mem_bar(Opcodes::Op_MemBarAcquire, loadedField);
    }
    return loadedField;
  }

  Node * LibraryCallKit::field_address_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,