/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/narrowptrnode.hpp"
#include "gc/shared/c2_BarrierSetCodeGen.hpp"
#include "utilities/macros.hpp"

Node* C2BarrierSetCodeGen::store_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type,
                                             Node* val, const Type* val_type, BasicType bt,
                                             C2DecoratorSet decorators) {
  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool is_vol = (decorators & C2_MO_VOLATILE) != 0;
  bool is_release = (decorators & C2_MO_RELEASE) != 0;
  bool unaligned = (decorators & C2_ACCESS_UNALIGNED) != 0;
  bool requires_atomic_access = (decorators & C2_ACCESS_ATOMIC) != 0;

  if (bt == T_DOUBLE) {
    val = kit->dstore_rounding(val);
  }

  MemNode::MemOrd mo;
  if (is_vol || is_release) {
    // Volatile fields need releasing stores.
    mo = MemNode::release;
  } else {
    // Non-volatile fields also need releasing stores if they hold an
    // object reference, because the object reference might point to
    // a freshly created object.
    // Conservatively release stores of object references.
    mo = StoreNode::release_if_reference(bt);
  }

  return kit->store_to_memory(kit->control(), adr, val, bt, adr_type, mo,
                              requires_atomic_access, unaligned, mismatched);
}

Node* C2BarrierSetCodeGen::load_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type,
                                            const Type* val_type, BasicType bt,
                                            C2DecoratorSet decorators) {
  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool is_vol = (decorators & C2_MO_VOLATILE) != 0;
  bool is_acquire = (decorators & C2_MO_ACQUIRE) != 0;
  bool anonymous = (decorators & C2_ACCESS_ON_ANONYMOUS) != 0;
  bool requires_atomic_access = (decorators & C2_ACCESS_ATOMIC) != 0;
  bool unaligned = (decorators & C2_ACCESS_UNALIGNED) != 0;
  bool no_control = (decorators & C2_ACCESS_FREE_CONTROL) != 0;

  MemNode::MemOrd mo;
  if (is_vol || is_acquire) {
    mo = MemNode::acquire;
  } else {
    mo = MemNode::unordered;
  }

  LoadNode::ControlDependency dep = LoadNode::DependsOnlyOnTest;
  if (anonymous) {
    // To be valid, unsafe loads may depend on other conditions than
    // the one that guards them: pin the Load node.
    Node* ctrl = kit->control();
    if (adr_type->isa_instptr()) {
      assert(adr_type->meet(TypePtr::NULL_PTR) != adr_type->remove_speculative(), "should be not null");
      intptr_t offset = Type::OffsetBot;
      AddPNode::Ideal_base_and_offset(adr, &kit->gvn(), offset);
      if (offset >= 0) {
        int s = Klass::layout_helper_size_in_bytes(adr_type->isa_instptr()->klass()->layout_helper());
        if (offset < s) {
          // Guaranteed to be a valid access, no need to pin it.
          no_control = true;
        } else {
          dep = LoadNode::Pinned;
        }
      }
    }
  }

  return kit->make_load(no_control ? NULL : kit->control(), adr, val_type, bt, adr_type, mo, dep,
                        requires_atomic_access, unaligned, mismatched);
}

Node* C2BarrierSetCodeGen::store_at(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type,
                                    Node* val, const Type* val_type, BasicType bt,
                                    C2DecoratorSet decorators) {
  decorators = fixup_decorators(decorators);
  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool is_vol = (decorators & C2_MO_VOLATILE) != 0;
  bool is_release = (decorators & C2_MO_RELEASE) != 0;
  bool is_relaxed = (decorators & C2_MO_UNORDERED) != 0;
  bool anonymous = (decorators & C2_ACCESS_ON_ANONYMOUS) != 0;
  bool on_heap = (decorators & C2_ACCESS_ON_HEAP) != 0;

  // We will need memory barriers unless we can determine a unique
  // alias category for this reference. (Note: If for some reason
  // the barriers get omitted and the unsafe reference begins to "pollute"
  // the alias analysis of the rest of the graph, either Compile::can_alias
  // or Compile::must_alias will throw a diagnostic assert.)
  bool need_cpu_mem_bar = anonymous && (!is_relaxed || mismatched || !on_heap);

  if (need_cpu_mem_bar) {
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  }

  // If reference is volatile, prevent following memory ops from
  // floating down past the volatile write. Also prevents commoning
  // another volatile read.
  if (is_vol || is_release) {
    kit->insert_mem_bar(Op_MemBarRelease);
  }

  Node* store = store_at_resolved(kit, obj, adr, adr_type, val, val_type, bt, decorators);

  // If reference is volatile, prevent following volatile ops from
  // floating up before the volatile write.
  // If not multiple copy atomic, we do the MemBarVolatile before the load.
  if (is_vol && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
    kit->insert_mem_bar(Op_MemBarVolatile); // Use fat membar
  }

  if (need_cpu_mem_bar) {
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  }

  return store;
}

Node* C2BarrierSetCodeGen::load_at(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type,
                                   const Type* val_type, BasicType bt,
                                   C2DecoratorSet decorators) {
  decorators = fixup_decorators(decorators);
  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool is_vol = (decorators & C2_MO_VOLATILE) != 0;
  bool is_acquire = (decorators & C2_MO_ACQUIRE) != 0;
  bool is_relaxed = (decorators & C2_MO_UNORDERED) != 0;
  bool anonymous = (decorators & C2_ACCESS_ON_ANONYMOUS) != 0;
  bool on_heap = (decorators & C2_ACCESS_ON_HEAP) != 0;

  bool need_cpu_mem_bar = anonymous && (!is_relaxed || mismatched || !on_heap);

  // Memory barrier to prevent normal and 'unsafe' accesses from
  // bypassing each other. Happens after null checks, so the
  // exception paths do not take memory state from the memory barrier,
  // so there are no problems making a strong assert about mixing users
  // of safe & unsafe memory.
  if (need_cpu_mem_bar) {
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  }

  if (is_vol && support_IRIW_for_not_multiple_copy_atomic_cpu) {
    kit->insert_mem_bar(Op_MemBarVolatile);
  }

  Node* p = load_at_resolved(kit, obj, adr, adr_type, val_type, bt, decorators);

  if (is_vol || is_acquire) {
    kit->insert_mem_bar(Op_MemBarAcquire, p);
  }

  if (need_cpu_mem_bar) {
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  }

  return p;
}

//--------------------------- atomic operations ---------------------------------

static MemNode::MemOrd atomic_op_mo_from_decorators(C2DecoratorSet decorators) {
  if ((decorators & C2_MO_RELEASE) != 0) {
    return MemNode::release;
  } else if ((decorators & C2_MO_ACQUIRE) != 0) {
    return MemNode::acquire;
  } else if ((decorators & C2_MO_VOLATILE) != 0) {
    return MemNode::seqcst;
  } else {
    return MemNode::unordered;
  }
}

static void pin_atomic_op(GraphKit* kit, Node* load_store, int alias_idx) {
  // SCMemProjNodes represent the memory state of a LoadStore. Their
  // main role is to prevent LoadStore nodes from being optimized away
  // when their results aren't used.
  Node* proj = kit->gvn().transform(new SCMemProjNode(load_store));
  kit->set_memory(proj, alias_idx);
}

static Node* atomic_op_membar_prologue(GraphKit* kit, C2DecoratorSet decorators, int alias_idx) {
  bool is_release = (decorators & C2_MO_RELEASE) != 0;
  bool is_volatile = (decorators & C2_MO_VOLATILE) != 0;
  // Memory-model-wise, a LoadStore acts like a little synchronized
  // block, so needs barriers on each side. These don't translate
  // into actual barriers on most machines, but we still need rest of
  // compiler to respect ordering.
  if (is_release) {
    kit->insert_mem_bar(Op_MemBarRelease);
  } else if (is_volatile) {
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      kit->insert_mem_bar(Op_MemBarVolatile);
    } else {
      kit->insert_mem_bar(Op_MemBarRelease);
    }
  }
  kit->insert_mem_bar(Op_MemBarCPUOrder);

  // 4984716: MemBars must be inserted before this
  //          memory node in order to avoid a false
  //          dependency which will confuse the scheduler.
  Node* mem = kit->memory(alias_idx);
  return mem;
}

static void atomic_op_membar_epilogue(GraphKit* kit, C2DecoratorSet decorators) {
  bool is_acquire = (decorators & C2_MO_ACQUIRE) != 0;
  bool is_volatile = (decorators & C2_MO_VOLATILE) != 0;
  // Add the trailing membar surrounding the access.
  kit->insert_mem_bar(Op_MemBarCPUOrder);
  if (is_acquire || is_volatile) {
    kit->insert_mem_bar(Op_MemBarAcquire);
  }
}

Node* C2BarrierSetCodeGen::cas_val_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type,
                                               int alias_idx, Node* expected_val, Node* new_val,
                                               const Type* value_type, Node* mem, Node*& load_store,
                                               BasicType bt, C2DecoratorSet decorators) {
  bool is_obj = bt == T_OBJECT || bt == T_ARRAY;
  MemNode::MemOrd mo = atomic_op_mo_from_decorators(decorators);

  if (is_obj) {
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node* oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      load_store = kit->gvn().transform(new CompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc,
                                                                    adr_type, value_type->make_narrowoop(), mo));
    } else
#endif
    {
      load_store = kit->gvn().transform(new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val,
                                                                    adr_type, value_type->is_oopptr(), mo));
    }
  } else {
    switch (bt) {
      case T_BYTE: {
        load_store = kit->gvn().transform(new CompareAndExchangeBNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      case T_SHORT: {
        load_store = kit->gvn().transform(new CompareAndExchangeSNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      case T_INT: {
        load_store = kit->gvn().transform(new CompareAndExchangeINode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      case T_LONG: {
        load_store = kit->gvn().transform(new CompareAndExchangeLNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }

  return load_store;
}

Node* C2BarrierSetCodeGen::cas_bool_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type,
                                                int alias_idx, Node* expected_val, Node* new_val,
                                                const Type* value_type, Node* mem,
                                                BasicType bt, C2DecoratorSet decorators) {
  bool is_obj = bt == T_OBJECT || bt == T_ARRAY;
  bool is_weak_cas = (decorators & C2_WEAK_CAS) != 0;
  MemNode::MemOrd mo = atomic_op_mo_from_decorators(decorators);

  Node* load_store = NULL;
  if (is_obj) {
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node* oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      if (is_weak_cas) {
        load_store = kit->gvn().transform(new WeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
      }
    } else
#endif
    {
      if (is_weak_cas) {
        load_store = kit->gvn().transform(new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
      }
    }
  } else {
    switch (bt) {
      case T_BYTE: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store =
            kit->gvn().transform(new CompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      case T_SHORT: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      case T_INT: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      case T_LONG: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }

  return load_store;
}

Node* C2BarrierSetCodeGen::swap_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type,
                                            int alias_idx, Node* new_val, const Type* value_type,
                                            Node* mem, Node*& load_store,
                                            BasicType bt, C2DecoratorSet decorators) {
  bool is_obj = bt == T_OBJECT || bt == T_ARRAY;

  if (is_obj) {
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      load_store = kit->gvn().transform(new GetAndSetNNode(kit->control(), mem, adr, newval_enc, adr_type, value_type->make_narrowoop()));
    } else
#endif
    {
      load_store = kit->gvn().transform(new GetAndSetPNode(kit->control(), mem, adr, new_val, adr_type, value_type->is_oopptr()));
    }
  } else {
    switch (bt) {
      case T_BYTE:
        load_store = kit->gvn().transform(new GetAndSetBNode(kit->control(), mem, adr, new_val, adr_type));
        break;
      case T_SHORT:
        load_store = kit->gvn().transform(new GetAndSetSNode(kit->control(), mem, adr, new_val, adr_type));
        break;
      case T_INT:
        load_store = kit->gvn().transform(new GetAndSetINode(kit->control(), mem, adr, new_val, adr_type));
        break;
      case T_LONG:
        load_store = kit->gvn().transform(new GetAndSetLNode(kit->control(), mem, adr, new_val, adr_type));
        break;
      default:
        ShouldNotReachHere();
    }
  }

  pin_atomic_op(kit, load_store, alias_idx);

  Node* result = load_store;
#ifdef _LP64
  if (is_obj && adr->bottom_type()->is_ptr_to_narrowoop()) {
    result = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
  }
#endif
  return result;
}

Node* C2BarrierSetCodeGen::fetch_and_add_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type,
                                                     int alias_idx, Node* new_val, const Type* value_type,
                                                     Node* mem, BasicType bt, C2DecoratorSet decorators) {
  Node* load_store = NULL;
  switch (bt) {
    case T_BYTE:
      load_store = kit->gvn().transform(new GetAndAddBNode(kit->control(), mem, adr, new_val, adr_type));
      break;
    case T_SHORT:
      load_store = kit->gvn().transform(new GetAndAddSNode(kit->control(), mem, adr, new_val, adr_type));
      break;
    case T_INT:
      load_store = kit->gvn().transform(new GetAndAddINode(kit->control(), mem, adr, new_val, adr_type));
      break;
    case T_LONG:
      load_store = kit->gvn().transform(new GetAndAddLNode(kit->control(), mem, adr, new_val, adr_type));
      break;
    default:
      ShouldNotReachHere();
  }

  pin_atomic_op(kit, load_store, alias_idx);

  return load_store;
}

Node* C2BarrierSetCodeGen::cas_val_at(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type,
                                      int alias_idx, Node* expected_val, Node* new_val,
                                      const Type* value_type, BasicType bt, C2DecoratorSet decorators) {
  decorators = fixup_decorators(decorators);
  Node* mem = atomic_op_membar_prologue(kit, decorators, alias_idx);

  Node* load_store = NULL;
  Node* result = cas_val_at_resolved(kit, obj, adr, adr_type, alias_idx, expected_val, new_val,
                                     value_type, mem, load_store, bt, decorators);

  pin_atomic_op(kit, result, alias_idx);

#ifdef _LP64
  bool is_obj = bt == T_OBJECT || bt == T_ARRAY;
  if (is_obj && adr->bottom_type()->is_ptr_to_narrowoop()) {
    result = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
  }
#endif

  atomic_op_membar_epilogue(kit, decorators);
  return result;
}

Node* C2BarrierSetCodeGen::cas_bool_at(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type,
                                       int alias_idx, Node* expected_val, Node* new_val,
                                       const Type* value_type, BasicType bt, C2DecoratorSet decorators) {
  decorators = fixup_decorators(decorators);
  Node* mem = atomic_op_membar_prologue(kit, decorators, alias_idx);

  Node* result = cas_bool_at_resolved(kit, obj, adr, adr_type, alias_idx, expected_val, new_val,
                                      value_type, mem, bt, decorators);

  pin_atomic_op(kit, result, alias_idx);
  atomic_op_membar_epilogue(kit, decorators);
  return result;
}

Node* C2BarrierSetCodeGen::swap_at(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type,
                                   int alias_idx, Node* new_val, const Type* value_type,
                                   BasicType bt, C2DecoratorSet decorators) {
  decorators = fixup_decorators(decorators);
  Node* mem = atomic_op_membar_prologue(kit, decorators, alias_idx);

  Node* load_store = NULL;
  Node* result = swap_at_resolved(kit, obj, adr, adr_type, alias_idx, new_val, value_type,
                                  mem, load_store, bt, decorators);

  atomic_op_membar_epilogue(kit, decorators);
  return result;
}

Node* C2BarrierSetCodeGen::fetch_and_add_at(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type,
                                            int alias_idx, Node* new_val, const Type* value_type,
                                            BasicType bt, C2DecoratorSet decorators) {
  decorators = fixup_decorators(decorators);
  Node* mem = atomic_op_membar_prologue(kit, decorators, alias_idx);

  Node* result = fetch_and_add_at_resolved(kit, obj, adr, adr_type, alias_idx, new_val, value_type,
                                           mem, bt, decorators);

  atomic_op_membar_epilogue(kit, decorators);
  return result;
}

void C2BarrierSetCodeGen::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) {
  // Exclude the header but include array length to copy by 8 bytes words.
  // Can't use base_offset_in_bytes(bt) since basic type is unknown.
  int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
                            instanceOopDesc::base_offset_in_bytes();
  // base_off:
  // 8  - 32-bit VM
  // 12 - 64-bit VM, compressed klass
  // 16 - 64-bit VM, normal klass
  if (base_off % BytesPerLong != 0) {
    assert(UseCompressedClassPointers, "");
    if (is_array) {
      // Exclude length to copy by 8 bytes words.
      base_off += sizeof(int);
    } else {
      // Include klass to copy by 8 bytes words.
      base_off = instanceOopDesc::klass_offset_in_bytes();
    }
    assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
  }

  Node* src_base = kit->basic_plus_adr(src, base_off);
  Node* dst_base = kit->basic_plus_adr(dst, base_off);

  // Compute the length also, if needed:
  Node* countx = size;
  countx = kit->gvn().transform(new SubXNode(countx, kit->MakeConX(base_off)));
  countx = kit->gvn().transform(new URShiftXNode(countx, kit->intcon(LogBytesPerLong)));

  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;

  ArrayCopyNode* ac = ArrayCopyNode::make(kit, false, src_base, NULL, dst_base, NULL, countx, false, false);
  ac->set_clonebasic();

  Node* n = kit->gvn().transform(ac);
  if (n == ac) {
    kit->set_predefined_output_for_runtime_call(ac, ac->in(TypeFunc::Memory), raw_adr_type);
  } else {
    kit->set_all_memory(n);
  }
}

C2DecoratorSet C2BarrierSetCodeGen::fixup_decorators(C2DecoratorSet decorators) {
  bool is_volatile = (decorators & C2_MO_VOLATILE) != 0;
  bool is_acquire = (decorators & C2_MO_ACQUIRE) != 0;
  bool is_release = (decorators & C2_MO_RELEASE) != 0;
  bool is_relaxed = (decorators & C2_MO_UNORDERED) != 0;
  bool is_atomic = (decorators & C2_ACCESS_ATOMIC) != 0;

  if (is_volatile) {
    is_acquire = true;
    is_release = true;
  }

  if (!is_acquire && !is_release) {
    is_relaxed = true;
  } else {
    is_atomic = true;
  }

  // Some accesses require access atomicity for all types, notably longs and doubles.
  // When AlwaysAtomicAccesses is enabled, all accesses are atomic.
  is_atomic = is_atomic || AlwaysAtomicAccesses;

  if (is_acquire) {
    decorators = decorators | C2_MO_ACQUIRE;
  }
  if (is_release) {
    decorators = decorators | C2_MO_RELEASE;
  }
  if (is_relaxed) {
    decorators = decorators | C2_MO_UNORDERED;
  }
  if (is_atomic) {
    decorators = decorators | C2_ACCESS_ATOMIC;
  }

  return decorators;
}