src/hotspot/share/gc/shared/c2/barrierSetC2.cpp

*** 33,46 ****
  #include "utilities/macros.hpp"
  
  // By default this is a no-op.
  void BarrierSetC2::resolve_address(C2Access& access) const { }
  
! void* C2Access::barrier_set_state() const {
    return _kit->barrier_set_state();
  }
  
  bool C2Access::needs_cpu_membar() const {
    bool mismatched = (_decorators & C2_MISMATCHED) != 0;
    bool is_unordered = (_decorators & MO_UNORDERED) != 0;
    bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
    bool in_heap = (_decorators & IN_HEAP) != 0;
--- 33,48 ----
  #include "utilities/macros.hpp"
  
  // By default this is a no-op.
  void BarrierSetC2::resolve_address(C2Access& access) const { }
  
! void* C2ParseAccess::barrier_set_state() const {
    return _kit->barrier_set_state();
  }
  
+ PhaseGVN& C2ParseAccess::gvn() const { return _kit->gvn(); }
+ 
  bool C2Access::needs_cpu_membar() const {
    bool mismatched = (_decorators & C2_MISMATCHED) != 0;
    bool is_unordered = (_decorators & MO_UNORDERED) != 0;
    bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
    bool in_heap = (_decorators & IN_HEAP) != 0;
*** 68,102 ****
    return false;
  }
  
  Node* BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
    DecoratorSet decorators = access.decorators();
-   GraphKit* kit = access.kit();
    bool mismatched = (decorators & C2_MISMATCHED) != 0;
    bool unaligned = (decorators & C2_UNALIGNED) != 0;
    bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
  
    bool in_native = (decorators & IN_NATIVE) != 0;
    assert(!in_native, "not supported yet");
  
    if (access.type() == T_DOUBLE) {
      Node* new_val = kit->dstore_rounding(val.node());
      val.set_node(new_val);
    }
  
!   MemNode::MemOrd mo = access.mem_node_mo();
! 
!   Node* store = kit->store_to_memory(kit->control(), access.addr().node(), val.node(), access.type(),
                                       access.addr().type(), mo, requires_atomic_access, unaligned, mismatched);
    access.set_raw_access(store);
    return store;
  }
  
  Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
    DecoratorSet decorators = access.decorators();
-   GraphKit* kit = access.kit();
    Node* adr = access.addr().node();
    const TypePtr* adr_type = access.addr().type();
  
    bool mismatched = (decorators & C2_MISMATCHED) != 0;
--- 70,130 ----
    return false;
  }
  
  Node* BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
    DecoratorSet decorators = access.decorators();
    bool mismatched = (decorators & C2_MISMATCHED) != 0;
    bool unaligned = (decorators & C2_UNALIGNED) != 0;
    bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
  
    bool in_native = (decorators & IN_NATIVE) != 0;
    assert(!in_native, "not supported yet");
  
+   MemNode::MemOrd mo = access.mem_node_mo();
+ 
+   Node* store;
+   if (access.is_parse_access()) {
+     C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
+ 
+     GraphKit* kit = parse_access.kit();
    if (access.type() == T_DOUBLE) {
      Node* new_val = kit->dstore_rounding(val.node());
      val.set_node(new_val);
    }
  
!   store = kit->store_to_memory(kit->control(), access.addr().node(), val.node(), access.type(),
                                 access.addr().type(), mo, requires_atomic_access, unaligned, mismatched);
    access.set_raw_access(store);
+   } else {
+     assert(!requires_atomic_access, "not yet supported");
+     assert(access.is_opt_access(), "either parse or opt access");
+     C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
+     Node* ctl = opt_access.ctl();
+     MergeMemNode* mm = opt_access.mem();
+     PhaseGVN& gvn = opt_access.gvn();
+     const TypePtr* adr_type = access.addr().type();
+     int alias = gvn.C->get_alias_index(adr_type);
+     Node* mem = mm->memory_at(alias);
+ 
+     StoreNode* st = StoreNode::make(gvn, ctl, mem, access.addr().node(), adr_type, val.node(), access.type(), mo);
+     if (unaligned) {
+       st->set_unaligned_access();
+     }
+     if (mismatched) {
+       st->set_mismatched_access();
+     }
+     store = gvn.transform(st);
+     if (store == st) {
+       mm->set_memory_at(alias, st);
+     }
+   }
    return store;
  }
  
  Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
    DecoratorSet decorators = access.decorators();
    Node* adr = access.addr().node();
    const TypePtr* adr_type = access.addr().type();
  
    bool mismatched = (decorators & C2_MISMATCHED) != 0;
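Note (not part of the webrev): the new else branch above is the optimization-time store path, which bypasses GraphKit and builds the StoreNode by hand against the right memory slice of the MergeMem. The sketch below restates that branch as a stand-alone helper under the stated assumptions; the helper name make_opt_time_store is hypothetical, but every call it makes (StoreNode::make, memory_at, set_memory_at, PhaseGVN::transform) is taken from the hunk above.

#include "opto/compile.hpp"
#include "opto/memnode.hpp"
#include "opto/phaseX.hpp"

// Hypothetical helper mirroring the C2OptAccess branch of store_at_resolved():
// build a raw StoreNode on the slice for adr_type and publish it back into
// the MergeMem if GVN did not common it away.
static Node* make_opt_time_store(PhaseGVN& gvn, Node* ctl, MergeMemNode* mm,
                                 Node* adr, const TypePtr* adr_type,
                                 Node* val, BasicType bt, MemNode::MemOrd mo,
                                 bool unaligned, bool mismatched) {
  int alias = gvn.C->get_alias_index(adr_type);   // memory slice for this address type
  Node* mem = mm->memory_at(alias);               // current memory state of that slice

  StoreNode* st = StoreNode::make(gvn, ctl, mem, adr, adr_type, val, bt, mo);
  if (unaligned) {
    st->set_unaligned_access();
  }
  if (mismatched) {
    st->set_mismatched_access();
  }
  Node* store = gvn.transform(st);
  if (store == st) {
    // The store was not subsumed by an existing node: make it the new
    // memory state of the slice so later accesses observe it.
    mm->set_memory_at(alias, st);
  }
  return store;
}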
*** 107,126 ****
    bool in_native = (decorators & IN_NATIVE) != 0;
  
    MemNode::MemOrd mo = access.mem_node_mo();
    LoadNode::ControlDependency dep = pinned ? LoadNode::Pinned : LoadNode::DependsOnlyOnTest;
-   Node* control = control_dependent ? kit->control() : NULL;
  
    Node* load;
    if (in_native) {
      load = kit->make_load(control, adr, val_type, access.type(), mo);
    } else {
      load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
                            dep, requires_atomic_access, unaligned, mismatched);
    }
    access.set_raw_access(load);
  
    return load;
  }
  
  class C2AccessFence: public StackObj {
--- 135,169 ----
    bool in_native = (decorators & IN_NATIVE) != 0;
  
    MemNode::MemOrd mo = access.mem_node_mo();
    LoadNode::ControlDependency dep = pinned ? LoadNode::Pinned : LoadNode::DependsOnlyOnTest;
  
    Node* load;
+   if (access.is_parse_access()) {
+     C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
+     GraphKit* kit = parse_access.kit();
+     Node* control = control_dependent ? kit->control() : NULL;
+ 
    if (in_native) {
      load = kit->make_load(control, adr, val_type, access.type(), mo);
    } else {
      load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
                            dep, requires_atomic_access, unaligned, mismatched);
    }
    access.set_raw_access(load);
+   } else {
+     assert(!requires_atomic_access, "not yet supported");
+     assert(access.is_opt_access(), "either parse or opt access");
+     C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
+     Node* control = control_dependent ? opt_access.ctl() : NULL;
+     MergeMemNode* mm = opt_access.mem();
+     PhaseGVN& gvn = opt_access.gvn();
+     Node* mem = mm->memory_at(gvn.C->get_alias_index(adr_type));
+     load = LoadNode::make(gvn, control, mem, adr, adr_type, val_type, access.type(), mo, dep, unaligned, mismatched);
+     load = gvn.transform(load);
+   }
  
    return load;
  }
  
  class C2AccessFence: public StackObj {
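Note (not part of the webrev): the optimization-time load branch is symmetric, it looks up the memory slice in the MergeMem, builds the LoadNode directly and lets GVN clean it up; loads do not mutate the MergeMem. A condensed sketch follows; the helper name make_opt_time_load is hypothetical, the calls themselves (LoadNode::make, memory_at, transform) are the ones used in the hunk.

#include "opto/compile.hpp"
#include "opto/memnode.hpp"
#include "opto/phaseX.hpp"

// Hypothetical helper mirroring the C2OptAccess branch of load_at_resolved().
static Node* make_opt_time_load(PhaseGVN& gvn, Node* ctl, MergeMemNode* mm,
                                Node* adr, const TypePtr* adr_type,
                                const Type* val_type, BasicType bt,
                                MemNode::MemOrd mo, LoadNode::ControlDependency dep,
                                bool unaligned, bool mismatched) {
  // Look up the memory state of the alias category covering this address.
  Node* mem = mm->memory_at(gvn.C->get_alias_index(adr_type));
  Node* load = LoadNode::make(gvn, ctl, mem, adr, adr_type, val_type, bt, mo,
                              dep, unaligned, mismatched);
  // GVN may common the load with an equivalent existing node.
  return gvn.transform(load);
}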
*** 128,148 ****
    Node* _leading_membar;
  
  public:
    C2AccessFence(C2Access& access) :
      _access(access), _leading_membar(NULL) {
!     GraphKit* kit = access.kit();
      DecoratorSet decorators = access.decorators();
  
      bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
      bool is_read = (decorators & C2_READ_ACCESS) != 0;
      bool is_atomic = is_read && is_write;
  
      bool is_volatile = (decorators & MO_SEQ_CST) != 0;
      bool is_release = (decorators & MO_RELEASE) != 0;
  
      if (is_atomic) {
        // Memory-model-wise, a LoadStore acts like a little synchronized
        // block, so needs barriers on each side. These don't translate
        // into actual barriers on most machines, but we still need rest of
        // compiler to respect ordering.
        if (is_release) {
--- 171,196 ----
    Node* _leading_membar;
  
  public:
    C2AccessFence(C2Access& access) :
      _access(access), _leading_membar(NULL) {
!     GraphKit* kit = NULL;
!     if (access.is_parse_access()) {
!       C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
!       kit = parse_access.kit();
!     }
      DecoratorSet decorators = access.decorators();
  
      bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
      bool is_read = (decorators & C2_READ_ACCESS) != 0;
      bool is_atomic = is_read && is_write;
  
      bool is_volatile = (decorators & MO_SEQ_CST) != 0;
      bool is_release = (decorators & MO_RELEASE) != 0;
  
      if (is_atomic) {
+       assert(kit != NULL, "unsupported at optimization time");
        // Memory-model-wise, a LoadStore acts like a little synchronized
        // block, so needs barriers on each side. These don't translate
        // into actual barriers on most machines, but we still need rest of
        // compiler to respect ordering.
        if (is_release) {
*** 157,180 ****
--- 205,231 ----
      } else if (is_write) {
        // If reference is volatile, prevent following memory ops from
        // floating down past the volatile write. Also prevents commoning
        // another volatile read.
        if (is_volatile || is_release) {
+         assert(kit != NULL, "unsupported at optimization time");
          _leading_membar = kit->insert_mem_bar(Op_MemBarRelease);
        }
      } else {
        // Memory barrier to prevent normal and 'unsafe' accesses from
        // bypassing each other. Happens after null checks, so the
        // exception paths do not take memory state from the memory barrier,
        // so there's no problems making a strong assert about mixing users
        // of safe & unsafe memory.
        if (is_volatile && support_IRIW_for_not_multiple_copy_atomic_cpu) {
+         assert(kit != NULL, "unsupported at optimization time");
          _leading_membar = kit->insert_mem_bar(Op_MemBarVolatile);
        }
      }
  
      if (access.needs_cpu_membar()) {
+       assert(kit != NULL, "unsupported at optimization time");
        kit->insert_mem_bar(Op_MemBarCPUOrder);
      }
  
      if (is_atomic) {
        // 4984716: MemBars must be inserted before this
*** 183,193 ****
        access.set_memory();
      }
    }
  
    ~C2AccessFence() {
!     GraphKit* kit = _access.kit();
      DecoratorSet decorators = _access.decorators();
  
      bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
      bool is_read = (decorators & C2_READ_ACCESS) != 0;
      bool is_atomic = is_read && is_write;
--- 234,248 ----
        access.set_memory();
      }
    }
  
    ~C2AccessFence() {
!     GraphKit* kit = NULL;
!     if (_access.is_parse_access()) {
!       C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(_access);
!       kit = parse_access.kit();
!     }
      DecoratorSet decorators = _access.decorators();
  
      bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
      bool is_read = (decorators & C2_READ_ACCESS) != 0;
      bool is_atomic = is_read && is_write;
*** 200,227 ****
--- 255,285 ----
      if (_access.needs_cpu_membar()) {
        kit->insert_mem_bar(Op_MemBarCPUOrder);
      }
  
      if (is_atomic) {
+       assert(kit != NULL, "unsupported at optimization time");
        if (is_acquire || is_volatile) {
          Node* n = _access.raw_access();
          Node* mb = kit->insert_mem_bar(Op_MemBarAcquire, n);
          if (_leading_membar != NULL) {
            MemBarNode::set_load_store_pair(_leading_membar->as_MemBar(), mb->as_MemBar());
          }
        }
      } else if (is_write) {
        // If not multiple copy atomic, we do the MemBarVolatile before the load.
        if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
+         assert(kit != NULL, "unsupported at optimization time");
          Node* n = _access.raw_access();
          Node* mb = kit->insert_mem_bar(Op_MemBarVolatile, n); // Use fat membar
          if (_leading_membar != NULL) {
            MemBarNode::set_store_pair(_leading_membar->as_MemBar(), mb->as_MemBar());
          }
        }
      } else {
        if (is_volatile || is_acquire) {
+         assert(kit != NULL, "unsupported at optimization time");
          Node* n = _access.raw_access();
          assert(_leading_membar == NULL || support_IRIW_for_not_multiple_copy_atomic_cpu, "no leading membar expected");
          Node* mb = kit->insert_mem_bar(Op_MemBarAcquire, n);
          mb->as_MemBar()->set_trailing_load();
        }
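Note (not part of the webrev): the pattern repeated across the C2AccessFence hunks above is that fences are only emitted at parse time, so the GraphKit is resolved conditionally and may stay NULL, and every barrier-emitting branch asserts that it is not. A minimal sketch of that idiom, with the function name made up for illustration:

#include "gc/shared/c2/barrierSetC2.hpp"
#include "opto/graphKit.hpp"

// Hypothetical illustration of the kit-guard idiom used by C2AccessFence:
// resolve a GraphKit only for parse-time accesses, and assert before any
// use that would emit a memory barrier.
static void emit_cpu_order_barrier(C2Access& access) {
  GraphKit* kit = NULL;
  if (access.is_parse_access()) {
    C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
    kit = parse_access.kit();
  }
  if (access.needs_cpu_membar()) {
    assert(kit != NULL, "unsupported at optimization time");
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  }
}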
*** 293,303 ****
    const TypePtr* adr_type = _addr.type();
    Node* adr = _addr.node();
    if (!needs_cpu_membar() && adr_type->isa_instptr()) {
      assert(adr_type->meet(TypePtr::NULL_PTR) != adr_type->remove_speculative(), "should be not null");
      intptr_t offset = Type::OffsetBot;
!     AddPNode::Ideal_base_and_offset(adr, &_kit->gvn(), offset);
      if (offset >= 0) {
        int s = Klass::layout_helper_size_in_bytes(adr_type->isa_instptr()->klass()->layout_helper());
        if (offset < s) {
          // Guaranteed to be a valid access, no need to pin it
          _decorators ^= C2_CONTROL_DEPENDENT_LOAD;
--- 351,361 ----
    const TypePtr* adr_type = _addr.type();
    Node* adr = _addr.node();
    if (!needs_cpu_membar() && adr_type->isa_instptr()) {
      assert(adr_type->meet(TypePtr::NULL_PTR) != adr_type->remove_speculative(), "should be not null");
      intptr_t offset = Type::OffsetBot;
!     AddPNode::Ideal_base_and_offset(adr, &gvn(), offset);
      if (offset >= 0) {
        int s = Klass::layout_helper_size_in_bytes(adr_type->isa_instptr()->klass()->layout_helper());
        if (offset < s) {
          // Guaranteed to be a valid access, no need to pin it
          _decorators ^= C2_CONTROL_DEPENDENT_LOAD;
*** 308,337 ****
    }
  }
  
  //--------------------------- atomic operations---------------------------------
  
! void BarrierSetC2::pin_atomic_op(C2AtomicAccess& access) const {
    if (!access.needs_pinning()) {
      return;
    }
    // SCMemProjNodes represent the memory state of a LoadStore. Their
    // main role is to prevent LoadStore nodes from being optimized away
    // when their results aren't used.
!   GraphKit* kit = access.kit();
    Node* load_store = access.raw_access();
    assert(load_store != NULL, "must pin atomic op");
    Node* proj = kit->gvn().transform(new SCMemProjNode(load_store));
    kit->set_memory(proj, access.alias_idx());
  }
  
! void C2AtomicAccess::set_memory() {
    Node *mem = _kit->memory(_alias_idx);
    _memory = mem;
  }
  
! Node* BarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
                                                     Node* new_val, const Type* value_type) const {
    GraphKit* kit = access.kit();
    MemNode::MemOrd mo = access.mem_node_mo();
    Node* mem = access.memory();
--- 366,397 ----
    }
  }
  
  //--------------------------- atomic operations---------------------------------
  
! void BarrierSetC2::pin_atomic_op(C2AtomicParseAccess& access) const {
    if (!access.needs_pinning()) {
      return;
    }
    // SCMemProjNodes represent the memory state of a LoadStore. Their
    // main role is to prevent LoadStore nodes from being optimized away
    // when their results aren't used.
!   assert(access.is_parse_access(), "entry not supported at optimization time");
!   C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
!   GraphKit* kit = parse_access.kit();
    Node* load_store = access.raw_access();
    assert(load_store != NULL, "must pin atomic op");
    Node* proj = kit->gvn().transform(new SCMemProjNode(load_store));
    kit->set_memory(proj, access.alias_idx());
  }
  
! void C2AtomicParseAccess::set_memory() {
    Node *mem = _kit->memory(_alias_idx);
    _memory = mem;
  }
  
! Node* BarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                     Node* new_val, const Type* value_type) const {
    GraphKit* kit = access.kit();
    MemNode::MemOrd mo = access.mem_node_mo();
    Node* mem = access.memory();
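Note (not part of the webrev): pin_atomic_op still requires a parse-time access. The wiring it performs is to hang an SCMemProjNode off the LoadStore node and publish it into GraphKit's memory state, so the LoadStore is not optimized away when its result is unused. A condensed, free-function restatement of that body (the free-function form is illustrative only; the calls come from the hunk):

#include "opto/graphKit.hpp"
#include "opto/memnode.hpp"

// Illustrative restatement of the body of BarrierSetC2::pin_atomic_op():
// keep a LoadStore node alive by routing its memory effect through an
// SCMemProjNode into the kit's memory state for the accessed alias category.
static void pin_load_store(GraphKit* kit, Node* load_store, uint alias_idx) {
  assert(load_store != NULL, "must pin atomic op");
  Node* proj = kit->gvn().transform(new SCMemProjNode(load_store));
  kit->set_memory(proj, alias_idx);
}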
*** 384,394 ****
  #endif
  
    return load_store;
  }
  
! Node* BarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
                                                      Node* new_val, const Type* value_type) const {
    GraphKit* kit = access.kit();
    DecoratorSet decorators = access.decorators();
    MemNode::MemOrd mo = access.mem_node_mo();
    Node* mem = access.memory();
--- 444,454 ----
  #endif
  
    return load_store;
  }
  
! Node* BarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                      Node* new_val, const Type* value_type) const {
    GraphKit* kit = access.kit();
    DecoratorSet decorators = access.decorators();
    MemNode::MemOrd mo = access.mem_node_mo();
    Node* mem = access.memory();
*** 458,468 ****
    pin_atomic_op(access);
  
    return load_store;
  }
  
! Node* BarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
    GraphKit* kit = access.kit();
    Node* mem = access.memory();
    Node* adr = access.addr().node();
    const TypePtr* adr_type = access.addr().type();
    Node* load_store = NULL;
--- 518,528 ----
    pin_atomic_op(access);
  
    return load_store;
  }
  
! Node* BarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
    GraphKit* kit = access.kit();
    Node* mem = access.memory();
    Node* adr = access.addr().node();
    const TypePtr* adr_type = access.addr().type();
    Node* load_store = NULL;
*** 506,516 ****
  #endif
  
    return load_store;
  }
  
! Node* BarrierSetC2::atomic_add_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
    Node* load_store = NULL;
    GraphKit* kit = access.kit();
    Node* adr = access.addr().node();
    const TypePtr* adr_type = access.addr().type();
    Node* mem = access.memory();
--- 566,576 ----
  #endif
  
    return load_store;
  }
  
! Node* BarrierSetC2::atomic_add_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
    Node* load_store = NULL;
    GraphKit* kit = access.kit();
    Node* adr = access.addr().node();
    const TypePtr* adr_type = access.addr().type();
    Node* mem = access.memory();
*** 536,566 ****
    pin_atomic_op(access);
  
    return load_store;
  }
  
! Node* BarrierSetC2::atomic_cmpxchg_val_at(C2AtomicAccess& access, Node* expected_val,
                                            Node* new_val, const Type* value_type) const {
    C2AccessFence fence(access);
    resolve_address(access);
    return atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
  }
  
! Node* BarrierSetC2::atomic_cmpxchg_bool_at(C2AtomicAccess& access, Node* expected_val,
                                             Node* new_val, const Type* value_type) const {
    C2AccessFence fence(access);
    resolve_address(access);
    return atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
  }
  
! Node* BarrierSetC2::atomic_xchg_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
    C2AccessFence fence(access);
    resolve_address(access);
    return atomic_xchg_at_resolved(access, new_val, value_type);
  }
  
! Node* BarrierSetC2::atomic_add_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
    C2AccessFence fence(access);
    resolve_address(access);
    return atomic_add_at_resolved(access, new_val, value_type);
  }
--- 596,626 ----
    pin_atomic_op(access);
  
    return load_store;
  }
  
! Node* BarrierSetC2::atomic_cmpxchg_val_at(C2AtomicParseAccess& access, Node* expected_val,
                                            Node* new_val, const Type* value_type) const {
    C2AccessFence fence(access);
    resolve_address(access);
    return atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
  }
  
! Node* BarrierSetC2::atomic_cmpxchg_bool_at(C2AtomicParseAccess& access, Node* expected_val,
                                             Node* new_val, const Type* value_type) const {
    C2AccessFence fence(access);
    resolve_address(access);
    return atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
  }
  
! Node* BarrierSetC2::atomic_xchg_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
    C2AccessFence fence(access);
    resolve_address(access);
    return atomic_xchg_at_resolved(access, new_val, value_type);
  }
  
! Node* BarrierSetC2::atomic_add_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
    C2AccessFence fence(access);
    resolve_address(access);
    return atomic_add_at_resolved(access, new_val, value_type);
  }
*** 592,602 ****
    countx = kit->gvn().transform(new SubXNode(countx, kit->MakeConX(base_off)));
    countx = kit->gvn().transform(new URShiftXNode(countx, kit->intcon(LogBytesPerLong) ));
  
    const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
  
!   ArrayCopyNode* ac = ArrayCopyNode::make(kit, false, src_base, NULL, dst_base, NULL, countx, false, false);
    ac->set_clonebasic();
    Node* n = kit->gvn().transform(ac);
    if (n == ac) {
      ac->_adr_type = TypeRawPtr::BOTTOM;
      kit->set_predefined_output_for_runtime_call(ac, ac->in(TypeFunc::Memory), raw_adr_type);
--- 652,662 ----
    countx = kit->gvn().transform(new SubXNode(countx, kit->MakeConX(base_off)));
    countx = kit->gvn().transform(new URShiftXNode(countx, kit->intcon(LogBytesPerLong) ));
  
    const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
  
!   ArrayCopyNode* ac = ArrayCopyNode::make(kit, false, src_base, NULL, dst_base, NULL, countx, true, false);
    ac->set_clonebasic();
    Node* n = kit->gvn().transform(ac);
    if (n == ac) {
      ac->_adr_type = TypeRawPtr::BOTTOM;
      kit->set_predefined_output_for_runtime_call(ac, ac->in(TypeFunc::Memory), raw_adr_type);
*** 729,733 ****
--- 789,798 ----
      fast_oop_rawmem = macro->make_store(fast_oop_ctrl, store_eden_top, alloc_bytes_adr,
                                          0, new_alloc_bytes, T_LONG);
    }
    return fast_oop;
  }
+ 
+ void BarrierSetC2::clone_barrier_at_expansion(ArrayCopyNode* ac, Node* call, PhaseIterGVN& igvn) const {
+   // no barrier
+   igvn.replace_node(ac, call);
+ }
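Note (not part of the webrev): the new clone_barrier_at_expansion hook defaults to folding the clone's ArrayCopyNode into the runtime call during macro expansion. A GC that needs extra work around an object clone would override it; a minimal hypothetical override shape, with the class name and the omitted barrier wiring being assumptions, not anything defined by this change:

#include "gc/shared/c2/barrierSetC2.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/phaseX.hpp"

// Hypothetical subclass showing the shape of an override: a real GC would
// stitch its own barrier nodes around 'call' before retiring the
// ArrayCopyNode the same way the default implementation does.
class MyBarrierSetC2 : public BarrierSetC2 {
public:
  virtual void clone_barrier_at_expansion(ArrayCopyNode* ac, Node* call, PhaseIterGVN& igvn) const {
    // GC-specific barrier emission would go here (omitted in this sketch).
    igvn.replace_node(ac, call);
  }
};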