
src/share/vm/opto/memnode.cpp

*** 2549,2597 ****
  Node *StoreNode::Identity( PhaseTransform *phase ) {
    Node* mem = in(MemNode::Memory);
    Node* adr = in(MemNode::Address);
    Node* val = in(MemNode::ValueIn);
  
    // Load then Store? Then the Store is useless
    if (val->is_Load() &&
        val->in(MemNode::Address)->eqv_uncast(adr) &&
        val->in(MemNode::Memory )->eqv_uncast(mem) &&
        val->as_Load()->store_Opcode() == Opcode()) {
!     return mem;
    }
  
    // Two stores in a row of the same value?
    if (mem->is_Store() &&
        mem->in(MemNode::Address)->eqv_uncast(adr) &&
        mem->in(MemNode::ValueIn)->eqv_uncast(val) &&
        mem->Opcode() == Opcode()) {
!     return mem;
    }
  
    // Store of zero anywhere into a freshly-allocated object?
    // Then the store is useless.
    // (It must already have been captured by the InitializeNode.)
!   if (ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
      // a newly allocated object is already all-zeroes everywhere
      if (mem->is_Proj() && mem->in(0)->is_Allocate()) {
!       return mem;
      }
  
      // the store may also apply to zero-bits in an earlier object
      Node* prev_mem = find_previous_store(phase);
      // Steps (a), (b): Walk past independent stores to find an exact match.
      if (prev_mem != NULL) {
        Node* prev_val = can_see_stored_value(prev_mem, phase);
        if (prev_val != NULL && phase->eqv(prev_val, val)) {
          // prev_val and val might differ by a cast; it would be good
          // to keep the more informative of the two.
!         return mem;
        }
      }
    }
  
!   return this;
  }
  
  //------------------------------match_edge-------------------------------------
  // Do we Match on this edge index or not? Match only memory & value
  uint StoreNode::match_edge(uint idx) const {
--- 2549,2614 ----
  Node *StoreNode::Identity( PhaseTransform *phase ) {
    Node* mem = in(MemNode::Memory);
    Node* adr = in(MemNode::Address);
    Node* val = in(MemNode::ValueIn);
  
+   Node* result = this;
+ 
    // Load then Store? Then the Store is useless
    if (val->is_Load() &&
        val->in(MemNode::Address)->eqv_uncast(adr) &&
        val->in(MemNode::Memory )->eqv_uncast(mem) &&
        val->as_Load()->store_Opcode() == Opcode()) {
!     result = mem;
    }
  
    // Two stores in a row of the same value?
    if (mem->is_Store() &&
        mem->in(MemNode::Address)->eqv_uncast(adr) &&
        mem->in(MemNode::ValueIn)->eqv_uncast(val) &&
        mem->Opcode() == Opcode()) {
!     result = mem;
    }
  
    // Store of zero anywhere into a freshly-allocated object?
    // Then the store is useless.
    // (It must already have been captured by the InitializeNode.)
!   if (result == this &&
!       ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
      // a newly allocated object is already all-zeroes everywhere
      if (mem->is_Proj() && mem->in(0)->is_Allocate()) {
!       result = mem;
      }
  
+     if (result == this) {
      // the store may also apply to zero-bits in an earlier object
      Node* prev_mem = find_previous_store(phase);
      // Steps (a), (b): Walk past independent stores to find an exact match.
      if (prev_mem != NULL) {
        Node* prev_val = can_see_stored_value(prev_mem, phase);
        if (prev_val != NULL && phase->eqv(prev_val, val)) {
          // prev_val and val might differ by a cast; it would be good
          // to keep the more informative of the two.
!         result = mem;
!       }
      }
    }
  }
  
!   if (result != this && phase->is_IterGVN() != NULL) {
!     MemBarNode* trailing = trailing_membar();
!     if (trailing != NULL) {
! #ifdef ASSERT
!       const TypeOopPtr* t_oop = phase->type(in(Address))->isa_oopptr();
!       assert(t_oop == NULL || t_oop->is_known_instance_field(), "only for non escaping objects");
! #endif
!       PhaseIterGVN* igvn = phase->is_IterGVN();
!       trailing->remove(igvn);
!     }
!   }
! 
!   return result;
  }
  
  //------------------------------match_edge-------------------------------------
  // Do we Match on this edge index or not? Match only memory & value
  uint StoreNode::match_edge(uint idx) const {
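
Note on the hunk above: the early "return mem;" exits are all funneled through the new result local, so the function has a single exit point at which a store that is about to be replaced can also take its trailing barrier with it (the trailing_membar()/remove() machinery is added further down in this change). A minimal standalone model of that single-exit pattern, with hypothetical names and no HotSpot dependencies:

#include <cstddef>

struct Barrier {
  void remove() { /* unlink from the graph */ }
};

// Model of StoreNode::Identity after the rewrite: decide on a replacement
// first, then do the one-time cleanup only if a replacement was found.
int identity(int self, int mem, bool redundant, Barrier* trailing) {
  int result = self;
  if (redundant) {
    result = mem;                      // was: return mem;
  }
  if (result != self && trailing != NULL) {
    trailing->remove();                // mirrors trailing->remove(igvn)
  }
  return result;                       // was: return this; or return mem;
}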
*** 2666,2675 ****
--- 2683,2718 ----
      }
    }
    return true;
  }
  
+ MemBarNode* StoreNode::trailing_membar() const {
+   if (is_release()) {
+     MemBarNode* trailing_mb = NULL;
+     for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
+       Node* u = fast_out(i);
+       if (u->is_MemBar()) {
+         if (u->as_MemBar()->trailing_store()) {
+           assert(u->Opcode() == Op_MemBarVolatile, "");
+           assert(trailing_mb == NULL, "only one");
+           trailing_mb = u->as_MemBar();
+ #ifdef ASSERT
+           Node* leading = u->as_MemBar()->leading_membar();
+           assert(leading->Opcode() == Op_MemBarRelease, "incorrect membar");
+           assert(leading->as_MemBar()->leading_store(), "incorrect membar pair");
+           assert(leading->as_MemBar()->trailing_membar() == u, "incorrect membar pair");
+ #endif
+         } else {
+           assert(u->as_MemBar()->standalone(), "");
+         }
+       }
+     }
+     return trailing_mb;
+   }
+   return NULL;
+ }
+ 
  //=============================================================================
  //------------------------------Ideal------------------------------------------
  // If the store is from an AND mask that leaves the low bits untouched, then
  // we can skip the AND operation. If the store is from a sign-extension
  // (a left shift, then right shift) we can skip both.
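
The query above is only meaningful for release stores: in C2 a volatile store is bracketed by a leading MemBarRelease and a trailing MemBarVolatile, which is exactly what the debug-only asserts check. A hedged sketch of how a caller is expected to use it, assuming HotSpot context and an available PhaseIterGVN (this mirrors the StoreNode::Identity() change above rather than introducing a new call site):

// Sketch only: drop the barriers of a release store proven redundant.
static void drop_dead_store_barriers(StoreNode* st, PhaseIterGVN* igvn) {
  MemBarNode* trailing = st->trailing_membar(); // NULL unless st->is_release()
  if (trailing != NULL) {
    // MemBarNode::remove() (added below) also removes the paired
    // leading MemBarRelease, keeping the pair consistent.
    trailing->remove(igvn);
  }
}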
*** 2778,2787 ****
--- 2821,2854 ----
      return false;
    }
    return true;
  }
  
+ MemBarNode* LoadStoreNode::trailing_membar() const {
+   MemBarNode* trailing = NULL;
+   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
+     Node* u = fast_out(i);
+     if (u->is_MemBar()) {
+       if (u->as_MemBar()->trailing_load_store()) {
+         assert(u->Opcode() == Op_MemBarAcquire, "");
+         assert(trailing == NULL, "only one");
+         trailing = u->as_MemBar();
+ #ifdef ASSERT
+         Node* leading = trailing->leading_membar();
+         assert(support_IRIW_for_not_multiple_copy_atomic_cpu || leading->Opcode() == Op_MemBarRelease, "incorrect membar");
+         assert(leading->as_MemBar()->leading_load_store(), "incorrect membar pair");
+         assert(leading->as_MemBar()->trailing_membar() == trailing, "incorrect membar pair");
+ #endif
+       } else {
+         assert(u->as_MemBar()->standalone(), "wrong barrier kind");
+       }
+     }
+   }
+ 
+   return trailing;
+ }
+ 
  uint LoadStoreNode::size_of() const { return sizeof(*this); }
  
  //=============================================================================
  //----------------------------------LoadStoreConditionalNode--------------------
  LoadStoreConditionalNode::LoadStoreConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex ) : LoadStoreNode(c, mem, adr, val, NULL, TypeInt::BOOL, 5) {
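
The LoadStore variant covers atomics such as CAS, which are bracketed by a leading barrier and a trailing MemBarAcquire; on platforms where support_IRIW_for_not_multiple_copy_atomic_cpu is true the leading barrier is not necessarily a MemBarRelease, hence the weaker assert. A symmetric hedged sketch (hypothetical helper, assumes HotSpot context):

// Sketch only: the atomic/CAS counterpart of the store case above.
static void drop_dead_cas_barriers(LoadStoreNode* cas, PhaseIterGVN* igvn) {
  MemBarNode* trailing = cas->trailing_membar(); // trailing MemBarAcquire, if any
  if (trailing != NULL) {
    trailing->remove(igvn);  // removes the leading partner as well
  }
}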
*** 3012,3022 ****
  }
  
  //=============================================================================
  MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
    : MultiNode(TypeFunc::Parms + (precedent == NULL? 0: 1)),
!     _adr_type(C->get_adr_type(alias_idx))
  {
    init_class_id(Class_MemBar);
    Node* top = C->top();
    init_req(TypeFunc::I_O,top);
    init_req(TypeFunc::FramePtr,top);
--- 3079,3092 ----
  }
  
  //=============================================================================
  MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
    : MultiNode(TypeFunc::Parms + (precedent == NULL? 0: 1)),
!     _adr_type(C->get_adr_type(alias_idx)), _kind(Standalone)
! #ifdef ASSERT
!   , _pair_idx(0)
! #endif
  {
    init_class_id(Class_MemBar);
    Node* top = C->top();
    init_req(TypeFunc::I_O,top);
    init_req(TypeFunc::FramePtr,top);
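
The new initializers rely on state declared in memnode.hpp, which this page does not show. From the uses in this file, the header side of the change presumably looks like the sketch below; the enumerator and member names are taken from this file, everything else is an assumption:

// Presumed shape of the companion memnode.hpp change (not the actual hunk).
class MemBarNode : public MultiNode {
  enum Kind {
    Standalone,
    LeadingStore,     TrailingStore,      // pair around a volatile store
    LeadingLoadStore, TrailingLoadStore   // pair around a CAS/atomic op
  } _kind;
#ifdef ASSERT
  uint _pair_idx;  // both members of a pair record the leading node's _idx
#endif
 public:
  bool leading_store() const       { return _kind == LeadingStore; }
  bool trailing_store() const      { return _kind == TrailingStore; }
  bool leading_load_store() const  { return _kind == LeadingLoadStore; }
  bool trailing_load_store() const { return _kind == TrailingLoadStore; }
  bool leading() const    { return _kind == LeadingStore || _kind == LeadingLoadStore; }
  bool trailing() const   { return _kind == TrailingStore || _kind == TrailingLoadStore; }
  bool standalone() const { return _kind == Standalone; }
  static void set_store_pair(MemBarNode* leading, MemBarNode* trailing);
  static void set_load_store_pair(MemBarNode* leading, MemBarNode* trailing);
  MemBarNode* trailing_membar() const;
  MemBarNode* leading_membar() const;
  void remove(PhaseIterGVN* igvn);
};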
*** 3046,3055 ****
--- 3116,3140 ----
    case Op_MemBarStoreStore: return new(C) MemBarStoreStoreNode(C, atp, pn);
    default: ShouldNotReachHere(); return NULL;
    }
  }
  
+ void MemBarNode::remove(PhaseIterGVN *igvn) {
+   if (outcnt() != 2) {
+     return;
+   }
+   if (trailing_store() || trailing_load_store()) {
+     MemBarNode* leading = leading_membar();
+     if (leading != NULL) {
+       assert(leading->trailing_membar() == this, "inconsistent leading/trailing membars");
+       leading->remove(igvn);
+     }
+   }
+   igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
+   igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
+ }
+ 
  //------------------------------Ideal------------------------------------------
  // Return a node which is more "ideal" than the current node.  Strip out
  // control copies
  Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
    if (remove_dead_region(phase, can_reshape))  return this;
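
remove() is deliberately conservative: if outcnt() != 2 the barrier still has users other than its Control and Memory projections and is left in place; when it does fire on a trailing barrier it first removes the leading partner, so a pair never becomes half-deleted. Hedged usage sketch (assumes HotSpot context):

// Sketch only: splice a barrier out of both the control and memory graphs.
// The guard inside remove() makes this a no-op unless the barrier's only
// remaining users are its two projections.
static void try_splice_out(MemBarNode* mb, PhaseIterGVN* igvn) {
  mb->remove(igvn);
  // Users of proj_out(TypeFunc::Memory)/proj_out(TypeFunc::Control) are now
  // rewired to in(TypeFunc::Memory)/in(TypeFunc::Control) respectively.
}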
*** 3092,3111 ****
      }
    } else if (opc == Op_MemBarRelease) {
      // Final field stores.
      Node* alloc = AllocateNode::Ideal_allocation(in(MemBarNode::Precedent), phase);
      if ((alloc != NULL) && alloc->is_Allocate() &&
!         alloc->as_Allocate()->_is_non_escaping) {
        // The allocated object does not escape.
        eliminate = true;
      }
    }
    if (eliminate) {
      // Replace MemBar projections by its inputs.
      PhaseIterGVN* igvn = phase->is_IterGVN();
!     igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
!     igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
      // Must return either the original node (now dead) or a new node
      // (Do not return a top here, since that would break the uniqueness of top.)
      return new (phase->C) ConINode(TypeInt::ZERO);
    }
  }
--- 3177,3197 ----
      }
    } else if (opc == Op_MemBarRelease) {
      // Final field stores.
      Node* alloc = AllocateNode::Ideal_allocation(in(MemBarNode::Precedent), phase);
      if ((alloc != NULL) && alloc->is_Allocate() &&
!         AARCH64_ONLY ( alloc->as_Allocate()->does_not_escape_thread() )
!         NOT_AARCH64  ( alloc->as_Allocate()->_is_non_escaping )
!         ) {
        // The allocated object does not escape.
        eliminate = true;
      }
    }
    if (eliminate) {
      // Replace MemBar projections by its inputs.
      PhaseIterGVN* igvn = phase->is_IterGVN();
!     remove(igvn);
      // Must return either the original node (now dead) or a new node
      // (Do not return a top here, since that would break the uniqueness of top.)
      return new (phase->C) ConINode(TypeInt::ZERO);
    }
  }
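
AARCH64_ONLY/NOT_AARCH64 select exactly one of the two escape tests at compile time; the 8u AArch64 port provides AllocateNode::does_not_escape_thread() where mainline 8u exposes the _is_non_escaping field. For reference, the selector macros are defined along these lines in utilities/macros.hpp (reproduced from memory, treat as a sketch):

#ifdef AARCH64
#define AARCH64_ONLY(code) code
#define NOT_AARCH64(code)
#else
#define AARCH64_ONLY(code)
#define NOT_AARCH64(code) code
#endif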
*** 3130,3139 ****
--- 3216,3356 ----
    }
    ShouldNotReachHere();
    return NULL;
  }
  
+ void MemBarNode::set_store_pair(MemBarNode* leading, MemBarNode* trailing) {
+   trailing->_kind = TrailingStore;
+   leading->_kind = LeadingStore;
+ #ifdef ASSERT
+   trailing->_pair_idx = leading->_idx;
+   leading->_pair_idx = leading->_idx;
+ #endif
+ }
+ 
+ void MemBarNode::set_load_store_pair(MemBarNode* leading, MemBarNode* trailing) {
+   trailing->_kind = TrailingLoadStore;
+   leading->_kind = LeadingLoadStore;
+ #ifdef ASSERT
+   trailing->_pair_idx = leading->_idx;
+   leading->_pair_idx = leading->_idx;
+ #endif
+ }
+ 
+ MemBarNode* MemBarNode::trailing_membar() const {
+   ResourceMark rm;
+   Node* trailing = (Node*)this;
+   VectorSet seen(Thread::current()->resource_area());
+ 
+   Node_Stack multis(0);
+   do {
+     Node* c = trailing;
+     uint i = 0;
+     do {
+       trailing = NULL;
+       for (; i < c->outcnt(); i++) {
+         Node* next = c->raw_out(i);
+         if (next != c && next->is_CFG()) {
+           if (c->is_MultiBranch()) {
+             if (multis.node() == c) {
+               multis.set_index(i+1);
+             } else {
+               multis.push(c, i+1);
+             }
+           }
+           trailing = next;
+           break;
+         }
+       }
+       if (trailing != NULL && !seen.test_set(trailing->_idx)) {
+         break;
+       }
+       while (multis.size() > 0) {
+         c = multis.node();
+         i = multis.index();
+         if (i < c->req()) {
+           break;
+         }
+         multis.pop();
+       }
+     } while (multis.size() > 0);
+   } while (!trailing->is_MemBar() || !trailing->as_MemBar()->trailing());
+ 
+   MemBarNode* mb = trailing->as_MemBar();
+   assert((mb->_kind == TrailingStore && _kind == LeadingStore) ||
+          (mb->_kind == TrailingLoadStore && _kind == LeadingLoadStore), "bad trailing membar");
+   assert(mb->_pair_idx == _pair_idx, "bad trailing membar");
+   return mb;
+ }
+ 
+ MemBarNode* MemBarNode::leading_membar() const {
+   ResourceMark rm;
+   VectorSet seen(Thread::current()->resource_area());
+   Node_Stack regions(0);
+   Node* leading = in(0);
+   while (leading != NULL && (!leading->is_MemBar() || !leading->as_MemBar()->leading())) {
+     while (leading == NULL || leading->is_top() || seen.test_set(leading->_idx)) {
+       leading = NULL;
+       while (regions.size() > 0 && leading == NULL) {
+         Node* r = regions.node();
+         uint i = regions.index();
+         if (i < r->req()) {
+           leading = r->in(i);
+           regions.set_index(i+1);
+         } else {
+           regions.pop();
+         }
+       }
+       if (leading == NULL) {
+         assert(regions.size() == 0, "all paths should have been tried");
+         return NULL;
+       }
+     }
+     if (leading->is_Region()) {
+       regions.push(leading, 2);
+       leading = leading->in(1);
+     } else {
+       leading = leading->in(0);
+     }
+   }
+ #ifdef ASSERT
+   Unique_Node_List wq;
+   wq.push((Node*)this);
+   uint found = 0;
+   for (uint i = 0; i < wq.size(); i++) {
+     Node* n = wq.at(i);
+     if (n->is_Region()) {
+       for (uint j = 1; j < n->req(); j++) {
+         Node* in = n->in(j);
+         if (in != NULL && !in->is_top()) {
+           wq.push(in);
+         }
+       }
+     } else {
+       if (n->is_MemBar() && n->as_MemBar()->leading()) {
+         assert(n == leading, "consistency check failed");
+         found++;
+       } else {
+         Node* in = n->in(0);
+         if (in != NULL && !in->is_top()) {
+           wq.push(in);
+         }
+       }
+     }
+   }
+   assert(found == 1 || (found == 0 && leading == NULL), "consistency check failed");
+ #endif
+   if (leading == NULL) {
+     return NULL;
+   }
+   MemBarNode* mb = leading->as_MemBar();
+   assert((mb->_kind == LeadingStore && _kind == TrailingStore) ||
+          (mb->_kind == LeadingLoadStore && _kind == TrailingLoadStore), "bad leading membar");
+   assert(mb->_pair_idx == _pair_idx, "bad leading membar");
+   return mb;
+ }
+ 
  //===========================InitializeNode====================================
  // SUMMARY:
  // This node acts as a memory barrier on raw memory, after some raw stores.
  // The 'cooked' oop value feeds from the Initialize, not the Allocation.
  // The Initialize can 'capture' suitably constrained stores as raw inits.
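
The two walks above recover a barrier's partner from the CFG alone; no direct edge between the pair is stored, only the Kind tag and, in debug builds, the shared _pair_idx. The tagging happens at barrier-creation time through set_store_pair()/set_load_store_pair(); those call sites live in other files of this change, so the following is only a hedged sketch of the protocol:

// Sketch only (assumes HotSpot context; actual call sites not on this page).
static void emit_volatile_store_barriers(Compile* C) {
  MemBarNode* leading  = MemBarNode::make(C, Op_MemBarRelease);
  MemBarNode* trailing = MemBarNode::make(C, Op_MemBarVolatile);
  // ... wire leading -> StoreX -> trailing into the control flow ...
  MemBarNode::set_store_pair(leading, trailing); // LeadingStore/TrailingStore
  // Afterwards either end can find the other through the graph:
  //   leading->trailing_membar() walks CFG successors to the trailing() membar,
  //   trailing->leading_membar() walks in(0)/Region inputs to the leading() one.
}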