src/share/vm/opto/memnode.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File hotspot Cdiff src/share/vm/opto/memnode.cpp

src/share/vm/opto/memnode.cpp

Print this page
rev 8052 : castpp gcm
rev 8571 : 8080289: Intermediate writes in a loop not eliminated by optimizer
Summary: Move stores out of a loop (to before or after the loop) when possible
Reviewed-by:

*** 2419,2462 **** Node* p = MemNode::Ideal_common(phase, can_reshape); if (p) return (p == NodeSentinel) ? NULL : p; Node* mem = in(MemNode::Memory); Node* address = in(MemNode::Address); - // Back-to-back stores to same address? Fold em up. Generally // unsafe if I have intervening uses... Also disallowed for StoreCM // since they must follow each StoreP operation. Redundant StoreCMs // are eliminated just before matching in final_graph_reshape. ! if (mem->is_Store() && mem->in(MemNode::Address)->eqv_uncast(address) && ! mem->Opcode() != Op_StoreCM) { // Looking at a dead closed cycle of memory? ! assert(mem != mem->in(MemNode::Memory), "dead loop in StoreNode::Ideal"); ! ! assert(Opcode() == mem->Opcode() || ! phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw, ! "no mismatched stores, except on raw memory"); ! if (mem->outcnt() == 1 && // check for intervening uses ! mem->as_Store()->memory_size() <= this->memory_size()) { ! // If anybody other than 'this' uses 'mem', we cannot fold 'mem' away. ! // For example, 'mem' might be the final state at a conditional return. ! // Or, 'mem' might be used by some node which is live at the same time ! // 'this' is live, which might be unschedulable. So, require exactly ! // ONE user, the 'this' store, until such time as we clone 'mem' for ! // each of 'mem's uses (thus making the exactly-1-user-rule hold true). ! if (can_reshape) { // (%%% is this an anachronism?) ! set_req_X(MemNode::Memory, mem->in(MemNode::Memory), ! phase->is_IterGVN()); } else { // It's OK to do this in the parser, since DU info is always accurate, // and the parser always refers to nodes via SafePointNode maps. ! set_req(MemNode::Memory, mem->in(MemNode::Memory)); } return this; } } // Capture an unaliased, unconditional, simple store into an initializer. // Or, if it is independent of the allocation, hoist it above the allocation. 
if (ReduceFieldZeroing && /*can_reshape &&*/ mem->is_Proj() && mem->in(0)->is_Initialize()) { InitializeNode* init = mem->in(0)->as_Initialize(); --- 2419,2475 ---- Node* p = MemNode::Ideal_common(phase, can_reshape); if (p) return (p == NodeSentinel) ? NULL : p; Node* mem = in(MemNode::Memory); Node* address = in(MemNode::Address); // Back-to-back stores to same address? Fold em up. Generally // unsafe if I have intervening uses... Also disallowed for StoreCM // since they must follow each StoreP operation. Redundant StoreCMs // are eliminated just before matching in final_graph_reshape. ! { ! bool improved = false; ! Node* st = mem; ! Node* prev = this; ! // If anybody other than the previous Store on the memory chain ! // uses 'st', we cannot fold 'st' away. For example, 'st' ! // might be the final state at a conditional return. Or, 'st' ! // might be used by some node which is live at the same time ! // 'st' is live, which might be unschedulable. So, require ! // exactly ONE user, the 'prev' store, until such time as we clone ! // 'mem' for each of 'mem's uses (thus making the ! // exactly-1-user-rule hold true). ! while (st->is_Store() && st->outcnt() == 1 && st->Opcode() != Op_StoreCM) { // Looking at a dead closed cycle of memory? ! assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal"); ! assert(Opcode() == st->Opcode() || ! st->Opcode() == Op_StoreVector || ! Opcode() == Op_StoreVector || ! phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw || ! (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI), // expanded ClearArrayNode ! err_msg_res("no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()])); ! if (st->in(MemNode::Address)->eqv_uncast(address) && ! st->as_Store()->memory_size() <= this->memory_size()) { ! phase->igvn_rehash_node_delayed(prev); ! if (can_reshape) { ! 
prev->set_req_X(MemNode::Memory, st->in(MemNode::Memory), phase->is_IterGVN()); } else { // It's OK to do this in the parser, since DU info is always accurate, // and the parser always refers to nodes via SafePointNode maps. ! prev->set_req(MemNode::Memory, st->in(MemNode::Memory)); ! } ! improved = (prev == this); } + prev = st; + st = st->in(MemNode::Memory); + } + if (improved) { return this; } } + // Capture an unaliased, unconditional, simple store into an initializer. // Or, if it is independent of the allocation, hoist it above the allocation. if (ReduceFieldZeroing && /*can_reshape &&*/ mem->is_Proj() && mem->in(0)->is_Initialize()) { InitializeNode* init = mem->in(0)->as_Initialize();
src/share/vm/opto/memnode.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File