src/share/vm/opto/memnode.cpp
*** old/src/share/vm/opto/memnode.cpp	Wed Jun 10 16:45:38 2015
--- new/src/share/vm/opto/memnode.cpp	Wed Jun 10 16:45:37 2015

*** 2419,2462 ****
--- 2419,2475 ----
      Node* p = MemNode::Ideal_common(phase, can_reshape);
      if (p)  return (p == NodeSentinel) ? NULL : p;
  
      Node* mem     = in(MemNode::Memory);
      Node* address = in(MemNode::Address);
  
      // Back-to-back stores to same address?  Fold em up.  Generally
      // unsafe if I have intervening uses...  Also disallowed for StoreCM
      // since they must follow each StoreP operation.  Redundant StoreCMs
      // are eliminated just before matching in final_graph_reshape.
!     if (mem->is_Store() && mem->in(MemNode::Address)->eqv_uncast(address) &&
!         mem->Opcode() != Op_StoreCM) {
+     {
+       bool improved = false;
+       Node* st = mem;
+       Node* prev = this;
+       // If anybody other than the previous Store on the memory chain
+       // uses 'st', we cannot fold 'st' away.  For example, 'st'
+       // might be the final state at a conditional return.  Or, 'st'
+       // might be used by some node which is live at the same time
+       // 'st' is live, which might be unschedulable.  So, require
+       // exactly ONE user, the 'prev' store, until such time as we clone
+       // 'mem' for each of 'mem's uses (thus making the
+       // exactly-1-user-rule hold true).
+       while (st->is_Store() && st->outcnt() == 1 && st->Opcode() != Op_StoreCM) {
        // Looking at a dead closed cycle of memory?
!       assert(mem != mem->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
!       assert(Opcode() == mem->Opcode() ||
!              phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw,
!              "no mismatched stores, except on raw memory");
+         assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
+         assert(Opcode() == st->Opcode() ||
+                st->Opcode() == Op_StoreVector ||
+                Opcode() == Op_StoreVector ||
+                phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
+                (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI), // expanded ClearArrayNode
+                err_msg_res("no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]));
  
!       if (mem->outcnt() == 1 &&           // check for intervening uses
!           mem->as_Store()->memory_size() <= this->memory_size()) {
!         // If anybody other than 'this' uses 'mem', we cannot fold 'mem' away.
!         // For example, 'mem' might be the final state at a conditional return.
!         // Or, 'mem' might be used by some node which is live at the same time
!         // 'this' is live, which might be unschedulable.  So, require exactly
!         // ONE user, the 'this' store, until such time as we clone 'mem' for
!         // each of 'mem's uses (thus making the exactly-1-user-rule hold true).
!         if (can_reshape) {  // (%%% is this an anachronism?)
!           set_req_X(MemNode::Memory, mem->in(MemNode::Memory),
!                     phase->is_IterGVN());
+         if (st->in(MemNode::Address)->eqv_uncast(address) &&
+             st->as_Store()->memory_size() <= this->memory_size()) {
+           phase->igvn_rehash_node_delayed(prev);
+           if (can_reshape) {
+             prev->set_req_X(MemNode::Memory, st->in(MemNode::Memory), phase->is_IterGVN());
          } else {
            // It's OK to do this in the parser, since DU info is always accurate,
            // and the parser always refers to nodes via SafePointNode maps.
!           set_req(MemNode::Memory, mem->in(MemNode::Memory));
+           prev->set_req(MemNode::Memory, st->in(MemNode::Memory));
+           }
+           improved = (prev == this);
        }
+         prev = st;
+         st = st->in(MemNode::Memory);
+       }
+       if (improved) {
        return this;
      }
    }
+ 
    // Capture an unaliased, unconditional, simple store into an initializer.
    // Or, if it is independent of the allocation, hoist it above the allocation.
    if (ReduceFieldZeroing && /*can_reshape &&*/
        mem->is_Proj() && mem->in(0)->is_Initialize()) {
      InitializeNode* init = mem->in(0)->as_Initialize();
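The new code replaces the old single-step fold of one adjacent store with a walk up the memory chain, so a whole run of overwritten back-to-back stores can be bypassed in one Ideal call. Below is a minimal, self-contained sketch of that walk. The Store struct, its fields, and fold_back_to_back_stores are hypothetical simplifications, not the real HotSpot Node API; use-count maintenance, the StoreCM and vector-store checks, and IGVN rehashing are deliberately elided.

    #include <cstddef>

    // Hypothetical stand-in for a store node on the memory chain.
    struct Store {
      Store* mem;       // next-older store on the memory chain
      long   address;   // simplified: addresses compare by value
      int    size;      // number of bytes written
      int    outcnt;    // number of users of this node
    };

    // Sketch of the new while-loop: walking older stores from 'self',
    // bypass any store to the same address that writes no more bytes
    // than 'self'.  Each visited store must have exactly one user
    // (its successor 'prev'), or folding it could break other users.
    static bool fold_back_to_back_stores(Store* self) {
      bool improved = false;
      Store* prev = self;
      Store* st   = self->mem;
      while (st != NULL && st->outcnt == 1) {
        if (st->address == self->address && st->size <= self->size) {
          prev->mem = st->mem;       // bypass the now-dead store 'st'
          if (prev == self) {
            improved = true;         // 'self' itself got a new memory input
          }
        }
        prev = st;                   // keep walking; 'st' stays reachable
        st   = st->mem;              // through our local pointer even if unlinked
      }
      return improved;               // report progress only if 'self' changed
    }

In the patch itself the same decisions are made with st->is_Store(), eqv_uncast() on the address inputs, and memory_size(), and the edge update goes through set_req_X()/set_req() plus igvn_rehash_node_delayed() so the IGVN worklist stays consistent when 'prev' is rewired.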
