--- old/src/share/vm/opto/memnode.cpp 2015-06-10 16:45:38.029946402 +0200
+++ new/src/share/vm/opto/memnode.cpp 2015-06-10 16:45:37.098624993 +0200
@@ -2421,40 +2421,53 @@
   Node* mem     = in(MemNode::Memory);
   Node* address = in(MemNode::Address);
-
   // Back-to-back stores to same address?  Fold em up.  Generally
   // unsafe if I have intervening uses...  Also disallowed for StoreCM
   // since they must follow each StoreP operation.  Redundant StoreCMs
   // are eliminated just before matching in final_graph_reshape.
-  if (mem->is_Store() && mem->in(MemNode::Address)->eqv_uncast(address) &&
-      mem->Opcode() != Op_StoreCM) {
-    // Looking at a dead closed cycle of memory?
-    assert(mem != mem->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
-
-    assert(Opcode() == mem->Opcode() ||
-           phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw,
-           "no mismatched stores, except on raw memory");
+  {
+    bool improved = false;
+    Node* st = mem;
+    Node* prev = this;
+    // If anybody other than the previous Store on the memory chain
+    // uses 'st', we cannot fold 'st' away.  For example, 'st'
+    // might be the final state at a conditional return.  Or, 'st'
+    // might be used by some node which is live at the same time
+    // 'st' is live, which might be unschedulable.  So, require
+    // exactly ONE user, the 'prev' store, until such time as we clone
+    // 'mem' for each of 'mem's uses (thus making the
+    // exactly-1-user-rule hold true).
+    while (st->is_Store() && st->outcnt() == 1 && st->Opcode() != Op_StoreCM) {
+      // Looking at a dead closed cycle of memory?
+      assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
+      assert(Opcode() == st->Opcode() ||
+             st->Opcode() == Op_StoreVector ||
+             Opcode() == Op_StoreVector ||
+             phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
+             (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI), // expanded ClearArrayNode
+             err_msg_res("no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]));
 
-    if (mem->outcnt() == 1 &&           // check for intervening uses
-        mem->as_Store()->memory_size() <= this->memory_size()) {
-      // If anybody other than 'this' uses 'mem', we cannot fold 'mem' away.
-      // For example, 'mem' might be the final state at a conditional return.
-      // Or, 'mem' might be used by some node which is live at the same time
-      // 'this' is live, which might be unschedulable.  So, require exactly
-      // ONE user, the 'this' store, until such time as we clone 'mem' for
-      // each of 'mem's uses (thus making the exactly-1-user-rule hold true).
-      if (can_reshape) {  // (%%% is this an anachronism?)
-        set_req_X(MemNode::Memory, mem->in(MemNode::Memory),
-                  phase->is_IterGVN());
-      } else {
-        // It's OK to do this in the parser, since DU info is always accurate,
-        // and the parser always refers to nodes via SafePointNode maps.
-        set_req(MemNode::Memory, mem->in(MemNode::Memory));
+      if (st->in(MemNode::Address)->eqv_uncast(address) &&
+          st->as_Store()->memory_size() <= this->memory_size()) {
+        phase->igvn_rehash_node_delayed(prev);
+        if (can_reshape) {
+          prev->set_req_X(MemNode::Memory, st->in(MemNode::Memory), phase->is_IterGVN());
+        } else {
+          // It's OK to do this in the parser, since DU info is always accurate,
+          // and the parser always refers to nodes via SafePointNode maps.
+          prev->set_req(MemNode::Memory, st->in(MemNode::Memory));
+        }
+        improved = (prev == this);
       }
+      prev = st;
+      st = st->in(MemNode::Memory);
+    }
+    if (improved) { return this; }
   }
+
   // Capture an unaliased, unconditional, simple store into an initializer.
   // Or, if it is independent of the allocation, hoist it above the allocation.
   if (ReduceFieldZeroing && /*can_reshape &&*/
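
Reviewer note: the sketch below is a minimal, self-contained model of the new
chain-walking fold, for trying the idea outside of C2. ToyNode,
fold_store_chain, and all of their fields are hypothetical names invented for
this note; none of this is HotSpot code, and all IGVN bookkeeping
(igvn_rehash_node_delayed, outcnt maintenance, StoreCM and alias checks) is
elided. It shows only the core of the patch: starting from the newest store,
keep following the Memory input while each older store has exactly one user,
and bypass any older store that writes the same address with no more bytes
than the newer store.

#include <cstdio>

// Hypothetical stand-in for a C2 store node: just enough state to walk
// a memory chain. Real nodes carry typed inputs and def-use edges.
struct ToyNode {
    const char* name;
    int         address;      // stand-in for the Address input
    int         memory_size;  // bytes written by this store
    ToyNode*    memory;       // stand-in for the Memory input (nullptr = initial state)
    int         outcnt;       // user count, like Node::outcnt()
};

// Walk up the memory chain from 'newest' and bypass any older store that
// writes the same address with no more bytes and has exactly one user
// (its downstream store 'prev'). Returns the patch's 'improved' flag,
// which tracks whether 'newest' itself had its memory input rewired.
static bool fold_store_chain(ToyNode* newest) {
    bool improved = false;
    ToyNode* prev = newest;
    ToyNode* st   = newest->memory;
    // Exactly-1-user rule: only fold 'st' while its sole user is 'prev'.
    while (st != nullptr && st->outcnt == 1) {
        if (st->address == newest->address &&
            st->memory_size <= newest->memory_size) {
            prev->memory = st->memory;  // bypass the dead store 'st'
            improved = (prev == newest);
        }
        prev = st;
        st   = st->memory;
    }
    return improved;
}

int main() {
    // s1 and s3 hit the same address, so s1 is dead once s3 executes;
    // s2 writes elsewhere and stays on the chain.
    ToyNode s1 = {"s1", 0x10, 4, nullptr, 1};
    ToyNode s2 = {"s2", 0x20, 4, &s1,     1};
    ToyNode s3 = {"s3", 0x10, 4, &s2,     1};

    bool improved = fold_store_chain(&s3);
    for (ToyNode* n = &s3; n != nullptr; n = n->memory) {
        std::printf("%s -> ", n->name);  // prints: s3 -> s2 -> (s1 is gone)
    }
    std::printf("initial state (improved=%s)\n", improved ? "true" : "false");
    return 0;
}

As in the patch's assignment 'improved = (prev == this)', a fold deeper in
the chain (here s1 being bypassed out of s2's memory input) leaves 'improved'
false; the patch instead relies on igvn_rehash_node_delayed(prev) so IGVN
revisits the rewired store later.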