src/share/vm/opto/memnode.cpp

rev 5661 : 8024921: PPC64 (part 113): Extend Load and Store nodes to know about memory ordering.
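
Note: the hunks below thread a new memory-ordering argument (Sem sem) through the Load and Store factory methods. The Sem type itself is declared in memnode.hpp and is not part of this file's diff; judging from the values used here (LoadNode::unordered, StoreNode::unordered, and the sem == unordered || sem == release assert in StoreNode::make), a minimal sketch of its likely shape is:

    // Sketch only -- the real declaration lives in memnode.hpp, outside this webrev.
    // 'unordered' and 'release' are referenced in this file; 'acquire' is assumed
    // to exist for ordered loads.
    class MemNode : public Node {
     public:
      typedef enum { unordered = 0, acquire, release } Sem;
      // ... existing members unchanged ...
    };

This lets callers state the required ordering when the node is created, which is the point of the change described in the synopsis above.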

*** 905,915 ****
  }
  #endif
  
  //----------------------------LoadNode::make-----------------------------------
  // Polymorphic factory method:
! Node *LoadNode::make( PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt ) {
    Compile* C = gvn.C;
  
    // sanity check the alias category against the created node type
    assert(!(adr_type->isa_oopptr() && adr_type->offset() == oopDesc::klass_offset_in_bytes()),
--- 905,915 ----
  }
  #endif
  
  //----------------------------LoadNode::make-----------------------------------
  // Polymorphic factory method:
! Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt, Sem sem) {
    Compile* C = gvn.C;
  
    // sanity check the alias category against the created node type
    assert(!(adr_type->isa_oopptr() && adr_type->offset() == oopDesc::klass_offset_in_bytes()),
*** 921,958 ****
  assert( ctl != NULL || C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
          // oop will be recorded in oop map if load crosses safepoint
          rt->isa_oopptr() || is_immutable_value(adr),
          "raw memory operations should have control edge");
  
  switch (bt) {
! case T_BOOLEAN: return new (C) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int()  );
! case T_BYTE:    return new (C) LoadBNode (ctl, mem, adr, adr_type, rt->is_int()  );
! case T_INT:     return new (C) LoadINode (ctl, mem, adr, adr_type, rt->is_int()  );
! case T_CHAR:    return new (C) LoadUSNode(ctl, mem, adr, adr_type, rt->is_int()  );
! case T_SHORT:   return new (C) LoadSNode (ctl, mem, adr, adr_type, rt->is_int()  );
! case T_LONG:    return new (C) LoadLNode (ctl, mem, adr, adr_type, rt->is_long() );
! case T_FLOAT:   return new (C) LoadFNode (ctl, mem, adr, adr_type, rt            );
! case T_DOUBLE:  return new (C) LoadDNode (ctl, mem, adr, adr_type, rt            );
! case T_ADDRESS: return new (C) LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr()  );
  case T_OBJECT:
  #ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
!     Node* load = gvn.transform(new (C) LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop()));
      return new (C) DecodeNNode(load, load->bottom_type()->make_ptr());
    } else
  #endif
    {
      assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
!     return new (C) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr());
    }
  }
  ShouldNotReachHere();
  return (LoadNode*)NULL;
  }
  
! LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt) {
    bool require_atomic = true;
!   return new (C) LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), require_atomic);
  }
--- 921,958 ----
  assert( ctl != NULL || C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
          // oop will be recorded in oop map if load crosses safepoint
          rt->isa_oopptr() || is_immutable_value(adr),
          "raw memory operations should have control edge");
  
  switch (bt) {
! case T_BOOLEAN: return new (C) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(),  sem);
! case T_BYTE:    return new (C) LoadBNode (ctl, mem, adr, adr_type, rt->is_int(),  sem);
! case T_INT:     return new (C) LoadINode (ctl, mem, adr, adr_type, rt->is_int(),  sem);
! case T_CHAR:    return new (C) LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(),  sem);
! case T_SHORT:   return new (C) LoadSNode (ctl, mem, adr, adr_type, rt->is_int(),  sem);
! case T_LONG:    return new (C) LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), false, sem);
! case T_FLOAT:   return new (C) LoadFNode (ctl, mem, adr, adr_type, rt,            sem);
! case T_DOUBLE:  return new (C) LoadDNode (ctl, mem, adr, adr_type, rt,            sem);
! case T_ADDRESS: return new (C) LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(),  sem);
  case T_OBJECT:
  #ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
!     Node* load = gvn.transform(new (C) LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), sem));
      return new (C) DecodeNNode(load, load->bottom_type()->make_ptr());
    } else
  #endif
    {
      assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
!     return new (C) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr(), sem);
    }
  }
  ShouldNotReachHere();
  return (LoadNode*)NULL;
  }
  
! LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, Sem sem) {
    bool require_atomic = true;
!   return new (C) LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), require_atomic, sem);
  }
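
For reference, a caller that previously built a plain load via LoadNode::make(gvn, ctl, mem, adr, adr_type, rt, bt) now has to name the ordering explicitly. A minimal, hypothetical call site for an ordinary (unordered) int load might look like:

    // Hypothetical caller sketch; argument values are illustrative only.
    Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type,
                              TypeInt::INT, T_INT, LoadNode::unordered);
    ld = gvn.transform(ld);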
*** 2030,2045 ****
  const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
  assert(adr_type != NULL, "expecting TypeKlassPtr");
  #ifdef _LP64
  if (adr_type->is_ptr_to_narrowklass()) {
    assert(UseCompressedClassPointers, "no compressed klasses");
!   Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass()));
    return new (C) DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
  }
  #endif
  assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
! return new (C) LoadKlassNode(ctl, mem, adr, at, tk);
  }
  
  //------------------------------Value------------------------------------------
  const Type *LoadKlassNode::Value( PhaseTransform *phase ) const {
    return klass_value_common(phase);
--- 2030,2045 ----
  const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
  assert(adr_type != NULL, "expecting TypeKlassPtr");
  #ifdef _LP64
  if (adr_type->is_ptr_to_narrowklass()) {
    assert(UseCompressedClassPointers, "no compressed klasses");
!   Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass(), LoadNode::unordered));
    return new (C) DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
  }
  #endif
  assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
! return new (C) LoadKlassNode(ctl, mem, adr, at, tk, LoadNode::unordered);
  }
  
  //------------------------------Value------------------------------------------
  const Type *LoadKlassNode::Value( PhaseTransform *phase ) const {
    return klass_value_common(phase);
*** 2345,2393 ****
  }
  
  //=============================================================================
  //---------------------------StoreNode::make-----------------------------------
  // Polymorphic factory method:
! StoreNode* StoreNode::make( PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt ) {
    Compile* C = gvn.C;
!   assert( C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
            ctl != NULL, "raw memory operations should have control edge");
  
    switch (bt) {
    case T_BOOLEAN:
!   case T_BYTE:    return new (C) StoreBNode(ctl, mem, adr, adr_type, val);
!   case T_INT:     return new (C) StoreINode(ctl, mem, adr, adr_type, val);
    case T_CHAR:
!   case T_SHORT:   return new (C) StoreCNode(ctl, mem, adr, adr_type, val);
!   case T_LONG:    return new (C) StoreLNode(ctl, mem, adr, adr_type, val);
!   case T_FLOAT:   return new (C) StoreFNode(ctl, mem, adr, adr_type, val);
!   case T_DOUBLE:  return new (C) StoreDNode(ctl, mem, adr, adr_type, val);
    case T_METADATA:
    case T_ADDRESS:
    case T_OBJECT:
  #ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      val = gvn.transform(new (C) EncodePNode(val, val->bottom_type()->make_narrowoop()));
!     return new (C) StoreNNode(ctl, mem, adr, adr_type, val);
    } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
               (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
                adr->bottom_type()->isa_rawptr())) {
      val = gvn.transform(new (C) EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
!     return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val);
    }
  #endif
    {
!     return new (C) StorePNode(ctl, mem, adr, adr_type, val);
    }
  }
  ShouldNotReachHere();
  return (StoreNode*)NULL;
  }
  
! StoreLNode* StoreLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val) {
    bool require_atomic = true;
!   return new (C) StoreLNode(ctl, mem, adr, adr_type, val, require_atomic);
  }
  
  //--------------------------bottom_type----------------------------------------
  const Type *StoreNode::bottom_type() const {
--- 2345,2394 ----
  }
  
  //=============================================================================
  //---------------------------StoreNode::make-----------------------------------
  // Polymorphic factory method:
! StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, Sem sem) {
!   assert((sem == unordered || sem == release), "unexpected");
    Compile* C = gvn.C;
!   assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
           ctl != NULL, "raw memory operations should have control edge");
  
    switch (bt) {
    case T_BOOLEAN:
!   case T_BYTE:    return new (C) StoreBNode(ctl, mem, adr, adr_type, val, sem);
!   case T_INT:     return new (C) StoreINode(ctl, mem, adr, adr_type, val, sem);
    case T_CHAR:
!   case T_SHORT:   return new (C) StoreCNode(ctl, mem, adr, adr_type, val, sem);
!   case T_LONG:    return new (C) StoreLNode(ctl, mem, adr, adr_type, val, false, sem);
!   case T_FLOAT:   return new (C) StoreFNode(ctl, mem, adr, adr_type, val, sem);
!   case T_DOUBLE:  return new (C) StoreDNode(ctl, mem, adr, adr_type, val, sem);
    case T_METADATA:
    case T_ADDRESS:
    case T_OBJECT:
  #ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      val = gvn.transform(new (C) EncodePNode(val, val->bottom_type()->make_narrowoop()));
!     return new (C) StoreNNode(ctl, mem, adr, adr_type, val, sem);
    } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
               (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
                adr->bottom_type()->isa_rawptr())) {
      val = gvn.transform(new (C) EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
!     return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val, sem);
    }
  #endif
    {
!     return new (C) StorePNode(ctl, mem, adr, adr_type, val, sem);
    }
  }
  ShouldNotReachHere();
  return (StoreNode*)NULL;
  }
  
! StoreLNode* StoreLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, Sem sem) {
    bool require_atomic = true;
!   return new (C) StoreLNode(ctl, mem, adr, adr_type, val, require_atomic, sem);
  }
  
  //--------------------------bottom_type----------------------------------------
  const Type *StoreNode::bottom_type() const {
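
Similarly, the store factories take the new argument, and the added assert restricts StoreNode::make to unordered or release semantics. A hypothetical call site for a releasing oop store (e.g. a volatile field write) might look like the sketch below; StoreNode::release is inferred from the assert above and is an assumption here.

    // Hypothetical caller sketch; 'StoreNode::release' is inferred from the
    // (sem == unordered || sem == release) assert and is not declared in this file.
    Node* st = StoreNode::make(gvn, ctl, mem, adr, adr_type, val,
                               T_OBJECT, StoreNode::release);
    st = gvn.transform(st);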
*** 2776,2791 ****
  if( adr->Opcode() != Op_AddP ) Unimplemented();
  Node *base = adr->in(1);
  Node *zero = phase->makecon(TypeLong::ZERO);
  Node *off  = phase->MakeConX(BytesPerLong);
! mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero);
  count--;
  while( count-- ) {
    mem = phase->transform(mem);
    adr = phase->transform(new (phase->C) AddPNode(base,adr,off));
!   mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero);
  }
  return mem;
  }
  
  //----------------------------step_through----------------------------------
--- 2777,2792 ----
  if( adr->Opcode() != Op_AddP ) Unimplemented();
  Node *base = adr->in(1);
  Node *zero = phase->makecon(TypeLong::ZERO);
  Node *off  = phase->MakeConX(BytesPerLong);
! mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero,false,StoreNode::unordered);
  count--;
  while( count-- ) {
    mem = phase->transform(mem);
    adr = phase->transform(new (phase->C) AddPNode(base,adr,off));
!   mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero,false,StoreNode::unordered);
  }
  return mem;
  }
  
  //----------------------------step_through----------------------------------
*** 2825,2835 ****
  int unit = BytesPerLong;
  if ((offset % unit) != 0) {
    Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(offset));
    adr = phase->transform(adr);
    const TypePtr* atp = TypeRawPtr::BOTTOM;
!   mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT);
    mem = phase->transform(mem);
    offset += BytesPerInt;
  }
  assert((offset % unit) == 0, "");
--- 2826,2836 ----
  int unit = BytesPerLong;
  if ((offset % unit) != 0) {
    Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(offset));
    adr = phase->transform(adr);
    const TypePtr* atp = TypeRawPtr::BOTTOM;
!   mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, StoreNode::unordered);
    mem = phase->transform(mem);
    offset += BytesPerInt;
  }
  assert((offset % unit) == 0, "");
*** 2886,2896 ****
  }
  if (done_offset < end_offset) { // emit the final 32-bit store
    Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(done_offset));
    adr = phase->transform(adr);
    const TypePtr* atp = TypeRawPtr::BOTTOM;
!   mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT);
    mem = phase->transform(mem);
    done_offset += BytesPerInt;
  }
  assert(done_offset == end_offset, "");
  return mem;
--- 2887,2897 ----
  }
  if (done_offset < end_offset) { // emit the final 32-bit store
    Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(done_offset));
    adr = phase->transform(adr);
    const TypePtr* atp = TypeRawPtr::BOTTOM;
!   mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, StoreNode::unordered);
    mem = phase->transform(mem);
    done_offset += BytesPerInt;
  }
  assert(done_offset == end_offset, "");
  return mem;
*** 3760,3785 ****
  int nst = 0;
  if (!split) {
    ++new_long;
    off[nst] = offset;
    st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
!                               phase->longcon(con), T_LONG);
  } else {
    // Omit either if it is a zero.
    if (con0 != 0) {
      ++new_int;
      off[nst] = offset;
      st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
!                                 phase->intcon(con0), T_INT);
    }
    if (con1 != 0) {
      ++new_int;
      offset += BytesPerInt;
      adr = make_raw_address(offset, phase);
      off[nst] = offset;
      st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
!                                 phase->intcon(con1), T_INT);
    }
  }
  
  // Insert second store first, then the first before the second.
  // Insert each one just before any overlapping non-constant stores.
--- 3761,3786 ----
  int nst = 0;
  if (!split) {
    ++new_long;
    off[nst] = offset;
    st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
!                               phase->longcon(con), T_LONG, StoreNode::unordered);
  } else {
    // Omit either if it is a zero.
    if (con0 != 0) {
      ++new_int;
      off[nst] = offset;
      st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
!                                 phase->intcon(con0), T_INT, StoreNode::unordered);
    }
    if (con1 != 0) {
      ++new_int;
      offset += BytesPerInt;
      adr = make_raw_address(offset, phase);
      off[nst] = offset;
      st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
!                                 phase->intcon(con1), T_INT, StoreNode::unordered);
    }
  }
  
  // Insert second store first, then the first before the second.
  // Insert each one just before any overlapping non-constant stores.