
src/hotspot/share/opto/memnode.cpp


*** 42,51 **** --- 42,52 ----
  #include "opto/memnode.hpp"
  #include "opto/mulnode.hpp"
  #include "opto/narrowptrnode.hpp"
  #include "opto/phaseX.hpp"
  #include "opto/regmask.hpp"
+ #include "opto/valuetypenode.hpp"
  #include "utilities/align.hpp"
  #include "utilities/copy.hpp"
  #include "utilities/macros.hpp"
  #include "utilities/vmError.hpp"
  #if INCLUDE_ZGC
*** 816,825 **** --- 817,827 ----
    case T_SHORT:   load = new LoadSNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
    case T_LONG:    load = new LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency); break;
    case T_FLOAT:   load = new LoadFNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency); break;
    case T_DOUBLE:  load = new LoadDNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency); break;
    case T_ADDRESS: load = new LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(),  mo, control_dependency); break;
+   case T_VALUETYPE:
    case T_OBJECT:
  #ifdef _LP64
      if (adr->bottom_type()->is_ptr_to_narrowoop()) {
        load = new LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo, control_dependency);
      } else
*** 1078,1087 **** --- 1080,1095 ----
          (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
        // return a zero value for the load's basic type
        // (This is one of the few places where a generic PhaseTransform
        // can create new nodes.  Think of it as lazily manifesting
        // virtually pre-existing constants.)
+       assert(memory_type() != T_VALUETYPE, "should not be used for value types");
+       Node* default_value = ld_alloc->in(AllocateNode::DefaultValue);
+       if (default_value != NULL) {
+         return default_value;
+       }
+       assert(ld_alloc->in(AllocateNode::RawDefaultValue) == NULL, "default value may not be null");
        return phase->zerocon(memory_type());
      }
  
      // A load from an initialization barrier can match a captured store.
      if (st->is_Proj() && st->in(0)->is_Initialize()) {
*** 1135,1144 **** --- 1143,1179 ----
  }
  
  //------------------------------Identity---------------------------------------
  // Loads are identity if previous store is to same address
  Node* LoadNode::Identity(PhaseGVN* phase) {
+   // Loading from a ValueTypePtr? The ValueTypePtr has the values of
+   // all fields as input. Look for the field with matching offset.
+   Node* addr = in(Address);
+   intptr_t offset;
+   Node* base = AddPNode::Ideal_base_and_offset(addr, phase, offset);
+   if (base != NULL && base->is_ValueTypePtr() && offset > oopDesc::klass_offset_in_bytes()) {
+     Node* value = base->as_ValueTypePtr()->field_value_by_offset((int)offset, true);
+     if (value->is_ValueType()) {
+       // Non-flattened value type field
+       ValueTypeNode* vt = value->as_ValueType();
+       if (vt->is_allocated(phase)) {
+         value = vt->get_oop();
+       } else {
+         // Not yet allocated, bail out
+         value = NULL;
+       }
+     }
+     if (value != NULL) {
+       if (Opcode() == Op_LoadN) {
+         // Encode oop value if we are loading a narrow oop
+         assert(!phase->type(value)->isa_narrowoop(), "should already be decoded");
+         value = phase->transform(new EncodePNode(value, bottom_type()));
+       }
+       return value;
+     }
+   }
+ 
    // If the previous store-maker is the right kind of Store, and the store is
    // to the same address, then we are equal to the value stored.
    Node* mem = in(Memory);
    Node* value = can_see_stored_value(mem, phase);
    if( value ) {
*** 1662,1671 **** --- 1697,1717 ----
        set_req(MemNode::Memory, prev_mem);
        return this;
      }
    }
  
+   AllocateNode* alloc = AllocateNode::Ideal_allocation(address, phase);
+   if (alloc != NULL && mem->is_Proj() &&
+       mem->in(0) != NULL &&
+       mem->in(0) == alloc->initialization() &&
+       Opcode() == Op_LoadX &&
+       alloc->initialization()->proj_out_or_null(0) != NULL) {
+     InitializeNode* init = alloc->initialization();
+     Node* control = init->proj_out(0);
+     return alloc->make_ideal_mark(phase, address, control, mem, NULL);
+   }
+ 
    return progress ? this : NULL;
  }
  
  // Helper to recognize certain Klass fields which are invariant across
  // some group of array types (e.g., int[] or all T[] where T < Object).
*** 1751,1760 **** --- 1797,1807 ----
    // In fact, that could have been the original type of p1, and p1 could have
    // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
    // expression (LShiftL quux 3) independently optimized to the constant 8.
    if ((t->isa_int() == NULL) && (t->isa_long() == NULL)
        && (_type->isa_vect() == NULL)
+       && t->isa_valuetype() == NULL
        && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
      // t might actually be lower than _type, if _type is a unique
      // concrete subclass of abstract class t.
      if (off_beyond_header || off == Type::OffsetBot) {  // is the offset beyond the header?
        const Type* jt = t->join_speculative(_type);
*** 1785,1840 ****
      }
    } else if (tp->base() == Type::InstPtr) {
      assert( off != Type::OffsetBot ||
              // arrays can be cast to Objects
              tp->is_oopptr()->klass()->is_java_lang_Object() ||
              // unsafe field access may not have a constant offset
              C->has_unsafe_access(),
              "Field accesses must be precise" );
      // For oop loads, we expect the _type to be precise.
  
      // Optimize loads from constant fields.
      const TypeInstPtr* tinst = tp->is_instptr();
      ciObject* const_oop = tinst->const_oop();
      if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != NULL && const_oop->is_instance()) {
!       const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), memory_type());
        if (con_type != NULL) {
          return con_type;
        }
      }
    } else if (tp->base() == Type::KlassPtr) {
      assert( off != Type::OffsetBot ||
              // arrays can be cast to Objects
              tp->is_klassptr()->klass()->is_java_lang_Object() ||
              // also allow array-loading from the primary supertype
              // array during subtype checks
              Opcode() == Op_LoadKlass,
              "Field accesses must be precise" );
      // For klass/static loads, we expect the _type to be precise
!   } else if (tp->base() == Type::RawPtr && adr->is_Load() && off == 0) {
      /* With mirrors being an indirect in the Klass*
       * the VM is now using two loads. LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset))
       * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
       *
       * So check the type and klass of the node before the LoadP.
       */
      Node* adr2 = adr->in(MemNode::Address);
      const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
!     if (tkls != NULL && !StressReflectiveCode) {
        ciKlass* klass = tkls->klass();
!       if (klass->is_loaded() && tkls->klass_is_exact() && tkls->offset() == in_bytes(Klass::java_mirror_offset())) {
          assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
          assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
          return TypeInstPtr::make(klass->java_mirror());
        }
      }
    }
  
    const TypeKlassPtr *tkls = tp->isa_klassptr();
    if (tkls != NULL && !StressReflectiveCode) {
      ciKlass* klass = tkls->klass();
!     if (klass->is_loaded() && tkls->klass_is_exact()) {
      // We are loading a field from a Klass metaobject whose identity
      // is known at compile time (the type is "exact" or "precise").
      // Check for fields we know are maintained as constants by the VM.
      if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
        // The field is Klass::_super_check_offset.  Return its (constant) value.
--- 1832,1915 ----
      }
    } else if (tp->base() == Type::InstPtr) {
      assert( off != Type::OffsetBot ||
              // arrays can be cast to Objects
              tp->is_oopptr()->klass()->is_java_lang_Object() ||
+             tp->is_oopptr()->klass() == ciEnv::current()->Class_klass() ||
              // unsafe field access may not have a constant offset
              C->has_unsafe_access(),
              "Field accesses must be precise" );
      // For oop loads, we expect the _type to be precise.
  
      // Optimize loads from constant fields.
      const TypeInstPtr* tinst = tp->is_instptr();
      ciObject* const_oop = tinst->const_oop();
      if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != NULL && const_oop->is_instance()) {
!       BasicType bt = memory_type();
!       ciType* mirror_type = const_oop->as_instance()->java_mirror_type();
!       if (mirror_type != NULL && mirror_type->is_valuetype()) {
!         ciValueKlass* vk = mirror_type->as_value_klass();
!         if (off == vk->default_value_offset()) {
!           // Loading a special hidden field that contains the oop of the default value type
!           const Type* const_oop = TypeInstPtr::make(vk->default_value_instance());
!           return (bt == T_NARROWOOP) ? const_oop->make_narrowoop() : const_oop;
!         }
!       }
!       const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), bt);
        if (con_type != NULL) {
          return con_type;
        }
      }
    } else if (tp->base() == Type::KlassPtr) {
      assert( off != Type::OffsetBot ||
              // arrays can be cast to Objects
+             tp->is_klassptr()->klass() == NULL ||
              tp->is_klassptr()->klass()->is_java_lang_Object() ||
              // also allow array-loading from the primary supertype
              // array during subtype checks
              Opcode() == Op_LoadKlass,
              "Field accesses must be precise" );
      // For klass/static loads, we expect the _type to be precise
!   } else if (tp->base() == Type::RawPtr && !StressReflectiveCode) {
!     if (adr->is_Load() && off == 0) {
        /* With mirrors being an indirect in the Klass*
         * the VM is now using two loads. LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset))
         * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
         *
         * So check the type and klass of the node before the LoadP.
         */
        Node* adr2 = adr->in(MemNode::Address);
        const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
!       if (tkls != NULL) {
          ciKlass* klass = tkls->klass();
!         if (klass != NULL && klass->is_loaded() && tkls->klass_is_exact() && tkls->offset() == in_bytes(Klass::java_mirror_offset())) {
            assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
            assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
            return TypeInstPtr::make(klass->java_mirror());
          }
        }
+     } else {
+       // Check for a load of the default value offset from the ValueKlassFixedBlock:
+       // LoadI(LoadP(value_klass, adr_valueklass_fixed_block_offset), default_value_offset_offset)
+       intptr_t offset = 0;
+       Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
+       if (base != NULL && base->is_Load() && offset == in_bytes(ValueKlass::default_value_offset_offset())) {
+         const TypeKlassPtr* tkls = phase->type(base->in(MemNode::Address))->isa_klassptr();
+         if (tkls != NULL && tkls->is_loaded() && tkls->klass_is_exact() && tkls->isa_valuetype() &&
+             tkls->offset() == in_bytes(InstanceKlass::adr_valueklass_fixed_block_offset())) {
+           assert(base->Opcode() == Op_LoadP, "must load an oop from klass");
+           assert(Opcode() == Op_LoadI, "must load an int from fixed block");
+           return TypeInt::make(tkls->klass()->as_value_klass()->default_value_offset());
+         }
+       }
+     }
    }
  
    const TypeKlassPtr *tkls = tp->isa_klassptr();
    if (tkls != NULL && !StressReflectiveCode) {
      ciKlass* klass = tkls->klass();
!     if (tkls->is_loaded() && tkls->klass_is_exact()) {
      // We are loading a field from a Klass metaobject whose identity
      // is known at compile time (the type is "exact" or "precise").
      // Check for fields we know are maintained as constants by the VM.
      if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
        // The field is Klass::_super_check_offset.  Return its (constant) value.
*** 1857,1867 ****
      }
  
      // We can still check if we are loading from the primary_supers array at a
      // shallow enough depth.  Even though the klass is not exact, entries less
      // than or equal to its super depth are correct.
!     if (klass->is_loaded() ) {
        ciType *inner = klass;
        while( inner->is_obj_array_klass() )
          inner = inner->as_obj_array_klass()->base_element_type();
        if( inner->is_instance_klass() &&
            !inner->as_instance_klass()->flags().is_interface() ) {
--- 1932,1942 ----
      }
  
      // We can still check if we are loading from the primary_supers array at a
      // shallow enough depth.  Even though the klass is not exact, entries less
      // than or equal to its super depth are correct.
!     if (tkls->is_loaded()) {
        ciType *inner = klass;
        while( inner->is_obj_array_klass() )
          inner = inner->as_obj_array_klass()->base_element_type();
        if( inner->is_instance_klass() &&
            !inner->as_instance_klass()->flags().is_interface() ) {
*** 2144,2154 ****
          // Return precise klass
          return TypeKlassPtr::make(ik);
        }
  
        // Return root of possible klass
!       return TypeKlassPtr::make(TypePtr::NotNull, ik, 0/*offset*/);
      }
    }
  
    // Check for loading klass from an array
    const TypeAryPtr *tary = tp->isa_aryptr();
--- 2219,2229 ----
          // Return precise klass
          return TypeKlassPtr::make(ik);
        }
  
        // Return root of possible klass
!       return TypeKlassPtr::make(TypePtr::NotNull, ik, Type::Offset(0));
      }
    }
  
    // Check for loading klass from an array
    const TypeAryPtr *tary = tp->isa_aryptr();
*** 2176,2186 ****
          }
          // Return precise array klass
          return TypeKlassPtr::make(ak);
        }
      }
!     return TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
    } else {                      // Found a type-array?
      //assert(!UseExactTypes, "this code should be useless with exact types");
      assert( ak->is_type_array_klass(), "" );
      return TypeKlassPtr::make(ak); // These are always precise
    }
--- 2251,2261 ----
          }
          // Return precise array klass
          return TypeKlassPtr::make(ak);
        }
      }
!     return TypeKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0));
    } else {                      // Found a type-array?
      //assert(!UseExactTypes, "this code should be useless with exact types");
      assert( ak->is_type_array_klass(), "" );
      return TypeKlassPtr::make(ak); // These are always precise
    }
*** 2188,2210 ****
    }
  
    // Check for loading klass from an array klass
    const TypeKlassPtr *tkls = tp->isa_klassptr();
    if (tkls != NULL && !StressReflectiveCode) {
!     ciKlass* klass = tkls->klass();
!     if( !klass->is_loaded() )
        return _type;             // Bail out if not loaded
      if( klass->is_obj_array_klass() &&
          tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) {
        ciKlass* elem = klass->as_obj_array_klass()->element_klass();
        // // Always returning precise element type is incorrect,
        // // e.g., element type could be object and array may contain strings
        // return TypeKlassPtr::make(TypePtr::Constant, elem, 0);
        // The array's TypeKlassPtr was declared 'precise' or 'not precise'
        // according to the element type's subclassing.
!       return TypeKlassPtr::make(tkls->ptr(), elem, 0/*offset*/);
      }
      if( klass->is_instance_klass() && tkls->klass_is_exact() &&
          tkls->offset() == in_bytes(Klass::super_offset())) {
        ciKlass* sup = klass->as_instance_klass()->super();
        // The field is Klass::_super.  Return its (constant) value.
--- 2263,2286 ----
    }
  
    // Check for loading klass from an array klass
    const TypeKlassPtr *tkls = tp->isa_klassptr();
    if (tkls != NULL && !StressReflectiveCode) {
!     if (!tkls->is_loaded()) {
        return _type;             // Bail out if not loaded
+     }
+     ciKlass* klass = tkls->klass();
      if( klass->is_obj_array_klass() &&
          tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) {
        ciKlass* elem = klass->as_obj_array_klass()->element_klass();
        // // Always returning precise element type is incorrect,
        // // e.g., element type could be object and array may contain strings
        // return TypeKlassPtr::make(TypePtr::Constant, elem, 0);
        // The array's TypeKlassPtr was declared 'precise' or 'not precise'
        // according to the element type's subclassing.
!       return TypeKlassPtr::make(tkls->ptr(), elem, Type::Offset(0));
      }
      if( klass->is_instance_klass() && tkls->klass_is_exact() &&
          tkls->offset() == in_bytes(Klass::super_offset())) {
        ciKlass* sup = klass->as_instance_klass()->super();
        // The field is Klass::_super.  Return its (constant) value.
*** 2408,2417 **** --- 2484,2494 ----
    case T_LONG:    return new StoreLNode(ctl, mem, adr, adr_type, val, mo);
    case T_FLOAT:   return new StoreFNode(ctl, mem, adr, adr_type, val, mo);
    case T_DOUBLE:  return new StoreDNode(ctl, mem, adr, adr_type, val, mo);
    case T_METADATA:
    case T_ADDRESS:
+   case T_VALUETYPE:
    case T_OBJECT:
  #ifdef _LP64
      if (adr->bottom_type()->is_ptr_to_narrowoop()) {
        val = gvn.transform(new EncodePNode(val, val->bottom_type()->make_narrowoop()));
        return new StoreNNode(ctl, mem, adr, adr_type, val, mo);
*** 2486,2495 **** --- 2563,2573 ----
             st->Opcode() == Op_StoreVector ||
             Opcode() == Op_StoreVector ||
             phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
             (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
             (Opcode() == Op_StoreI && st->Opcode() == Op_StoreL) || // initialization by arraycopy
+            (Opcode() == Op_StoreL && st->Opcode() == Op_StoreN) ||
             (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
             "no mismatched stores, except on raw memory: %s %s",
             NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
      if (st->in(MemNode::Address)->eqv_uncast(address) &&
          st->as_Store()->memory_size() <= this->memory_size()) {
*** 2571,2584 ****
    }
  
    // Store of zero anywhere into a freshly-allocated object?
    // Then the store is useless.
    // (It must already have been captured by the InitializeNode.)
!   if (result == this &&
!       ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
      // a newly allocated object is already all-zeroes everywhere
!     if (mem->is_Proj() && mem->in(0)->is_Allocate()) {
        result = mem;
      }
  
      if (result == this) {
        // the store may also apply to zero-bits in an earlier object
--- 2649,2663 ----
    }
  
    // Store of zero anywhere into a freshly-allocated object?
    // Then the store is useless.
    // (It must already have been captured by the InitializeNode.)
!   if (result == this && ReduceFieldZeroing) {
      // a newly allocated object is already all-zeroes everywhere
!     if (mem->is_Proj() && mem->in(0)->is_Allocate() &&
!         (phase->type(val)->is_zero_type() || mem->in(0)->in(AllocateNode::DefaultValue) == val)) {
!       assert(!phase->type(val)->is_zero_type() || mem->in(0)->in(AllocateNode::DefaultValue) == NULL, "storing null to value array is forbidden");
        result = mem;
      }
  
      if (result == this) {
        // the store may also apply to zero-bits in an earlier object
*** 2587,2601 **** --- 2666,2688 ----
        if (prev_mem != NULL) {
          Node* prev_val = can_see_stored_value(prev_mem, phase);
          if (prev_val != NULL && phase->eqv(prev_val, val)) {
            // prev_val and val might differ by a cast; it would be good
            // to keep the more informative of the two.
+           if (phase->type(val)->is_zero_type()) {
+             result = mem;
+           } else if (prev_mem->is_Proj() && prev_mem->in(0)->is_Initialize()) {
+             InitializeNode* init = prev_mem->in(0)->as_Initialize();
+             AllocateNode* alloc = init->allocation();
+             if (alloc != NULL && alloc->in(AllocateNode::DefaultValue) == val) {
              result = mem;
            }
          }
        }
      }
+     }
+   }
  
    if (result != this && phase->is_IterGVN() != NULL) {
      MemBarNode* trailing = trailing_membar();
      if (trailing != NULL) {
  #ifdef ASSERT
*** 2895,2905 ****
    if (size <= 0 || size % unit != 0) return NULL;
    intptr_t count = size / unit;
    // Length too long; communicate this to matchers and assemblers.
    // Assemblers are responsible to produce fast hardware clears for it.
    if (size > InitArrayShortSize) {
!     return new ClearArrayNode(in(0), in(1), in(2), in(3), true);
    }
    Node *mem = in(1);
    if( phase->type(mem)==Type::TOP ) return NULL;
    Node *adr = in(3);
    const Type* at = phase->type(adr);
--- 2982,2992 ----
    if (size <= 0 || size % unit != 0) return NULL;
    intptr_t count = size / unit;
    // Length too long; communicate this to matchers and assemblers.
    // Assemblers are responsible to produce fast hardware clears for it.
    if (size > InitArrayShortSize) {
!     return new ClearArrayNode(in(0), in(1), in(2), in(3), in(4), true);
    }
    Node *mem = in(1);
    if( phase->type(mem)==Type::TOP ) return NULL;
    Node *adr = in(3);
    const Type* at = phase->type(adr);
*** 2910,2927 ****
    else              atp = atp->add_offset(Type::OffsetBot);
    // Get base for derived pointer purposes
    if( adr->Opcode() != Op_AddP ) Unimplemented();
    Node *base = adr->in(1);
  
!   Node *zero = phase->makecon(TypeLong::ZERO);
    Node *off  = phase->MakeConX(BytesPerLong);
!   mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
    count--;
    while( count-- ) {
      mem = phase->transform(mem);
      adr = phase->transform(new AddPNode(base,adr,off));
!     mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
    }
    return mem;
  }
  
  //----------------------------step_through----------------------------------
--- 2997,3014 ----
    else              atp = atp->add_offset(Type::OffsetBot);
    // Get base for derived pointer purposes
    if( adr->Opcode() != Op_AddP ) Unimplemented();
    Node *base = adr->in(1);
  
!   Node *val = in(4);
    Node *off  = phase->MakeConX(BytesPerLong);
!   mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
    count--;
    while( count-- ) {
      mem = phase->transform(mem);
      adr = phase->transform(new AddPNode(base,adr,off));
!     mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
    }
    return mem;
  }
  
  //----------------------------step_through----------------------------------
*** 2951,2981 ****
  }
  
  //----------------------------clear_memory-------------------------------------
  // Generate code to initialize object storage to zero.
  Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
                                     intptr_t start_offset,
                                     Node* end_offset,
                                     PhaseGVN* phase) {
    intptr_t offset = start_offset;
    int unit = BytesPerLong;
    if ((offset % unit) != 0) {
      Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
      adr = phase->transform(adr);
      const TypePtr* atp = TypeRawPtr::BOTTOM;
      mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
      mem = phase->transform(mem);
      offset += BytesPerInt;
    }
    assert((offset % unit) == 0, "");
  
    // Initialize the remaining stuff, if any, with a ClearArray.
!   return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase);
  }
  
  Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
                                     Node* start_offset,
                                     Node* end_offset,
                                     PhaseGVN* phase) {
    if (start_offset == end_offset) {
      // nothing to do
--- 3038,3077 ----
  }
  
  //----------------------------clear_memory-------------------------------------
  // Generate code to initialize object storage to zero.
  Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
+                                    Node* val,
+                                    Node* raw_val,
                                     intptr_t start_offset,
                                     Node* end_offset,
                                     PhaseGVN* phase) {
    intptr_t offset = start_offset;
    int unit = BytesPerLong;
    if ((offset % unit) != 0) {
      Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
      adr = phase->transform(adr);
      const TypePtr* atp = TypeRawPtr::BOTTOM;
+     if (val != NULL) {
+       assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
+       mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
+     } else {
+       assert(raw_val == NULL, "val may not be null");
        mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
+     }
      mem = phase->transform(mem);
      offset += BytesPerInt;
    }
    assert((offset % unit) == 0, "");
  
    // Initialize the remaining stuff, if any, with a ClearArray.
!   return clear_memory(ctl, mem, dest, raw_val, phase->MakeConX(offset), end_offset, phase);
  }
  
  Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
+                                    Node* raw_val,
                                     Node* start_offset,
                                     Node* end_offset,
                                     PhaseGVN* phase) {
    if (start_offset == end_offset) {
      // nothing to do
*** 2994,3008 ****
    }
  
    // Bulk clear double-words
    Node* zsize = phase->transform(new SubXNode(zend, zbase) );
    Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
!   mem = new ClearArrayNode(ctl, mem, zsize, adr, false);
    return phase->transform(mem);
  }
  
  Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
                                     intptr_t start_offset,
                                     intptr_t end_offset,
                                     PhaseGVN* phase) {
    if (start_offset == end_offset) {
      // nothing to do
--- 3090,3109 ----
    }
  
    // Bulk clear double-words
    Node* zsize = phase->transform(new SubXNode(zend, zbase) );
    Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
!   if (raw_val == NULL) {
!     raw_val = phase->MakeConX(0);
!   }
!   mem = new ClearArrayNode(ctl, mem, zsize, adr, raw_val, false);
    return phase->transform(mem);
  }
  
  Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
+                                    Node* val,
+                                    Node* raw_val,
                                     intptr_t start_offset,
                                     intptr_t end_offset,
                                     PhaseGVN* phase) {
    if (start_offset == end_offset) {
      // nothing to do
*** 3013,3030 ****
    intptr_t done_offset = end_offset;
    if ((done_offset % BytesPerLong) != 0) {
      done_offset -= BytesPerInt;
    }
    if (done_offset > start_offset) {
!     mem = clear_memory(ctl, mem, dest, start_offset, phase->MakeConX(done_offset), phase);
    }
    if (done_offset < end_offset) { // emit the final 32-bit store
      Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
      adr = phase->transform(adr);
      const TypePtr* atp = TypeRawPtr::BOTTOM;
      mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
      mem = phase->transform(mem);
      done_offset += BytesPerInt;
    }
    assert(done_offset == end_offset, "");
    return mem;
--- 3114,3137 ----
    intptr_t done_offset = end_offset;
    if ((done_offset % BytesPerLong) != 0) {
      done_offset -= BytesPerInt;
    }
    if (done_offset > start_offset) {
!     mem = clear_memory(ctl, mem, dest, val, raw_val, start_offset, phase->MakeConX(done_offset), phase);
    }
    if (done_offset < end_offset) { // emit the final 32-bit store
      Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
      adr = phase->transform(adr);
      const TypePtr* atp = TypeRawPtr::BOTTOM;
+     if (val != NULL) {
+       assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
+       mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
+     } else {
+       assert(raw_val == NULL, "val may not be null");
        mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
+     }
      mem = phase->transform(mem);
      done_offset += BytesPerInt;
    }
    assert(done_offset == end_offset, "");
    return mem;
*** 3169,3179 ****
    return TypeTuple::MEMBAR;
  }
  
  //------------------------------match------------------------------------------
  // Construct projections for memory.
! Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
    switch (proj->_con) {
    case TypeFunc::Control:
    case TypeFunc::Memory:
      return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
    }
--- 3276,3286 ----
    return TypeTuple::MEMBAR;
  }
  
  //------------------------------match------------------------------------------
  // Construct projections for memory.
! Node *MemBarNode::match(const ProjNode *proj, const Matcher *m, const RegMask* mask) {
    switch (proj->_con) {
    case TypeFunc::Control:
    case TypeFunc::Memory:
      return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
    }
*** 3455,3465 ****
  // convenience function
  // return false if the init contains any stores already
  bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
    InitializeNode* init = initialization();
!   if (init == NULL || init->is_complete())  return false;
    init->remove_extra_zeroes();
    // for now, if this allocation has already collected any inits, bail:
    if (init->is_non_zero())  return false;
    init->set_complete(phase);
    return true;
--- 3562,3574 ----
  // convenience function
  // return false if the init contains any stores already
  bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
    InitializeNode* init = initialization();
!   if (init == NULL || init->is_complete()) {
!     return false;
!   }
    init->remove_extra_zeroes();
    // for now, if this allocation has already collected any inits, bail:
    if (init->is_non_zero())  return false;
    init->set_complete(phase);
    return true;
*** 4198,4207 **** --- 4307,4318 ----
        if (zeroes_needed > zeroes_done) {
          intptr_t zsize = zeroes_needed - zeroes_done;
          // Do some incremental zeroing on rawmem, in parallel with inits.
          zeroes_done = align_down(zeroes_done, BytesPerInt);
          rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
+                                               allocation()->in(AllocateNode::DefaultValue),
+                                               allocation()->in(AllocateNode::RawDefaultValue),
                                                zeroes_done, zeroes_needed,
                                                phase);
          zeroes_done = zeroes_needed;
          if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
            do_zeroing = false;   // leave the hole, next time
*** 4257,4266 **** --- 4368,4379 ----
          zeroes_done = size_limit;
        }
      }
      if (zeroes_done < size_limit) {
        rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
+                                             allocation()->in(AllocateNode::DefaultValue),
+                                             allocation()->in(AllocateNode::RawDefaultValue),
                                              zeroes_done, size_in_bytes, phase);
      }
    }
  
    set_complete(phase);