
src/share/vm/opto/memnode.cpp

*** 492,503 ****
  // Find an arraycopy that must have set (can_see_stored_value=true) or
  // could have set (can_see_stored_value=false) the value for this load
  Node* LoadNode::find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const {
!   if (mem->is_Proj() && mem->in(0) != NULL && (mem->in(0)->Opcode() == Op_MemBarStoreStore ||
!                                                mem->in(0)->Opcode() == Op_MemBarCPUOrder)) {
      Node* mb = mem->in(0);
      if (mb->in(0) != NULL && mb->in(0)->is_Proj() &&
          mb->in(0)->in(0) != NULL && mb->in(0)->in(0)->is_ArrayCopy()) {
        ArrayCopyNode* ac = mb->in(0)->in(0)->as_ArrayCopy();
        if (ac->is_clonebasic()) {
--- 492,503 ----
  // Find an arraycopy that must have set (can_see_stored_value=true) or
  // could have set (can_see_stored_value=false) the value for this load
  Node* LoadNode::find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const {
!   if (mem->is_Proj() && mem->in(0) != NULL && (mem->in(0)->Opcode() == Opcodes::Op_MemBarStoreStore ||
!                                                mem->in(0)->Opcode() == Opcodes::Op_MemBarCPUOrder)) {
      Node* mb = mem->in(0);
      if (mb->in(0) != NULL && mb->in(0)->is_Proj() &&
          mb->in(0)->in(0) != NULL && mb->in(0)->in(0)->is_ArrayCopy()) {
        ArrayCopyNode* ac = mb->in(0)->in(0)->as_ArrayCopy();
        if (ac->is_clonebasic()) {

*** 732,742 ****
  }
  
  uint LoadNode::size_of() const { return sizeof(*this); }
  uint LoadNode::cmp( const Node &n ) const
  { return !Type::cmp( _type, ((LoadNode&)n)._type ); }
  const Type *LoadNode::bottom_type() const { return _type; }
! uint LoadNode::ideal_reg() const {
    return _type->ideal_reg();
  }
  
  #ifndef PRODUCT
  void LoadNode::dump_spec(outputStream *st) const {
--- 732,742 ----
  }
  
  uint LoadNode::size_of() const { return sizeof(*this); }
  uint LoadNode::cmp( const Node &n ) const
  { return !Type::cmp( _type, ((LoadNode&)n)._type ); }
  const Type *LoadNode::bottom_type() const { return _type; }
! Opcodes LoadNode::ideal_reg() const {
    return _type->ideal_reg();
  }
  
  #ifndef PRODUCT
  void LoadNode::dump_spec(outputStream *st) const {

*** 754,764 ****
  #ifdef ASSERT
  //----------------------------is_immutable_value-------------------------------
  // Helper function to allow a raw load without control edge for some cases
  bool LoadNode::is_immutable_value(Node* adr) {
    return (adr->is_AddP() && adr->in(AddPNode::Base)->is_top() &&
!           adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
            (adr->in(AddPNode::Offset)->find_intptr_t_con(-1) ==
             in_bytes(JavaThread::osthread_offset())));
  }
  #endif
--- 754,764 ----
  #ifdef ASSERT
  //----------------------------is_immutable_value-------------------------------
  // Helper function to allow a raw load without control edge for some cases
  bool LoadNode::is_immutable_value(Node* adr) {
    return (adr->is_AddP() && adr->in(AddPNode::Base)->is_top() &&
!           adr->in(AddPNode::Address)->Opcode() == Opcodes::Op_ThreadLocal &&
            (adr->in(AddPNode::Offset)->find_intptr_t_con(-1) ==
             in_bytes(JavaThread::osthread_offset())));
  }
  #endif

*** 808,818 ****
      load->set_unaligned_access();
    }
    if (mismatched) {
      load->set_mismatched_access();
    }
!   if (load->Opcode() == Op_LoadN) {
      Node* ld = gvn.transform(load);
      return new DecodeNNode(ld, ld->bottom_type()->make_ptr());
    }
  
    return load;
--- 808,818 ----
      load->set_unaligned_access();
    }
    if (mismatched) {
      load->set_mismatched_access();
    }
!   if (load->Opcode() == Opcodes::Op_LoadN) {
      Node* ld = gvn.transform(load);
      return new DecodeNNode(ld, ld->bottom_type()->make_ptr());
    }
  
    return load;

*** 948,966 ****
      // kind of node is encountered. Loads from final memory can skip
      // through any kind of MemBar but normal loads shouldn't skip
      // through MemBarAcquire since the could allow them to move out of
      // a synchronized region.
      while (current->is_Proj()) {
!       int opc = current->in(0)->Opcode();
!       if ((final && (opc == Op_MemBarAcquire ||
!                      opc == Op_MemBarAcquireLock ||
!                      opc == Op_LoadFence)) ||
!           opc == Op_MemBarRelease ||
!           opc == Op_StoreFence ||
!           opc == Op_MemBarReleaseLock ||
!           opc == Op_MemBarStoreStore ||
!           opc == Op_MemBarCPUOrder) {
        Node* mem = current->in(0)->in(TypeFunc::Memory);
        if (mem->is_MergeMem()) {
          MergeMemNode* merge = mem->as_MergeMem();
          Node* new_st = merge->memory_at(alias_idx);
          if (new_st == merge->base_memory()) {
--- 948,966 ----
      // kind of node is encountered. Loads from final memory can skip
      // through any kind of MemBar but normal loads shouldn't skip
      // through MemBarAcquire since the could allow them to move out of
      // a synchronized region.
      while (current->is_Proj()) {
!       Opcodes opc = current->in(0)->Opcode();
!       if ((final && (opc == Opcodes::Op_MemBarAcquire ||
!                      opc == Opcodes::Op_MemBarAcquireLock ||
!                      opc == Opcodes::Op_LoadFence)) ||
!           opc == Opcodes::Op_MemBarRelease ||
!           opc == Opcodes::Op_StoreFence ||
!           opc == Opcodes::Op_MemBarReleaseLock ||
!           opc == Opcodes::Op_MemBarStoreStore ||
!           opc == Opcodes::Op_MemBarCPUOrder) {
        Node* mem = current->in(0)->in(TypeFunc::Memory);
        if (mem->is_MergeMem()) {
          MergeMemNode* merge = mem->as_MergeMem();
          Node* new_st = merge->memory_at(alias_idx);
          if (new_st == merge->base_memory()) {

*** 1124,1137 ****
  // Construct an equivalent unsigned load.
  Node* LoadNode::convert_to_unsigned_load(PhaseGVN& gvn) {
    BasicType bt = T_ILLEGAL;
    const Type* rt = NULL;
    switch (Opcode()) {
!     case Op_LoadUB: return this;
!     case Op_LoadUS: return this;
!     case Op_LoadB: bt = T_BOOLEAN; rt = TypeInt::UBYTE; break;
!     case Op_LoadS: bt = T_CHAR; rt = TypeInt::CHAR; break;
      default:
        assert(false, "no unsigned variant: %s", Name());
        return NULL;
    }
    return LoadNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address),
--- 1124,1137 ----
  // Construct an equivalent unsigned load.
  Node* LoadNode::convert_to_unsigned_load(PhaseGVN& gvn) {
    BasicType bt = T_ILLEGAL;
    const Type* rt = NULL;
    switch (Opcode()) {
!     case Opcodes::Op_LoadUB: return this;
!     case Opcodes::Op_LoadUS: return this;
!     case Opcodes::Op_LoadB: bt = T_BOOLEAN; rt = TypeInt::UBYTE; break;
!     case Opcodes::Op_LoadS: bt = T_CHAR; rt = TypeInt::CHAR; break;
      default:
        assert(false, "no unsigned variant: %s", Name());
        return NULL;
    }
    return LoadNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address),

*** 1142,1157 ****
  // Construct an equivalent signed load.
  Node* LoadNode::convert_to_signed_load(PhaseGVN& gvn) {
    BasicType bt = T_ILLEGAL;
    const Type* rt = NULL;
    switch (Opcode()) {
!     case Op_LoadUB: bt = T_BYTE; rt = TypeInt::BYTE; break;
!     case Op_LoadUS: bt = T_SHORT; rt = TypeInt::SHORT; break;
!     case Op_LoadB: // fall through
!     case Op_LoadS: // fall through
!     case Op_LoadI: // fall through
!     case Op_LoadL: return this;
      default:
        assert(false, "no signed variant: %s", Name());
        return NULL;
    }
    return LoadNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address),
--- 1142,1157 ----
  // Construct an equivalent signed load.
  Node* LoadNode::convert_to_signed_load(PhaseGVN& gvn) {
    BasicType bt = T_ILLEGAL;
    const Type* rt = NULL;
    switch (Opcode()) {
!     case Opcodes::Op_LoadUB: bt = T_BYTE; rt = TypeInt::BYTE; break;
!     case Opcodes::Op_LoadUS: bt = T_SHORT; rt = TypeInt::SHORT; break;
!     case Opcodes::Op_LoadB: // fall through
!     case Opcodes::Op_LoadS: // fall through
!     case Opcodes::Op_LoadI: // fall through
!     case Opcodes::Op_LoadL: return this;
      default:
        assert(false, "no signed variant: %s", Name());
        return NULL;
    }
    return LoadNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address),

*** 1198,1208 ****
      Node* elements[4];
      int shift = exact_log2(type2aelembytes(T_OBJECT));
      int count = address->unpack_offsets(elements, ARRAY_SIZE(elements));
      if ((count > 0) && elements[0]->is_Con() &&
          ((count == 1) ||
!          (count == 2) && elements[1]->Opcode() == Op_LShiftX &&
                           elements[1]->in(2) == phase->intcon(shift))) {
        ciObjArray* array = base_type->const_oop()->as_obj_array();
        // Fetch the box object cache[0] at the base of the array and get its value
        ciInstance* box = array->obj_at(0)->as_instance();
        ciInstanceKlass* ik = box->klass()->as_instance_klass();
--- 1198,1208 ----
      Node* elements[4];
      int shift = exact_log2(type2aelembytes(T_OBJECT));
      int count = address->unpack_offsets(elements, ARRAY_SIZE(elements));
      if ((count > 0) && elements[0]->is_Con() &&
          ((count == 1) ||
!          (count == 2) && elements[1]->Opcode() == Opcodes::Op_LShiftX &&
                           elements[1]->in(2) == phase->intcon(shift))) {
        ciObjArray* array = base_type->const_oop()->as_obj_array();
        // Fetch the box object cache[0] at the base of the array and get its value
        ciInstance* box = array->obj_at(0)->as_instance();
        ciInstanceKlass* ik = box->klass()->as_instance_klass();

*** 1231,1246 ****
          result = phase->transform(new AddXNode(result, elements[i]));
        }
        // Remove the constant offset from the address and then
        result = phase->transform(new AddXNode(result, phase->MakeConX(-(int)offset)));
        // remove the scaling of the offset to recover the original index.
!       if (result->Opcode() == Op_LShiftX && result->in(2) == phase->intcon(shift)) {
          // Peel the shift off directly but wrap it in a dummy node
          // since Ideal can't return existing nodes
          result = new RShiftXNode(result->in(1), phase->intcon(0));
        } else if (result->is_Add() && result->in(2)->is_Con() &&
!                  result->in(1)->Opcode() == Op_LShiftX &&
                   result->in(1)->in(2) == phase->intcon(shift)) {
          // We can't do general optimization: ((X<<Z) + Y) >> Z ==> X + (Y>>Z)
          // but for boxing cache access we know that X<<Z will not overflow
          // (there is range check) so we do this optimizatrion by hand here.
          Node* add_con = new RShiftXNode(result->in(2), phase->intcon(shift));
--- 1231,1246 ----
          result = phase->transform(new AddXNode(result, elements[i]));
        }
        // Remove the constant offset from the address and then
        result = phase->transform(new AddXNode(result, phase->MakeConX(-(int)offset)));
        // remove the scaling of the offset to recover the original index.
!       if (result->Opcode() == Opcodes::Op_LShiftX && result->in(2) == phase->intcon(shift)) {
          // Peel the shift off directly but wrap it in a dummy node
          // since Ideal can't return existing nodes
          result = new RShiftXNode(result->in(1), phase->intcon(0));
        } else if (result->is_Add() && result->in(2)->is_Con() &&
!                  result->in(1)->Opcode() == Opcodes::Op_LShiftX &&
                   result->in(1)->in(2) == phase->intcon(shift)) {
          // We can't do general optimization: ((X<<Z) + Y) >> Z ==> X + (Y>>Z)
          // but for boxing cache access we know that X<<Z will not overflow
          // (there is range check) so we do this optimizatrion by hand here.
          Node* add_con = new RShiftXNode(result->in(2), phase->intcon(shift));

*** 1258,1271 ****
        }
  #endif
        // Boxing/unboxing can be done from signed & unsigned loads (e.g. LoadUB -> ... -> LoadB pair).
        // Need to preserve unboxing load type if it is unsigned.
        switch(this->Opcode()) {
!         case Op_LoadUB:
            result = new AndINode(phase->transform(result), phase->intcon(0xFF));
            break;
!         case Op_LoadUS:
            result = new AndINode(phase->transform(result), phase->intcon(0xFFFF));
            break;
        }
        return result;
      }
--- 1258,1271 ----
        }
  #endif
        // Boxing/unboxing can be done from signed & unsigned loads (e.g. LoadUB -> ... -> LoadB pair).
        // Need to preserve unboxing load type if it is unsigned.
        switch(this->Opcode()) {
!         case Opcodes::Op_LoadUB:
            result = new AndINode(phase->transform(result), phase->intcon(0xFF));
            break;
!         case Opcodes::Op_LoadUS:
            result = new AndINode(phase->transform(result), phase->intcon(0xFFFF));
            break;
        }
        return result;
      }

*** 1483,1493 ****
    Node* address = in(MemNode::Address);
    bool progress = false;
  
    // Skip up past a SafePoint control. Cannot do this for Stores because
    // pointer stores & cardmarks must stay on the same side of a SafePoint.
!   if( ctrl != NULL && ctrl->Opcode() == Op_SafePoint &&
        phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw ) {
      ctrl = ctrl->in(0);
      set_req(MemNode::Control,ctrl);
      progress = true;
    }
--- 1483,1493 ----
    Node* address = in(MemNode::Address);
    bool progress = false;
  
    // Skip up past a SafePoint control. Cannot do this for Stores because
    // pointer stores & cardmarks must stay on the same side of a SafePoint.
!   if( ctrl != NULL && ctrl->Opcode() == Opcodes::Op_SafePoint &&
        phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw ) {
      ctrl = ctrl->in(0);
      set_req(MemNode::Control,ctrl);
      progress = true;
    }

*** 1602,1623 ****
  LoadNode::load_array_final_field(const TypeKlassPtr *tkls, ciKlass* klass) const {
    if (tkls->offset() == in_bytes(Klass::modifier_flags_offset())) {
      // The field is Klass::_modifier_flags. Return its (constant) value.
      // (Folds up the 2nd indirection in aClassConstant.getModifiers().)
!     assert(this->Opcode() == Op_LoadI, "must load an int from _modifier_flags");
      return TypeInt::make(klass->modifier_flags());
    }
    if (tkls->offset() == in_bytes(Klass::access_flags_offset())) {
      // The field is Klass::_access_flags. Return its (constant) value.
      // (Folds up the 2nd indirection in Reflection.getClassAccessFlags(aClassConstant).)
!     assert(this->Opcode() == Op_LoadI, "must load an int from _access_flags");
      return TypeInt::make(klass->access_flags());
    }
    if (tkls->offset() == in_bytes(Klass::layout_helper_offset())) {
      // The field is Klass::_layout_helper. Return its constant value if known.
!     assert(this->Opcode() == Op_LoadI, "must load an int from _layout_helper");
      return TypeInt::make(klass->layout_helper());
    }
    // No match.
    return NULL;
--- 1602,1623 ----
  LoadNode::load_array_final_field(const TypeKlassPtr *tkls, ciKlass* klass) const {
    if (tkls->offset() == in_bytes(Klass::modifier_flags_offset())) {
      // The field is Klass::_modifier_flags. Return its (constant) value.
      // (Folds up the 2nd indirection in aClassConstant.getModifiers().)
!     assert(this->Opcode() == Opcodes::Op_LoadI, "must load an int from _modifier_flags");
      return TypeInt::make(klass->modifier_flags());
    }
    if (tkls->offset() == in_bytes(Klass::access_flags_offset())) {
      // The field is Klass::_access_flags. Return its (constant) value.
      // (Folds up the 2nd indirection in Reflection.getClassAccessFlags(aClassConstant).)
!     assert(this->Opcode() == Opcodes::Op_LoadI, "must load an int from _access_flags");
      return TypeInt::make(klass->access_flags());
    }
    if (tkls->offset() == in_bytes(Klass::layout_helper_offset())) {
      // The field is Klass::_layout_helper. Return its constant value if known.
!     assert(this->Opcode() == Opcodes::Op_LoadI, "must load an int from _layout_helper");
      return TypeInt::make(klass->layout_helper());
    }
    // No match.
    return NULL;

*** 1680,1690 ****
      // In fact, that could have been the original type of p1, and p1 could have
      // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
      // expression (LShiftL quux 3) independently optimized to the constant 8.
      if ((t->isa_int() == NULL) && (t->isa_long() == NULL)
          && (_type->isa_vect() == NULL)
!         && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
        // t might actually be lower than _type, if _type is a unique
        // concrete subclass of abstract class t.
        if (off_beyond_header) { // is the offset beyond the header?
          const Type* jt = t->join_speculative(_type);
          // In any case, do not allow the join, per se, to empty out the type.
--- 1680,1690 ----
      // In fact, that could have been the original type of p1, and p1 could have
      // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
      // expression (LShiftL quux 3) independently optimized to the constant 8.
      if ((t->isa_int() == NULL) && (t->isa_long() == NULL)
          && (_type->isa_vect() == NULL)
!         && Opcode() != Opcodes::Op_LoadKlass && Opcode() != Opcodes::Op_LoadNKlass) {
        // t might actually be lower than _type, if _type is a unique
        // concrete subclass of abstract class t.
        if (off_beyond_header) { // is the offset beyond the header?
          const Type* jt = t->join_speculative(_type);
          // In any case, do not allow the join, per se, to empty out the type.

*** 1735,1745 ****
        assert( off != Type::OffsetBot ||
                // arrays can be cast to Objects
                tp->is_klassptr()->klass()->is_java_lang_Object() ||
                // also allow array-loading from the primary supertype
                // array during subtype checks
!               Opcode() == Op_LoadKlass,
                "Field accesses must be precise" );
        // For klass/static loads, we expect the _type to be precise
      }
  
      const TypeKlassPtr *tkls = tp->isa_klassptr();
--- 1735,1745 ----
        assert( off != Type::OffsetBot ||
                // arrays can be cast to Objects
                tp->is_klassptr()->klass()->is_java_lang_Object() ||
                // also allow array-loading from the primary supertype
                // array during subtype checks
!               Opcode() == Opcodes::Op_LoadKlass,
                "Field accesses must be precise" );
        // For klass/static loads, we expect the _type to be precise
      }
  
      const TypeKlassPtr *tkls = tp->isa_klassptr();

*** 1750,1778 ****
      // is known at compile time (the type is "exact" or "precise").
      // Check for fields we know are maintained as constants by the VM.
      if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
        // The field is Klass::_super_check_offset. Return its (constant) value.
        // (Folds up type checking code.)
!       assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
        return TypeInt::make(klass->super_check_offset());
      }
      // Compute index into primary_supers array
      juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
      // Check for overflowing; use unsigned compare to handle the negative case.
      if( depth < ciKlass::primary_super_limit() ) {
        // The field is an element of Klass::_primary_supers. Return its (constant) value.
        // (Folds up type checking code.)
!       assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
        ciKlass *ss = klass->super_of_depth(depth);
        return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
      }
      const Type* aift = load_array_final_field(tkls, klass);
      if (aift != NULL) return aift;
      if (tkls->offset() == in_bytes(Klass::java_mirror_offset())) {
        // The field is Klass::_java_mirror. Return its (constant) value.
        // (Folds up the 2nd indirection in anObjConstant.getClass().)
!       assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
        return TypeInstPtr::make(klass->java_mirror());
      }
    }
  
    // We can still check if we are loading from the primary_supers array at a
--- 1750,1778 ----
      // is known at compile time (the type is "exact" or "precise").
      // Check for fields we know are maintained as constants by the VM.
      if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
        // The field is Klass::_super_check_offset. Return its (constant) value.
        // (Folds up type checking code.)
!       assert(Opcode() == Opcodes::Op_LoadI, "must load an int from _super_check_offset");
        return TypeInt::make(klass->super_check_offset());
      }
      // Compute index into primary_supers array
      juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
      // Check for overflowing; use unsigned compare to handle the negative case.
      if( depth < ciKlass::primary_super_limit() ) {
        // The field is an element of Klass::_primary_supers. Return its (constant) value.
        // (Folds up type checking code.)
!       assert(Opcode() == Opcodes::Op_LoadKlass, "must load a klass from _primary_supers");
        ciKlass *ss = klass->super_of_depth(depth);
        return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
      }
      const Type* aift = load_array_final_field(tkls, klass);
      if (aift != NULL) return aift;
      if (tkls->offset() == in_bytes(Klass::java_mirror_offset())) {
        // The field is Klass::_java_mirror. Return its (constant) value.
        // (Folds up the 2nd indirection in anObjConstant.getClass().)
!       assert(Opcode() == Opcodes::Op_LoadP, "must load an oop from _java_mirror");
        return TypeInstPtr::make(klass->java_mirror());
      }
    }
  
    // We can still check if we are loading from the primary_supers array at a

*** 1789,1799 ****
      // Check for overflowing; use unsigned compare to handle the negative case.
      if( depth < ciKlass::primary_super_limit() &&
          depth <= klass->super_depth() ) { // allow self-depth checks to handle self-check case
        // The field is an element of Klass::_primary_supers. Return its (constant) value.
        // (Folds up type checking code.)
!       assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
        ciKlass *ss = klass->super_of_depth(depth);
        return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
      }
    }
  }
--- 1789,1799 ----
      // Check for overflowing; use unsigned compare to handle the negative case.
      if( depth < ciKlass::primary_super_limit() &&
          depth <= klass->super_depth() ) { // allow self-depth checks to handle self-check case
        // The field is an element of Klass::_primary_supers. Return its (constant) value.
        // (Folds up type checking code.)
!       assert(Opcode() == Opcodes::Op_LoadKlass, "must load a klass from _primary_supers");
        ciKlass *ss = klass->super_of_depth(depth);
        return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
      }
    }
  }

*** 1806,1816 ****
          && !klass->is_interface() // specifically not Serializable & Cloneable
          && !klass->is_java_lang_Object() // not the supertype of all T[]
          ) {
        // Note: When interfaces are reliable, we can narrow the interface
        // test to (klass != Serializable && klass != Cloneable).
!       assert(Opcode() == Op_LoadI, "must load an int from _layout_helper");
        jint min_size = Klass::instance_layout_helper(oopDesc::header_size(), false);
        // The key property of this type is that it folds up tests
        // for array-ness, since it proves that the layout_helper is positive.
        // Thus, a generic value like the basic object layout helper works fine.
        return TypeInt::make(min_size, max_jint, Type::WidenMin);
--- 1806,1816 ----
          && !klass->is_interface() // specifically not Serializable & Cloneable
          && !klass->is_java_lang_Object() // not the supertype of all T[]
          ) {
        // Note: When interfaces are reliable, we can narrow the interface
        // test to (klass != Serializable && klass != Cloneable).
!       assert(Opcode() == Opcodes::Op_LoadI, "must load an int from _layout_helper");
        jint min_size = Klass::instance_layout_helper(oopDesc::header_size(), false);
        // The key property of this type is that it folds up tests
        // for array-ness, since it proves that the layout_helper is positive.
        // Thus, a generic value like the basic object layout helper works fine.
        return TypeInt::make(min_size, max_jint, Type::WidenMin);

*** 2387,2406 ****
      // return. Or, 'st' might be used by some node which is live at
      // the same time 'st' is live, which might be unschedulable. So,
      // require exactly ONE user until such time as we clone 'mem' for
      // each of 'mem's uses (thus making the exactly-1-user-rule hold
      // true).
!     while (st->is_Store() && st->outcnt() == 1 && st->Opcode() != Op_StoreCM) {
        // Looking at a dead closed cycle of memory?
        assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
        assert(Opcode() == st->Opcode() ||
!              st->Opcode() == Op_StoreVector ||
!              Opcode() == Op_StoreVector ||
               phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
!              (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
               (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
!              "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
  
        if (st->in(MemNode::Address)->eqv_uncast(address) &&
            st->as_Store()->memory_size() <= this->memory_size()) {
          Node* use = st->raw_out(0);
          phase->igvn_rehash_node_delayed(use);
--- 2387,2406 ----
      // return. Or, 'st' might be used by some node which is live at
      // the same time 'st' is live, which might be unschedulable. So,
      // require exactly ONE user until such time as we clone 'mem' for
      // each of 'mem's uses (thus making the exactly-1-user-rule hold
      // true).
!     while (st->is_Store() && st->outcnt() == 1 && st->Opcode() != Opcodes::Op_StoreCM) {
        // Looking at a dead closed cycle of memory?
        assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
        assert(Opcode() == st->Opcode() ||
!              st->Opcode() == Opcodes::Op_StoreVector ||
!              Opcode() == Opcodes::Op_StoreVector ||
               phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
!              (Opcode() == Opcodes::Op_StoreL && st->Opcode() == Opcodes::Op_StoreI) || // expanded ClearArrayNode
               (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
!              "no mismatched stores, except on raw memory: %s %s", NodeClassNames[static_cast<uint>(Opcode())], NodeClassNames[static_cast<uint>(st->Opcode())]);
  
        if (st->in(MemNode::Address)->eqv_uncast(address) &&
            st->as_Store()->memory_size() <= this->memory_size()) {
          Node* use = st->raw_out(0);
          phase->igvn_rehash_node_delayed(use);

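The static_cast<uint> added to the assert message in the hunk above follows from Opcodes being a scoped enum: unlike the old plain Op_* constants, its values no longer convert implicitly to an integer, so indexing NodeClassNames needs an explicit cast. A minimal standalone sketch of that pattern, using hypothetical stand-ins rather than the real HotSpot declarations:

    #include <cstdio>

    // Hypothetical stand-ins; the real Opcodes enum and NodeClassNames
    // table are generated elsewhere in the HotSpot build.
    enum class Opcodes : unsigned int { Op_StoreI, Op_StoreL, Op_StoreCM };
    static const char* NodeClassNames[] = { "StoreI", "StoreL", "StoreCM" };

    int main() {
      Opcodes opc = Opcodes::Op_StoreL;
      // A scoped enum does not convert to an array index implicitly,
      // so the table lookup requires an explicit integral cast.
      std::printf("%s\n", NodeClassNames[static_cast<unsigned int>(opc)]);
      return 0;
    }
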
*** 2519,2529 ****
  // (StoreB ... (AndI valIn conIa) )
  // If (conIa & mask == mask) this simplifies to
  // (StoreB ... (valIn) )
  Node *StoreNode::Ideal_masked_input(PhaseGVN *phase, uint mask) {
    Node *val = in(MemNode::ValueIn);
!   if( val->Opcode() == Op_AndI ) {
      const TypeInt *t = phase->type( val->in(2) )->isa_int();
      if( t && t->is_con() && (t->get_con() & mask) == mask ) {
        set_req(MemNode::ValueIn, val->in(1));
        return this;
      }
--- 2519,2529 ----
  // (StoreB ... (AndI valIn conIa) )
  // If (conIa & mask == mask) this simplifies to
  // (StoreB ... (valIn) )
  Node *StoreNode::Ideal_masked_input(PhaseGVN *phase, uint mask) {
    Node *val = in(MemNode::ValueIn);
!   if( val->Opcode() == Opcodes::Op_AndI ) {
      const TypeInt *t = phase->type( val->in(2) )->isa_int();
      if( t && t->is_con() && (t->get_con() & mask) == mask ) {
        set_req(MemNode::ValueIn, val->in(1));
        return this;
      }

*** 2537,2551 ****
  // (StoreB ... (RShiftI _ (LShiftI _ valIn conIL ) conIR) )
  // If (conIL == conIR && conIR <= num_bits) this simplifies to
  // (StoreB ... (valIn) )
  Node *StoreNode::Ideal_sign_extended_input(PhaseGVN *phase, int num_bits) {
    Node *val = in(MemNode::ValueIn);
!   if( val->Opcode() == Op_RShiftI ) {
      const TypeInt *t = phase->type( val->in(2) )->isa_int();
      if( t && t->is_con() && (t->get_con() <= num_bits) ) {
        Node *shl = val->in(1);
!       if( shl->Opcode() == Op_LShiftI ) {
          const TypeInt *t2 = phase->type( shl->in(2) )->isa_int();
          if( t2 && t2->is_con() && (t2->get_con() == t->get_con()) ) {
            set_req(MemNode::ValueIn, shl->in(1));
            return this;
          }
--- 2537,2551 ----
  // (StoreB ... (RShiftI _ (LShiftI _ valIn conIL ) conIR) )
  // If (conIL == conIR && conIR <= num_bits) this simplifies to
  // (StoreB ... (valIn) )
  Node *StoreNode::Ideal_sign_extended_input(PhaseGVN *phase, int num_bits) {
    Node *val = in(MemNode::ValueIn);
!   if( val->Opcode() == Opcodes::Op_RShiftI ) {
      const TypeInt *t = phase->type( val->in(2) )->isa_int();
      if( t && t->is_con() && (t->get_con() <= num_bits) ) {
        Node *shl = val->in(1);
!       if( shl->Opcode() == Opcodes::Op_LShiftI ) {
          const TypeInt *t2 = phase->type( shl->in(2) )->isa_int();
          if( t2 && t2->is_con() && (t2->get_con() == t->get_con()) ) {
            set_req(MemNode::ValueIn, shl->in(1));
            return this;
          }

*** 2673,2690 ****
    init_req(MemNode::Address, adr);
    init_req(MemNode::ValueIn, val);
    init_class_id(Class_LoadStore);
  }
  
! uint LoadStoreNode::ideal_reg() const {
    return _type->ideal_reg();
  }
  
  bool LoadStoreNode::result_not_used() const {
    for( DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++ ) {
      Node *x = fast_out(i);
!     if (x->Opcode() == Op_SCMemProj) continue;
      return false;
    }
    return true;
  }
--- 2673,2690 ----
    init_req(MemNode::Address, adr);
    init_req(MemNode::ValueIn, val);
    init_class_id(Class_LoadStore);
  }
  
! Opcodes LoadStoreNode::ideal_reg() const {
    return _type->ideal_reg();
  }
  
  bool LoadStoreNode::result_not_used() const {
    for( DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++ ) {
      Node *x = fast_out(i);
!     if (x->Opcode() == Opcodes::Op_SCMemProj) continue;
      return false;
    }
    return true;
  }

*** 2747,2757 ****
      const TypePtr* atp = at->isa_ptr();
      // adjust atp to be the correct array element address type
      if (atp == NULL) atp = TypePtr::BOTTOM;
      else             atp = atp->add_offset(Type::OffsetBot);
      // Get base for derived pointer purposes
!     if( adr->Opcode() != Op_AddP ) Unimplemented();
      Node *base = adr->in(1);
  
      Node *zero = phase->makecon(TypeLong::ZERO);
      Node *off = phase->MakeConX(BytesPerLong);
      mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
--- 2747,2757 ----
      const TypePtr* atp = at->isa_ptr();
      // adjust atp to be the correct array element address type
      if (atp == NULL) atp = TypePtr::BOTTOM;
      else             atp = atp->add_offset(Type::OffsetBot);
      // Get base for derived pointer purposes
!     if( adr->Opcode() != Opcodes::Op_AddP ) Unimplemented();
      Node *base = adr->in(1);
  
      Node *zero = phase->makecon(TypeLong::ZERO);
      Node *off = phase->MakeConX(BytesPerLong);
      mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);

*** 2889,2911 ****
  uint MemBarNode::cmp( const Node &n ) const {
    return (&n == this); // Always fail except on self
  }
  
  //------------------------------make-------------------------------------------
! MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) {
    switch (opcode) {
!   case Op_MemBarAcquire:     return new MemBarAcquireNode(C, atp, pn);
!   case Op_LoadFence:         return new LoadFenceNode(C, atp, pn);
!   case Op_MemBarRelease:     return new MemBarReleaseNode(C, atp, pn);
!   case Op_StoreFence:        return new StoreFenceNode(C, atp, pn);
!   case Op_MemBarAcquireLock: return new MemBarAcquireLockNode(C, atp, pn);
!   case Op_MemBarReleaseLock: return new MemBarReleaseLockNode(C, atp, pn);
!   case Op_MemBarVolatile:    return new MemBarVolatileNode(C, atp, pn);
!   case Op_MemBarCPUOrder:    return new MemBarCPUOrderNode(C, atp, pn);
!   case Op_OnSpinWait:        return new OnSpinWaitNode(C, atp, pn);
!   case Op_Initialize:        return new InitializeNode(C, atp, pn);
!   case Op_MemBarStoreStore:  return new MemBarStoreStoreNode(C, atp, pn);
    default: ShouldNotReachHere(); return NULL;
    }
  }
  
  //------------------------------Ideal------------------------------------------
--- 2889,2911 ----
  uint MemBarNode::cmp( const Node &n ) const {
    return (&n == this); // Always fail except on self
  }
  
  //------------------------------make-------------------------------------------
! MemBarNode* MemBarNode::make(Compile* C, Opcodes opcode, int atp, Node* pn) {
    switch (opcode) {
!   case Opcodes::Op_MemBarAcquire:     return new MemBarAcquireNode(C, atp, pn);
!   case Opcodes::Op_LoadFence:         return new LoadFenceNode(C, atp, pn);
!   case Opcodes::Op_MemBarRelease:     return new MemBarReleaseNode(C, atp, pn);
!   case Opcodes::Op_StoreFence:        return new StoreFenceNode(C, atp, pn);
!   case Opcodes::Op_MemBarAcquireLock: return new MemBarAcquireLockNode(C, atp, pn);
!   case Opcodes::Op_MemBarReleaseLock: return new MemBarReleaseLockNode(C, atp, pn);
!   case Opcodes::Op_MemBarVolatile:    return new MemBarVolatileNode(C, atp, pn);
!   case Opcodes::Op_MemBarCPUOrder:    return new MemBarCPUOrderNode(C, atp, pn);
!   case Opcodes::Op_OnSpinWait:        return new OnSpinWaitNode(C, atp, pn);
!   case Opcodes::Op_Initialize:        return new InitializeNode(C, atp, pn);
!   case Opcodes::Op_MemBarStoreStore:  return new MemBarStoreStoreNode(C, atp, pn);
    default: ShouldNotReachHere(); return NULL;
    }
  }
  
  //------------------------------Ideal------------------------------------------

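Switching over the scoped enum, as MemBarNode::make does in the hunk above, needs no casts; only the selector type and the case labels change. A self-contained sketch of the same factory shape, using hypothetical barrier classes rather than the real MemBar node types:

    // Hypothetical miniature of the factory shape; not HotSpot code.
    enum class Opcodes { Op_MemBarAcquire, Op_MemBarRelease, Op_MemBarVolatile };

    struct MemBar      { virtual ~MemBar() {} };
    struct AcquireBar  : MemBar {};
    struct ReleaseBar  : MemBar {};
    struct VolatileBar : MemBar {};

    static MemBar* make_membar(Opcodes opcode) {
      switch (opcode) {
      case Opcodes::Op_MemBarAcquire:  return new AcquireBar();
      case Opcodes::Op_MemBarRelease:  return new ReleaseBar();
      case Opcodes::Op_MemBarVolatile: return new VolatileBar();
      default:                         return nullptr; // mirrors the ShouldNotReachHere() arm
      }
    }
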
*** 2920,2938 ****
    bool progress = false;
    // Eliminate volatile MemBars for scalar replaced objects.
    if (can_reshape && req() == (Precedent+1)) {
      bool eliminate = false;
!     int opc = Opcode();
!     if ((opc == Op_MemBarAcquire || opc == Op_MemBarVolatile)) {
        // Volatile field loads and stores.
        Node* my_mem = in(MemBarNode::Precedent);
        // The MembarAquire may keep an unused LoadNode alive through the Precedent edge
!       if ((my_mem != NULL) && (opc == Op_MemBarAcquire) && (my_mem->outcnt() == 1)) {
          // if the Precedent is a decodeN and its input (a Load) is used at more than one place,
          // replace this Precedent (decodeN) with the Load instead.
!         if ((my_mem->Opcode() == Op_DecodeN) && (my_mem->in(1)->outcnt() > 1)) {
            Node* load_node = my_mem->in(1);
            set_req(MemBarNode::Precedent, load_node);
            phase->is_IterGVN()->_worklist.push(my_mem);
            my_mem = load_node;
          } else {
--- 2920,2938 ----
    bool progress = false;
    // Eliminate volatile MemBars for scalar replaced objects.
    if (can_reshape && req() == (Precedent+1)) {
      bool eliminate = false;
!     Opcodes opc = Opcode();
!     if ((opc == Opcodes::Op_MemBarAcquire || opc == Opcodes::Op_MemBarVolatile)) {
        // Volatile field loads and stores.
        Node* my_mem = in(MemBarNode::Precedent);
        // The MembarAquire may keep an unused LoadNode alive through the Precedent edge
!       if ((my_mem != NULL) && (opc == Opcodes::Op_MemBarAcquire) && (my_mem->outcnt() == 1)) {
          // if the Precedent is a decodeN and its input (a Load) is used at more than one place,
          // replace this Precedent (decodeN) with the Load instead.
!         if ((my_mem->Opcode() == Opcodes::Op_DecodeN) && (my_mem->in(1)->outcnt() > 1)) {
            Node* load_node = my_mem->in(1);
            set_req(MemBarNode::Precedent, load_node);
            phase->is_IterGVN()->_worklist.push(my_mem);
            my_mem = load_node;
          } else {

*** 2950,2960 ****
              t_oop->offset() != Type::OffsetBot &&
              t_oop->offset() != Type::OffsetTop) {
            eliminate = true;
          }
        }
!     } else if (opc == Op_MemBarRelease) {
        // Final field stores.
        Node* alloc = AllocateNode::Ideal_allocation(in(MemBarNode::Precedent), phase);
        if ((alloc != NULL) && alloc->is_Allocate() &&
            alloc->as_Allocate()->does_not_escape_thread()) {
          // The allocated object does not escape.
--- 2950,2960 ----
              t_oop->offset() != Type::OffsetBot &&
              t_oop->offset() != Type::OffsetTop) {
            eliminate = true;
          }
        }
!     } else if (opc == Opcodes::Op_MemBarRelease) {
        // Final field stores.
        Node* alloc = AllocateNode::Ideal_allocation(in(MemBarNode::Precedent), phase);
        if ((alloc != NULL) && alloc->is_Allocate() &&
            alloc->as_Allocate()->does_not_escape_thread()) {
          // The allocated object does not escape.

*** 2986,2996 ****
  // Construct projections for memory.
  Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
    switch (proj->_con) {
    case TypeFunc::Control:
    case TypeFunc::Memory:
!     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
    }
    ShouldNotReachHere();
    return NULL;
  }
--- 2986,2996 ----
  // Construct projections for memory.
  Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
    switch (proj->_con) {
    case TypeFunc::Control:
    case TypeFunc::Memory:
!     return new MachProjNode(this,proj->_con,RegMask::Empty,static_cast<Opcodes>(MachProjNode::projType::unmatched_proj));
    }
    ShouldNotReachHere();
    return NULL;
  }

*** 3101,3111 ****
  // register allocator. Declare that there are no constraints
  // on the allocation of the RawAddress edge.
  const RegMask &InitializeNode::in_RegMask(uint idx) const {
    // This edge should be set to top, by the set_complete. But be conservative.
    if (idx == InitializeNode::RawAddress)
!     return *(Compile::current()->matcher()->idealreg2spillmask[in(idx)->ideal_reg()]);
    return RegMask::Empty;
  }
  
  Node* InitializeNode::memory(uint alias_idx) {
    Node* mem = in(Memory);
--- 3101,3111 ----
  // register allocator. Declare that there are no constraints
  // on the allocation of the RawAddress edge.
  const RegMask &InitializeNode::in_RegMask(uint idx) const {
    // This edge should be set to top, by the set_complete. But be conservative.
    if (idx == InitializeNode::RawAddress)
!     return *(Compile::current()->matcher()->idealreg2spillmask[static_cast<uint>(in(idx)->ideal_reg())]);
    return RegMask::Empty;
  }
  
  Node* InitializeNode::memory(uint alias_idx) {
    Node* mem = in(Memory);

*** 3583,3593 ****
      case T_DOUBLE: con = jlong_cast(val->getd()); break;
      default: continue; //skip (odd store type)
      }
  
      if (type == T_LONG && Matcher::isSimpleConstant64(con) &&
!         st->Opcode() == Op_StoreL) {
        continue; // This StoreL is already optimal.
      }
  
      // Store down the constant.
      store_constant(tiles, num_tiles, st_off, st_size, con);
--- 3583,3593 ----
      case T_DOUBLE: con = jlong_cast(val->getd()); break;
      default: continue; //skip (odd store type)
      }
  
      if (type == T_LONG && Matcher::isSimpleConstant64(con) &&
!         st->Opcode() == Opcodes::Op_StoreL) {
        continue; // This StoreL is already optimal.
      }
  
      // Store down the constant.
      store_constant(tiles, num_tiles, st_off, st_size, con);

*** 3596,3617 ****
      if (type == T_INT && st_size == BytesPerInt
          && (st_off & BytesPerInt) == BytesPerInt) {
        jlong lcon = tiles[j];
        if (!Matcher::isSimpleConstant64(lcon) &&
!           st->Opcode() == Op_StoreI) {
          // This StoreI is already optimal by itself.
          jint* intcon = (jint*) &tiles[j];
          intcon[1] = 0; // undo the store_constant()
  
          // If the previous store is also optimal by itself, back up and
          // undo the action of the previous loop iteration... if we can.
          // But if we can't, just let the previous half take care of itself.
          st = nodes[j];
          st_off -= BytesPerInt;
          con = intcon[0];
!         if (con != 0 && st != NULL && st->Opcode() == Op_StoreI) {
            assert(st_off >= header_size, "still ignoring header");
            assert(get_store_offset(st, phase) == st_off, "must be");
            assert(in(i-1) == zmem, "must be");
            DEBUG_ONLY(const Type* tcon = phase->type(st->in(MemNode::ValueIn)));
            assert(con == tcon->is_int()->get_con(), "must be");
--- 3596,3617 ----
      if (type == T_INT && st_size == BytesPerInt
          && (st_off & BytesPerInt) == BytesPerInt) {
        jlong lcon = tiles[j];
        if (!Matcher::isSimpleConstant64(lcon) &&
!           st->Opcode() == Opcodes::Op_StoreI) {
          // This StoreI is already optimal by itself.
          jint* intcon = (jint*) &tiles[j];
          intcon[1] = 0; // undo the store_constant()
  
          // If the previous store is also optimal by itself, back up and
          // undo the action of the previous loop iteration... if we can.
          // But if we can't, just let the previous half take care of itself.
          st = nodes[j];
          st_off -= BytesPerInt;
          con = intcon[0];
!         if (con != 0 && st != NULL && st->Opcode() == Opcodes::Op_StoreI) {
            assert(st_off >= header_size, "still ignoring header");
            assert(get_store_offset(st, phase) == st_off, "must be");
            assert(in(i-1) == zmem, "must be");
            DEBUG_ONLY(const Type* tcon = phase->type(st->in(MemNode::ValueIn)));
            assert(con == tcon->is_int()->get_con(), "must be");

*** 3930,3940 ****
        zeroes_done = align_size_down(zeroes_done, BytesPerInt);
        // if it is the last unused 4 bytes of an instance, forget about it
        intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
        if (zeroes_done + BytesPerLong >= size_limit) {
          assert(allocation() != NULL, "");
!         if (allocation()->Opcode() == Op_Allocate) {
            Node* klass_node = allocation()->in(AllocateNode::KlassNode);
            ciKlass* k = phase->type(klass_node)->is_klassptr()->klass();
            if (zeroes_done == k->layout_helper())
              zeroes_done = size_limit;
          }
--- 3930,3940 ----
        zeroes_done = align_size_down(zeroes_done, BytesPerInt);
        // if it is the last unused 4 bytes of an instance, forget about it
        intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
        if (zeroes_done + BytesPerLong >= size_limit) {
          assert(allocation() != NULL, "");
!         if (allocation()->Opcode() == Opcodes::Op_Allocate) {
            Node* klass_node = allocation()->in(AllocateNode::KlassNode);
            ciKlass* k = phase->type(klass_node)->is_klassptr()->klass();
            if (zeroes_done == k->layout_helper())
              zeroes_done = size_limit;
          }