hotspot/src/share/vm/opto/memnode.cpp
rev 5144 : [mq]: webrev.03

*** 960,969 ****
--- 960,982 ----
  uint LoadNode::hash() const {
    // unroll addition of interesting fields
    return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
  }
  
+ static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
+   if ((atp != NULL) && (atp->index() >= Compile::AliasIdxRaw)) {
+     bool non_volatile = (atp->field() != NULL) && !atp->field()->is_volatile();
+     bool is_stable_ary = FoldStableValues &&
+                          (tp != NULL) && (tp->isa_aryptr() != NULL) &&
+                          tp->isa_aryptr()->is_stable();
+ 
+     return (eliminate_boxing && non_volatile) || is_stable_ary;
+   }
+ 
+   return false;
+ }
+ 
  //---------------------------can_see_stored_value------------------------------
  // This routine exists to make sure this set of tests is done the same
  // everywhere.  We need to make a coordinated change: first LoadNode::Ideal
  // will change the graph shape in a way which makes memory alive twice at the
  // same time (uses the Oracle model of aliasing), then some
*** 974,988 ****
    intptr_t ld_off = 0;
    AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
    const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
    Compile::AliasType* atp = (tp != NULL) ? phase->C->alias_type(tp) : NULL;
    // This is more general than load from boxing objects.
!   if (phase->C->eliminate_boxing() && (atp != NULL) &&
!       (atp->index() >= Compile::AliasIdxRaw) &&
!       (atp->field() != NULL) && !atp->field()->is_volatile()) {
      uint alias_idx = atp->index();
!     bool final = atp->field()->is_final();
      Node* result = NULL;
      Node* current = st;
      // Skip through chains of MemBarNodes checking the MergeMems for
      // new states for the slice of this load.  Stop once any other
      // kind of node is encountered.  Loads from final memory can skip
--- 987,999 ----
    intptr_t ld_off = 0;
    AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
    const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
    Compile::AliasType* atp = (tp != NULL) ? phase->C->alias_type(tp) : NULL;
    // This is more general than load from boxing objects.
!   if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) {
      uint alias_idx = atp->index();
!     bool final = !atp->is_rewritable();
      Node* result = NULL;
      Node* current = st;
      // Skip through chains of MemBarNodes checking the MergeMems for
      // new states for the slice of this load.  Stop once any other
      // kind of node is encountered.  Loads from final memory can skip
*** 1013,1023 ****
      if (result != NULL) {
        st = result;
      }
    }
  
- 
    // Loop around twice in the case Load -> Initialize -> Store.
    // (See PhaseIterGVN::add_users_to_worklist, which knows about this case.)
    for (int trip = 0; trip <= 1; trip++) {
      if (st->is_Store()) {
--- 1024,1033 ----
*** 1575,1584 ****
--- 1585,1628 ----
    // No match.
    return NULL;
  }
  
+ // Try to constant-fold a stable array element.
+ static const Type* fold_stable_ary_elem(const TypeAryPtr* ary, int off, BasicType loadbt) {
+   assert(ary->is_stable(), "array should be stable");
+ 
+   if (ary->const_oop() != NULL) {
+     // Decode the results of GraphKit::array_element_address.
+     ciArray* aobj = ary->const_oop()->as_array();
+     ciConstant con = aobj->element_value_by_offset(off);
+ 
+     if (con.basic_type() != T_ILLEGAL && !con.is_null_or_zero()) {
+       const Type* con_type = Type::make_from_constant(con);
+       if (con_type != NULL) {
+         if (con_type->isa_aryptr()) {
+           // Join with the array element type, in case it is also stable.
+           int dim = ary->stable_dimension();
+           con_type = con_type->is_aryptr()->cast_to_stable(true, dim-1);
+         }
+         if (loadbt == T_NARROWOOP && con_type->isa_oopptr()) {
+           con_type = con_type->make_narrowoop();
+         }
+ #ifndef PRODUCT
+         if (TraceIterativeGVN) {
+           tty->print("FoldStableValues: array element [off=%d]: con_type=", off);
+           con_type->dump(); tty->cr();
+         }
+ #endif //PRODUCT
+         return con_type;
+       }
+     }
+   }
+ 
+   return NULL;
+ }
+ 
  //------------------------------Value-----------------------------------------
  const Type *LoadNode::Value( PhaseTransform *phase ) const {
    // Either input is TOP ==> the result is TOP
    Node* mem = in(MemNode::Memory);
    const Type *t1 = phase->type(mem);
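For intuition, the rule that fold_stable_ary_elem enforces can be restated outside HotSpot: an element is folded to a constant only when the array object itself is a compile-time constant and the element holds a non-default (non-zero/non-null) value; default values are left alone because a stable element may still transition away from its default later. The standalone sketch below models that rule only. ConstIntArray, fold_stable_elem, and the sample data are invented for illustration and are not part of this change.

// Toy model of the stable-array folding rule (illustration only, not HotSpot code).
#include <cstdio>

struct ConstIntArray {
  const int* data;     // NULL when the array object is not a compile-time constant
  int        length;
};

// Returns true and sets *out when the element can be folded to a constant.
static bool fold_stable_elem(const ConstIntArray& ary, int index, int* out) {
  if (ary.data == NULL)                 return false;   // array itself not constant
  if (index < 0 || index >= ary.length) return false;   // out of bounds
  int v = ary.data[index];
  if (v == 0)                           return false;   // default value: may still be set later
  *out = v;
  return true;
}

int main() {
  static const int elems[] = { 0, 42, 7 };
  ConstIntArray ary = { elems, 3 };
  int con = 0;
  if (fold_stable_elem(ary, 1, &con))  printf("element 1 folds to constant %d\n", con);
  if (!fold_stable_elem(ary, 0, &con)) printf("element 0 is a default value, not folded\n");
  return 0;
}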
*** 1589,1600 ****
    int off = tp->offset();
    assert(off != Type::OffsetTop, "case covered by TypePtr::empty");
    Compile* C = phase->C;
  
    // Try to guess loaded type from pointer type
!   if (tp->base() == Type::AryPtr) {
!     const Type *t = tp->is_aryptr()->elem();
      // Don't do this for integer types. There is only potential profit if
      // the element type t is lower than _type; that is, for int types, if _type is
      // more restrictive than t.  This only happens here if one is short and the other
      // char (both 16 bits), and in those cases we've made an intentional decision
      // to use one kind of load over the other.  See AndINode::Ideal and 4965907.
--- 1633,1667 ----
    int off = tp->offset();
    assert(off != Type::OffsetTop, "case covered by TypePtr::empty");
    Compile* C = phase->C;
  
    // Try to guess loaded type from pointer type
!   if (tp->isa_aryptr()) {
!     const TypeAryPtr* ary = tp->is_aryptr();
!     const Type *t = ary->elem();
! 
!     // Determine whether the reference is beyond the header or not, by comparing
!     // the offset against the offset of the start of the array's data.
!     // Different array types begin at slightly different offsets (12 vs. 16).
!     // We choose T_BYTE as an example base type that is least restrictive
!     // as to alignment, which will therefore produce the smallest
!     // possible base offset.
!     const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
!     const bool off_beyond_header = ((uint)off >= (uint)min_base_off);
! 
!     // Try to constant-fold a stable array element.
!     if (FoldStableValues && ary->is_stable()) {
!       // Make sure the reference is not into the header
!       if (off_beyond_header && off != Type::OffsetBot) {
!         assert(adr->is_AddP() && adr->in(AddPNode::Offset)->is_Con(), "offset is a constant");
!         const Type* con_type = fold_stable_ary_elem(ary, off, memory_type());
!         if (con_type != NULL) {
!           return con_type;
!         }
!       }
!     }
! 
      // Don't do this for integer types. There is only potential profit if
      // the element type t is lower than _type; that is, for int types, if _type is
      // more restrictive than t.  This only happens here if one is short and the other
      // char (both 16 bits), and in those cases we've made an intentional decision
      // to use one kind of load over the other.  See AndINode::Ideal and 4965907.
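The "(12 vs. 16)" remark above can be made concrete with a small arithmetic sketch. It assumes the classic 32-bit layout (4-byte mark word, 4-byte klass pointer, 4-byte length field); those sizes are assumptions for the illustration, not values taken from this change, and 64-bit or compressed-oop builds yield different offsets.

// Arithmetic sketch of array base offsets (assumed 32-bit layout; not HotSpot code).
#include <cstdio>

int main() {
  const int mark_bytes   = 4;   // assumed 32-bit mark word
  const int klass_bytes  = 4;   // assumed 32-bit klass pointer
  const int length_bytes = 4;   // array length field
  const int header_end   = mark_bytes + klass_bytes + length_bytes;

  printf("byte-array data offset: %d\n", header_end);              // 12
  printf("long-array data offset: %d\n", (header_end + 7) & ~7);   // 16 (8-byte aligned)
  return 0;
}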
*** 1611,1628 ****
      if ((t->isa_int() == NULL) && (t->isa_long() == NULL) &&
          (_type->isa_vect() == NULL) &&
          Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
        // t might actually be lower than _type, if _type is a unique
        // concrete subclass of abstract class t.
!       // Make sure the reference is not into the header, by comparing
!       // the offset against the offset of the start of the array's data.
!       // Different array types begin at slightly different offsets (12 vs. 16).
!       // We choose T_BYTE as an example base type that is least restrictive
!       // as to alignment, which will therefore produce the smallest
!       // possible base offset.
!       const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
!       if ((uint)off >= (uint)min_base_off) {  // is the offset beyond the header?
          const Type* jt = t->join(_type);
          // In any case, do not allow the join, per se, to empty out the type.
          if (jt->empty() && !t->empty()) {
            // This can happen if a interface-typed array narrows to a class type.
            jt = _type;
--- 1678,1688 ----
      if ((t->isa_int() == NULL) && (t->isa_long() == NULL) &&
          (_type->isa_vect() == NULL) &&
          Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
        // t might actually be lower than _type, if _type is a unique
        // concrete subclass of abstract class t.
!       if (off_beyond_header) {  // is the offset beyond the header?
          const Type* jt = t->join(_type);
          // In any case, do not allow the join, per se, to empty out the type.
          if (jt->empty() && !t->empty()) {
            // This can happen if a interface-typed array narrows to a class type.
            jt = _type;