src/share/vm/opto/memnode.cpp

--- old/src/share/vm/opto/memnode.cpp

...
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "compiler/compileLog.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/regmask.hpp"
#include "utilities/copy.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st);

//=============================================================================
uint MemNode::size_of() const { return sizeof(*this); }

const TypePtr *MemNode::adr_type() const {
  Node* adr = in(Address);
  if (adr == NULL)  return NULL; // node is dead
  const TypePtr* cross_check = NULL;
  DEBUG_ONLY(cross_check = _adr_type);
  return calculate_adr_type(adr->bottom_type(), cross_check);
}

#ifndef PRODUCT


...

    }

    // A load from an initialization barrier can match a captured store.
    if (st->is_Proj() && st->in(0)->is_Initialize()) {
      InitializeNode* init = st->in(0)->as_Initialize();
      AllocateNode* alloc = init->allocation();
      if ((alloc != NULL) && (alloc == ld_alloc)) {
        // examine a captured store value
        st = init->find_captured_store(ld_off, memory_size(), phase);
        if (st != NULL) {
          continue;             // take one more trip around
        }
      }
    }

    // Load boxed value from result of valueOf() call is input parameter.
    if (this->is_Load() && ld_adr->is_AddP() &&
        (tp != NULL) && tp->is_ptr_to_boxed_value()) {
      intptr_t ignore = 0;
      Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore);
      if (base != NULL && base->is_Proj() &&
          base->as_Proj()->_con == TypeFunc::Parms &&
          base->in(0)->is_CallStaticJava() &&
          base->in(0)->as_CallStaticJava()->is_boxing_method()) {
        return base->in(0)->in(TypeFunc::Parms);
      }
    }

    break;
  }

  return NULL;
}

//----------------------is_instance_field_load_with_local_phi------------------
bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
  if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
      in(Address)->is_AddP() ) {
    const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
    // Only instances and boxed values.

...

  if (t1 == Type::TOP)  return Type::TOP;
  Node* adr = in(MemNode::Address);
  const TypePtr* tp = phase->type(adr)->isa_ptr();
  if (tp == NULL || tp->empty())  return Type::TOP;
  int off = tp->offset();
  assert(off != Type::OffsetTop, "case covered by TypePtr::empty");
  Compile* C = phase->C;

  // Try to guess loaded type from pointer type
  if (tp->isa_aryptr()) {
    const TypeAryPtr* ary = tp->is_aryptr();
    const Type* t = ary->elem();

    // Determine whether the reference is beyond the header or not, by comparing
    // the offset against the offset of the start of the array's data.
    // Different array types begin at slightly different offsets (12 vs. 16).
    // We choose T_BYTE as an example base type that is least restrictive
    // as to alignment, which will therefore produce the smallest
    // possible base offset.
    const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
    const bool off_beyond_header = ((uint)off >= (uint)min_base_off);

    // Try to constant-fold a stable array element.
    if (FoldStableValues && ary->is_stable() && ary->const_oop() != NULL) {
      // Make sure the reference is not into the header and the offset is constant
      if (off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) {
        const Type* con_type = fold_stable_ary_elem(ary, off, memory_type());
        if (con_type != NULL) {
          return con_type;
        }
      }
    }

    // Don't do this for integer types. There is only potential profit if
    // the element type t is lower than _type; that is, for int types, if _type is
    // more restrictive than t.  This only happens here if one is short and the other
    // char (both 16 bits), and in those cases we've made an intentional decision
    // to use one kind of load over the other. See AndINode::Ideal and 4965907.
    // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
    //
    // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
...



+++ new/src/share/vm/opto/memnode.cpp

...
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/regmask.hpp"
#include "opto/shenandoahSupport.hpp"
#include "utilities/copy.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st);

//=============================================================================
uint MemNode::size_of() const { return sizeof(*this); }

const TypePtr *MemNode::adr_type() const {
  Node* adr = in(Address);
  if (adr == NULL)  return NULL; // node is dead
  const TypePtr* cross_check = NULL;
  DEBUG_ONLY(cross_check = _adr_type);
  return calculate_adr_type(adr->bottom_type(), cross_check);
}

#ifndef PRODUCT


...

    }

    // A load from an initialization barrier can match a captured store.
    if (st->is_Proj() && st->in(0)->is_Initialize()) {
      InitializeNode* init = st->in(0)->as_Initialize();
      AllocateNode* alloc = init->allocation();
      if ((alloc != NULL) && (alloc == ld_alloc)) {
        // examine a captured store value
        st = init->find_captured_store(ld_off, memory_size(), phase);
        if (st != NULL) {
          continue;             // take one more trip around
        }
      }
    }

    // Load boxed value from result of valueOf() call is input parameter.
    if (this->is_Load() && ld_adr->is_AddP() &&
        (tp != NULL) && tp->is_ptr_to_boxed_value()) {
      intptr_t ignore = 0;
      Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore);
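      // With Shenandoah, the base oop may be wrapped in a barrier node; step
      // through it so the boxing-call pattern match below still sees the
      // call's result projection.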
      base = ShenandoahBarrierNode::skip_through_barrier(base);
      if (base != NULL && base->is_Proj() &&
          base->as_Proj()->_con == TypeFunc::Parms &&
          base->in(0)->is_CallStaticJava() &&
          base->in(0)->as_CallStaticJava()->is_boxing_method()) {
        return base->in(0)->in(TypeFunc::Parms);
      }
    }

    break;
  }

  return NULL;
}

//----------------------is_instance_field_load_with_local_phi------------------
bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
  if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
      in(Address)->is_AddP() ) {
    const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
    // Only instances and boxed values.

...

  if (t1 == Type::TOP)  return Type::TOP;
  Node* adr = in(MemNode::Address);
  const TypePtr* tp = phase->type(adr)->isa_ptr();
  if (tp == NULL || tp->empty())  return Type::TOP;
  int off = tp->offset();
  assert(off != Type::OffsetTop, "case covered by TypePtr::empty");
  Compile* C = phase->C;

  // Try to guess loaded type from pointer type
  if (tp->isa_aryptr()) {
    const TypeAryPtr* ary = tp->is_aryptr();
    const Type* t = ary->elem();

    // Determine whether the reference is beyond the header or not, by comparing
    // the offset against the offset of the start of the array's data.
    // Different array types begin at slightly different offsets (12 vs. 16).
    // We choose T_BYTE as an example base type that is least restrictive
    // as to alignment, which will therefore produce the smallest
    // possible base offset.
    const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
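    // Shenandoah keeps the Brooks forwarding pointer just before the object,
    // at a negative offset; cast to uint that offset would pass the comparison
    // below, so it has to be excluded explicitly.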
    const bool off_beyond_header = (off != BrooksPointer::BYTE_OFFSET || !UseShenandoahGC) && ((uint)off >= (uint)min_base_off);

    // Try to constant-fold a stable array element.
    if (FoldStableValues && ary->is_stable() && ary->const_oop() != NULL) {
      // Make sure the reference is not into the header and the offset is constant
      if (off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) {
        const Type* con_type = fold_stable_ary_elem(ary, off, memory_type());
        if (con_type != NULL) {
          return con_type;
        }
      }
    }

    // Don't do this for integer types. There is only potential profit if
    // the element type t is lower than _type; that is, for int types, if _type is
    // more restrictive than t.  This only happens here if one is short and the other
    // char (both 16 bits), and in those cases we've made an intentional decision
    // to use one kind of load over the other. See AndINode::Ideal and 4965907.
    // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
    //
    // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
...
