src/share/vm/opto/memnode.cpp

--- old/src/share/vm/opto/memnode.cpp

  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "oops/objArrayKlass.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/arraycopynode.hpp"
  32 #include "opto/cfgnode.hpp"
  33 #include "opto/compile.hpp"
  34 #include "opto/connode.hpp"
  35 #include "opto/convertnode.hpp"
  36 #include "opto/loopnode.hpp"
  37 #include "opto/machnode.hpp"
  38 #include "opto/matcher.hpp"
  39 #include "opto/memnode.hpp"
  40 #include "opto/mulnode.hpp"
  41 #include "opto/narrowptrnode.hpp"
  42 #include "opto/phaseX.hpp"
  43 #include "opto/regmask.hpp"

  44 #include "utilities/copy.hpp"
  45 
  46 // Portions of code courtesy of Clifford Click
  47 
  48 // Optimization - Graph Style
  49 
  50 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st);
  51 
  52 //=============================================================================
  53 uint MemNode::size_of() const { return sizeof(*this); }
  54 
  55 const TypePtr *MemNode::adr_type() const {
  56   Node* adr = in(Address);
  57   if (adr == NULL)  return NULL; // node is dead
  58   const TypePtr* cross_check = NULL;
  59   DEBUG_ONLY(cross_check = _adr_type);
  60   return calculate_adr_type(adr->bottom_type(), cross_check);
  61 }
  62 
  63 #ifndef PRODUCT


 983     }
 984 
 985     // A load from an initialization barrier can match a captured store.
 986     if (st->is_Proj() && st->in(0)->is_Initialize()) {
 987       InitializeNode* init = st->in(0)->as_Initialize();
 988       AllocateNode* alloc = init->allocation();
 989       if ((alloc != NULL) && (alloc == ld_alloc)) {
 990         // examine a captured store value
 991         st = init->find_captured_store(ld_off, memory_size(), phase);
 992         if (st != NULL) {
 993           continue;             // take one more trip around
 994         }
 995       }
 996     }
 997 
 998     // A load of a boxed value from the result of a valueOf() call is the call's input parameter.
 999     if (this->is_Load() && ld_adr->is_AddP() &&
1000         (tp != NULL) && tp->is_ptr_to_boxed_value()) {
1001       intptr_t ignore = 0;
1002       Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore);

1003       if (base != NULL && base->is_Proj() &&
1004           base->as_Proj()->_con == TypeFunc::Parms &&
1005           base->in(0)->is_CallStaticJava() &&
1006           base->in(0)->as_CallStaticJava()->is_boxing_method()) {
1007         return base->in(0)->in(TypeFunc::Parms);
1008       }
1009     }
1010 
1011     break;
1012   }
1013 
1014   return NULL;
1015 }
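
The lookup above implements C2's boxing elimination: when the loaded address is the value field of an object returned by a boxing method (Integer.valueOf() and friends), the load can be answered with the argument that was passed to the call (the TypeFunc::Parms input) without touching the box at all. A self-contained sketch of that effect, using hypothetical stand-in types rather than real HotSpot nodes:

    // Illustrative sketch only (not HotSpot code): a load of the boxed field
    // from the result of a valueOf()-style call folds to the value that went
    // into the call.
    #include <cstdio>

    struct BoxingCall     { int argument; };             // stands in for the CallStaticJava boxing call
    struct BoxedFieldLoad { const BoxingCall* call; };   // stands in for the Load through AddP/Proj

    static int fold_boxed_load(const BoxedFieldLoad& ld) {
      // Answer the load with the call's Parms input instead of reading the box.
      return ld.call->argument;
    }

    int main() {
      BoxingCall valueOf42 = { 42 };            // e.g. Integer.valueOf(42)
      BoxedFieldLoad load  = { &valueOf42 };    // e.g. the later intValue() load
      printf("%d\n", fold_boxed_load(load));    // prints 42 without dereferencing the box
      return 0;
    }
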
1016 
1017 //----------------------is_instance_field_load_with_local_phi------------------
1018 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
1019   if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
1020       in(Address)->is_AddP() ) {
1021     const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
1022     // Only instances and boxed values.


1586   if (t1 == Type::TOP)  return Type::TOP;
1587   Node* adr = in(MemNode::Address);
1588   const TypePtr* tp = phase->type(adr)->isa_ptr();
1589   if (tp == NULL || tp->empty())  return Type::TOP;
1590   int off = tp->offset();
1591   assert(off != Type::OffsetTop, "case covered by TypePtr::empty");
1592   Compile* C = phase->C;
1593 
1594   // Try to guess loaded type from pointer type
1595   if (tp->isa_aryptr()) {
1596     const TypeAryPtr* ary = tp->is_aryptr();
1597     const Type* t = ary->elem();
1598 
1599     // Determine whether the reference is beyond the header or not, by comparing
1600     // the offset against the offset of the start of the array's data.
1601     // Different array types begin at slightly different offsets (12 vs. 16).
1602     // We choose T_BYTE as an example base type that is least restrictive
1603     // as to alignment, which will therefore produce the smallest
1604     // possible base offset.
1605     const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1606     const bool off_beyond_header = ((uint)off >= (uint)min_base_off);
1607 
1608     // Try to constant-fold a stable array element.
1609     if (FoldStableValues && ary->is_stable() && ary->const_oop() != NULL) {
1610       // Make sure the reference is not into the header and the offset is constant
1611       if (off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) {
1612         const Type* con_type = fold_stable_ary_elem(ary, off, memory_type());
1613         if (con_type != NULL) {
1614           return con_type;
1615         }
1616       }
1617     }
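
fold_stable_ary_elem() is only consulted when the array reference is itself a compile-time constant (const_oop), the array type is marked stable, and the offset is a constant that lies outside the header. A self-contained sketch of the underlying idea, using plain C++ data in place of HotSpot types (the helper name and the shape of the check are this sketch's assumptions, not code from this file):

    // Illustrative sketch: fold a @Stable array element to a constant only when
    // the element already holds a non-default value; a default (zero) element
    // may still be written later, so it must not be folded.
    #include <cstdio>

    struct ConstArray { const int* data; int length; };   // stands in for ary->const_oop()

    static bool try_fold_stable_elem(const ConstArray& a, int index, int* out) {
      if (index < 0 || index >= a.length) return false;   // offset must address a real element
      if (a.data[index] == 0) return false;               // default value: not yet stable
      *out = a.data[index];                                // non-default: treat as a constant
      return true;
    }

    int main() {
      static const int payload[] = { 0, 7, 0 };
      ConstArray arr = { payload, 3 };
      int c = 0;
      printf("fold element 1: %s (%d)\n", try_fold_stable_elem(arr, 1, &c) ? "yes" : "no", c);
      return 0;
    }
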
1618 
1619     // Don't do this for integer types. There is only potential profit if
1620     // the element type t is lower than _type; that is, for int types, if _type is
1621     // more restrictive than t.  This only happens here if one is short and the other
1622     // char (both 16 bits), and in those cases we've made an intentional decision
1623     // to use one kind of load over the other. See AndINode::Ideal and 4965907.
1624     // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
1625     //
1626     // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))


+++ new/src/share/vm/opto/memnode.cpp

  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "oops/objArrayKlass.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/arraycopynode.hpp"
  32 #include "opto/cfgnode.hpp"
  33 #include "opto/compile.hpp"
  34 #include "opto/connode.hpp"
  35 #include "opto/convertnode.hpp"
  36 #include "opto/loopnode.hpp"
  37 #include "opto/machnode.hpp"
  38 #include "opto/matcher.hpp"
  39 #include "opto/memnode.hpp"
  40 #include "opto/mulnode.hpp"
  41 #include "opto/narrowptrnode.hpp"
  42 #include "opto/phaseX.hpp"
  43 #include "opto/regmask.hpp"
  44 #include "opto/shenandoahSupport.hpp"
  45 #include "utilities/copy.hpp"
  46 
  47 // Portions of code courtesy of Clifford Click
  48 
  49 // Optimization - Graph Style
  50 
  51 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st);
  52 
  53 //=============================================================================
  54 uint MemNode::size_of() const { return sizeof(*this); }
  55 
  56 const TypePtr *MemNode::adr_type() const {
  57   Node* adr = in(Address);
  58   if (adr == NULL)  return NULL; // node is dead
  59   const TypePtr* cross_check = NULL;
  60   DEBUG_ONLY(cross_check = _adr_type);
  61   return calculate_adr_type(adr->bottom_type(), cross_check);
  62 }
  63 
  64 #ifndef PRODUCT


 984     }
 985 
 986     // A load from an initialization barrier can match a captured store.
 987     if (st->is_Proj() && st->in(0)->is_Initialize()) {
 988       InitializeNode* init = st->in(0)->as_Initialize();
 989       AllocateNode* alloc = init->allocation();
 990       if ((alloc != NULL) && (alloc == ld_alloc)) {
 991         // examine a captured store value
 992         st = init->find_captured_store(ld_off, memory_size(), phase);
 993         if (st != NULL) {
 994           continue;             // take one more trip around
 995         }
 996       }
 997     }
 998 
 999     // A load of a boxed value from the result of a valueOf() call is the call's input parameter.
1000     if (this->is_Load() && ld_adr->is_AddP() &&
1001         (tp != NULL) && tp->is_ptr_to_boxed_value()) {
1002       intptr_t ignore = 0;
1003       Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore);
1004       base = ShenandoahBarrierNode::skip_through_barrier(base);
1005       if (base != NULL && base->is_Proj() &&
1006           base->as_Proj()->_con == TypeFunc::Parms &&
1007           base->in(0)->is_CallStaticJava() &&
1008           base->in(0)->as_CallStaticJava()->is_boxing_method()) {
1009         return base->in(0)->in(TypeFunc::Parms);
1010       }
1011     }
1012 
1013     break;
1014   }
1015 
1016   return NULL;
1017 }
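
Under Shenandoah, the base produced by Ideal_base_and_offset() may be a read or write barrier wrapping the call's result projection rather than the projection itself, so the call added at line 1004 looks through the barrier before the is_boxing_method() pattern is matched. A minimal self-contained sketch of what such a helper is assumed to do (MockNode and its fields are illustrative stand-ins, not the ShenandoahBarrierNode interface):

    // Illustrative sketch only: if the node is a barrier, return the oop it
    // wraps so callers can keep matching on the underlying node; otherwise
    // return the node unchanged.
    struct MockNode {
      bool      is_barrier;   // stands in for "this node is a Shenandoah barrier"
      MockNode* wrapped;      // stands in for the barrier's value input
    };

    static MockNode* skip_through_barrier_sketch(MockNode* n) {
      if (n != nullptr && n->is_barrier) {
        return n->wrapped;    // look through the barrier to the protected oop
      }
      return n;               // not a barrier: nothing to skip
    }
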
1018 
1019 //----------------------is_instance_field_load_with_local_phi------------------
1020 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
1021   if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
1022       in(Address)->is_AddP() ) {
1023     const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
1024     // Only instances and boxed values.


1588   if (t1 == Type::TOP)  return Type::TOP;
1589   Node* adr = in(MemNode::Address);
1590   const TypePtr* tp = phase->type(adr)->isa_ptr();
1591   if (tp == NULL || tp->empty())  return Type::TOP;
1592   int off = tp->offset();
1593   assert(off != Type::OffsetTop, "case covered by TypePtr::empty");
1594   Compile* C = phase->C;
1595 
1596   // Try to guess loaded type from pointer type
1597   if (tp->isa_aryptr()) {
1598     const TypeAryPtr* ary = tp->is_aryptr();
1599     const Type* t = ary->elem();
1600 
1601     // Determine whether the reference is beyond the header or not, by comparing
1602     // the offset against the offset of the start of the array's data.
1603     // Different array types begin at slightly different offsets (12 vs. 16).
1604     // We choose T_BYTE as an example base type that is least restrictive
1605     // as to alignment, which will therefore produce the smallest
1606     // possible base offset.
1607     const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1608     const bool off_beyond_header = (off != -8 || !UseShenandoahGC) && ((uint)off >= (uint)min_base_off);
1609 
1610     // Try to constant-fold a stable array element.
1611     if (FoldStableValues && ary->is_stable() && ary->const_oop() != NULL) {
1612       // Make sure the reference is not into the header and the offset is constant
1613       if (off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) {
1614         const Type* con_type = fold_stable_ary_elem(ary, off, memory_type());
1615         if (con_type != NULL) {
1616           return con_type;
1617         }
1618       }
1619     }
1620 
1621     // Don't do this for integer types. There is only potential profit if
1622     // the element type t is lower than _type; that is, for int types, if _type is
1623     // more restrictive than t.  This only happens here if one is short and the other
1624     // char (both 16 bits), and in those cases we've made an intentional decision
1625     // to use one kind of load over the other. See AndINode::Ideal and 4965907.
1626     // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
1627     //
1628     // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
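
The extra (off != -8 || !UseShenandoahGC) term at line 1608 accounts for Shenandoah's forwarding pointer, which in this version of the collector is assumed to sit one heap word before the object, i.e. at offset -8 on 64-bit. The unsigned comparison by itself would classify that offset as lying beyond the array header, because casting -8 to uint wraps around to a very large value, and a forwarding-pointer access would then be mistaken for an array element access. A small self-contained illustration of the wraparound:

    // Why the unsigned comparison alone is not enough (illustrative values;
    // 16 is a typical arrayOopDesc::base_offset_in_bytes(T_BYTE) on 64-bit).
    #include <cstdio>

    int main() {
      const int min_base_off = 16;   // assumed minimal array data offset
      int off = -8;                  // assumed forwarding-pointer offset
      bool beyond = ((unsigned)off >= (unsigned)min_base_off);
      // (unsigned)-8 wraps to 4294967288, so the check alone reports "true".
      printf("%u >= %u -> %d\n", (unsigned)off, (unsigned)min_base_off, (int)beyond);
      return 0;
    }
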

