< prev index next >

src/share/vm/opto/memnode.cpp

Print this page




  26 #include "classfile/systemDictionary.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "oops/objArrayKlass.hpp"
  31 #include "opto/addnode.hpp"
  32 #include "opto/arraycopynode.hpp"
  33 #include "opto/cfgnode.hpp"
  34 #include "opto/compile.hpp"
  35 #include "opto/connode.hpp"
  36 #include "opto/convertnode.hpp"
  37 #include "opto/loopnode.hpp"
  38 #include "opto/machnode.hpp"
  39 #include "opto/matcher.hpp"
  40 #include "opto/memnode.hpp"
  41 #include "opto/mulnode.hpp"
  42 #include "opto/narrowptrnode.hpp"
  43 #include "opto/phaseX.hpp"
  44 #include "opto/regmask.hpp"
  45 #include "utilities/copy.hpp"

  46 
  47 // Portions of code courtesy of Clifford Click
  48 
  49 // Optimization - Graph Style
  50 
  51 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st);
  52 
  53 //=============================================================================
// Report the concrete size of this node; used by Node cloning/copying machinery.
  54 uint MemNode::size_of() const { return sizeof(*this); }
  55 
// Derive this memory node's address type from the bottom type of its Address
// input.  Returns NULL when the node is dead (no address input).  In debug
// builds, the declared _adr_type is passed along as a cross-check.
  56 const TypePtr *MemNode::adr_type() const {
  57   Node* adr = in(Address);
  58   if (adr == NULL)  return NULL; // node is dead
  59   const TypePtr* cross_check = NULL;
  60   DEBUG_ONLY(cross_check = _adr_type);  // debug-only: verify result against declared adr_type
  61   return calculate_adr_type(adr->bottom_type(), cross_check);
  62 }
  63 
  64 bool MemNode::check_if_adr_maybe_raw(Node* adr) {
  65   if (adr != NULL) {


 696         continue;           // (a) advance through independent MergeMem memory
 697       }
 698     }
 699 
 700     // Unless there is an explicit 'continue', we must bail out here,
 701     // because 'mem' is an inscrutable memory state (e.g., a call).
 702     break;
 703   }
 704 
 705   return NULL;              // bail out
 706 }
 707 
 708 //----------------------calculate_adr_type-------------------------------------
 709 // Helper function.  Notices when the given type of address hits top or bottom.
 710 // Also, asserts a cross-check of the type against the expected address type.
 711 const TypePtr* MemNode::calculate_adr_type(const Type* t, const TypePtr* cross_check) {
 712   if (t == Type::TOP)  return NULL; // does not touch memory any more?
 713   #ifdef PRODUCT
 714   cross_check = NULL;
 715   #else
 716   if (!VerifyAliases || is_error_reported() || Node::in_dump())  cross_check = NULL;
 717   #endif
 718   const TypePtr* tp = t->isa_ptr();
 719   if (tp == NULL) {
 720     assert(cross_check == NULL || cross_check == TypePtr::BOTTOM, "expected memory type must be wide");
 721     return TypePtr::BOTTOM;           // touches lots of memory
 722   } else {
 723     #ifdef ASSERT
 724     // %%%% [phh] We don't check the alias index if cross_check is
 725     //            TypeRawPtr::BOTTOM.  Needs to be investigated.
 726     if (cross_check != NULL &&
 727         cross_check != TypePtr::BOTTOM &&
 728         cross_check != TypeRawPtr::BOTTOM) {
 729       // Recheck the alias index, to see if it has changed (due to a bug).
 730       Compile* C = Compile::current();
 731       assert(C->get_alias_index(cross_check) == C->get_alias_index(tp),
 732              "must stay in the original alias category");
 733       // The type of the address must be contained in the adr_type,
 734       // disregarding "null"-ness.
 735       // (We make an exception for TypeRawPtr::BOTTOM, which is a bit bucket.)
 736       const TypePtr* tp_notnull = tp->join(TypePtr::NOTNULL)->is_ptr();


4369     if (mem == base_mem) { st->print(" -"); continue; }
4370     st->print( " N%d:", mem->_idx );
4371     Compile::current()->get_adr_type(i)->dump_on(st);
4372   }
4373   st->print(" }");
4374 }
4375 #endif // !PRODUCT
4376 
4377 
4378 #ifdef ASSERT
// Conservative identity test used only from asserts: report true when the two
// nodes are identical, or when either is a Phi (Phis shift around during
// optimization, so we cannot rule out that they will become the same).
4379 static bool might_be_same(Node* a, Node* b) {
4380   if (a == b)  return true;
4381   if (!(a->is_Phi() || b->is_Phi()))  return false;
4382   // phis shift around during optimization
4383   return true;  // pretty stupid...
4384 }
4385 
4386 // verify a narrow slice (either incoming or outgoing)
4387 static void verify_memory_slice(const MergeMemNode* m, int alias_idx, Node* n) {
4388   if (!VerifyAliases)       return;  // don't bother to verify unless requested
4389   if (is_error_reported())  return;  // muzzle asserts when debugging an error
4390   if (Node::in_dump())      return;  // muzzle asserts when printing
4391   assert(alias_idx >= Compile::AliasIdxRaw, "must not disturb base_memory or sentinel");
4392   assert(n != NULL, "");
4393   // Elide intervening MergeMem's
4394   while (n->is_MergeMem()) {
4395     n = n->as_MergeMem()->memory_at(alias_idx);
4396   }
4397   Compile* C = Compile::current();
4398   const TypePtr* n_adr_type = n->adr_type();
4399   if (n == m->empty_memory()) {
4400     // Implicit copy of base_memory()
4401   } else if (n_adr_type != TypePtr::BOTTOM) {
4402     assert(n_adr_type != NULL, "new memory must have a well-defined adr_type");
4403     assert(C->must_alias(n_adr_type, alias_idx), "new memory must match selected slice");
4404   } else {
4405     // A few places like make_runtime_call "know" that VM calls are narrow,
4406     // and can be used to update only the VM bits stored as TypeRawPtr::BOTTOM.
4407     bool expected_wide_mem = false;
4408     if (n == m->base_memory()) {
4409       expected_wide_mem = true;


4430          "must avoid base_memory and AliasIdxTop");
4431 
4432   // Otherwise, it is a narrow slice.
4433   Node* n = alias_idx < req() ? in(alias_idx) : empty_memory();
4434   Compile *C = Compile::current();
4435   if (is_empty_memory(n)) {
4436     // the array is sparse; empty slots are the "top" node
4437     n = base_memory();
4438     assert(Node::in_dump()
4439            || n == NULL || n->bottom_type() == Type::TOP
4440            || n->adr_type() == NULL // address is TOP
4441            || n->adr_type() == TypePtr::BOTTOM
4442            || n->adr_type() == TypeRawPtr::BOTTOM
4443            || Compile::current()->AliasLevel() == 0,
4444            "must be a wide memory");
4445     // AliasLevel == 0 if we are organizing the memory states manually.
4446     // See verify_memory_slice for comments on TypeRawPtr::BOTTOM.
4447   } else {
4448     // make sure the stored slice is sane
4449     #ifdef ASSERT
4450     if (is_error_reported() || Node::in_dump()) {
4451     } else if (might_be_same(n, base_memory())) {
4452       // Give it a pass:  It is a mostly harmless repetition of the base.
4453       // This can arise normally from node subsumption during optimization.
4454     } else {
4455       verify_memory_slice(this, alias_idx, n);
4456     }
4457     #endif
4458   }
4459   return n;
4460 }
4461 
4462 //---------------------------set_memory_at-------------------------------------
4463 void MergeMemNode::set_memory_at(uint alias_idx, Node *n) {
4464   verify_memory_slice(this, alias_idx, n);
4465   Node* empty_mem = empty_memory();
4466   if (n == base_memory())  n = empty_mem;  // collapse default
4467   uint need_req = alias_idx+1;
4468   if (req() < need_req) {
4469     if (n == empty_mem)  return;  // already the default, so do not grow me
4470     // grow the sparse array




  26 #include "classfile/systemDictionary.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "oops/objArrayKlass.hpp"
  31 #include "opto/addnode.hpp"
  32 #include "opto/arraycopynode.hpp"
  33 #include "opto/cfgnode.hpp"
  34 #include "opto/compile.hpp"
  35 #include "opto/connode.hpp"
  36 #include "opto/convertnode.hpp"
  37 #include "opto/loopnode.hpp"
  38 #include "opto/machnode.hpp"
  39 #include "opto/matcher.hpp"
  40 #include "opto/memnode.hpp"
  41 #include "opto/mulnode.hpp"
  42 #include "opto/narrowptrnode.hpp"
  43 #include "opto/phaseX.hpp"
  44 #include "opto/regmask.hpp"
  45 #include "utilities/copy.hpp"
  46 #include "utilities/vmError.hpp"
  47 
  48 // Portions of code courtesy of Clifford Click
  49 
  50 // Optimization - Graph Style
  51 
  52 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st);
  53 
  54 //=============================================================================
// Report the concrete size of this node; used by Node cloning/copying machinery.
  55 uint MemNode::size_of() const { return sizeof(*this); }
  56 
// Derive this memory node's address type from the bottom type of its Address
// input.  Returns NULL when the node is dead (no address input).  In debug
// builds, the declared _adr_type is passed along as a cross-check.
  57 const TypePtr *MemNode::adr_type() const {
  58   Node* adr = in(Address);
  59   if (adr == NULL)  return NULL; // node is dead
  60   const TypePtr* cross_check = NULL;
  61   DEBUG_ONLY(cross_check = _adr_type);  // debug-only: verify result against declared adr_type
  62   return calculate_adr_type(adr->bottom_type(), cross_check);
  63 }
  64 
  65 bool MemNode::check_if_adr_maybe_raw(Node* adr) {
  66   if (adr != NULL) {


 697         continue;           // (a) advance through independent MergeMem memory
 698       }
 699     }
 700 
 701     // Unless there is an explicit 'continue', we must bail out here,
 702     // because 'mem' is an inscrutable memory state (e.g., a call).
 703     break;
 704   }
 705 
 706   return NULL;              // bail out
 707 }
 708 
 709 //----------------------calculate_adr_type-------------------------------------
 710 // Helper function.  Notices when the given type of address hits top or bottom.
 711 // Also, asserts a cross-check of the type against the expected address type.
 712 const TypePtr* MemNode::calculate_adr_type(const Type* t, const TypePtr* cross_check) {
 713   if (t == Type::TOP)  return NULL; // does not touch memory any more?
 714   #ifdef PRODUCT
 715   cross_check = NULL;
 716   #else
 717   if (!VerifyAliases || VMError::is_error_reported() || Node::in_dump())  cross_check = NULL;
 718   #endif
 719   const TypePtr* tp = t->isa_ptr();
 720   if (tp == NULL) {
 721     assert(cross_check == NULL || cross_check == TypePtr::BOTTOM, "expected memory type must be wide");
 722     return TypePtr::BOTTOM;           // touches lots of memory
 723   } else {
 724     #ifdef ASSERT
 725     // %%%% [phh] We don't check the alias index if cross_check is
 726     //            TypeRawPtr::BOTTOM.  Needs to be investigated.
 727     if (cross_check != NULL &&
 728         cross_check != TypePtr::BOTTOM &&
 729         cross_check != TypeRawPtr::BOTTOM) {
 730       // Recheck the alias index, to see if it has changed (due to a bug).
 731       Compile* C = Compile::current();
 732       assert(C->get_alias_index(cross_check) == C->get_alias_index(tp),
 733              "must stay in the original alias category");
 734       // The type of the address must be contained in the adr_type,
 735       // disregarding "null"-ness.
 736       // (We make an exception for TypeRawPtr::BOTTOM, which is a bit bucket.)
 737       const TypePtr* tp_notnull = tp->join(TypePtr::NOTNULL)->is_ptr();


4370     if (mem == base_mem) { st->print(" -"); continue; }
4371     st->print( " N%d:", mem->_idx );
4372     Compile::current()->get_adr_type(i)->dump_on(st);
4373   }
4374   st->print(" }");
4375 }
4376 #endif // !PRODUCT
4377 
4378 
4379 #ifdef ASSERT
// Conservative identity test used only from asserts: report true when the two
// nodes are identical, or when either is a Phi (Phis shift around during
// optimization, so we cannot rule out that they will become the same).
4380 static bool might_be_same(Node* a, Node* b) {
4381   if (a == b)  return true;
4382   if (!(a->is_Phi() || b->is_Phi()))  return false;
4383   // phis shift around during optimization
4384   return true;  // pretty stupid...
4385 }
4386 
4387 // verify a narrow slice (either incoming or outgoing)
4388 static void verify_memory_slice(const MergeMemNode* m, int alias_idx, Node* n) {
4389   if (!VerifyAliases)                return;  // don't bother to verify unless requested
4390   if (VMError::is_error_reported())  return;  // muzzle asserts when debugging an error
4391   if (Node::in_dump())               return;  // muzzle asserts when printing
4392   assert(alias_idx >= Compile::AliasIdxRaw, "must not disturb base_memory or sentinel");
4393   assert(n != NULL, "");
4394   // Elide intervening MergeMem's
4395   while (n->is_MergeMem()) {
4396     n = n->as_MergeMem()->memory_at(alias_idx);
4397   }
4398   Compile* C = Compile::current();
4399   const TypePtr* n_adr_type = n->adr_type();
4400   if (n == m->empty_memory()) {
4401     // Implicit copy of base_memory()
4402   } else if (n_adr_type != TypePtr::BOTTOM) {
4403     assert(n_adr_type != NULL, "new memory must have a well-defined adr_type");
4404     assert(C->must_alias(n_adr_type, alias_idx), "new memory must match selected slice");
4405   } else {
4406     // A few places like make_runtime_call "know" that VM calls are narrow,
4407     // and can be used to update only the VM bits stored as TypeRawPtr::BOTTOM.
4408     bool expected_wide_mem = false;
4409     if (n == m->base_memory()) {
4410       expected_wide_mem = true;


4431          "must avoid base_memory and AliasIdxTop");
4432 
4433   // Otherwise, it is a narrow slice.
4434   Node* n = alias_idx < req() ? in(alias_idx) : empty_memory();
4435   Compile *C = Compile::current();
4436   if (is_empty_memory(n)) {
4437     // the array is sparse; empty slots are the "top" node
4438     n = base_memory();
4439     assert(Node::in_dump()
4440            || n == NULL || n->bottom_type() == Type::TOP
4441            || n->adr_type() == NULL // address is TOP
4442            || n->adr_type() == TypePtr::BOTTOM
4443            || n->adr_type() == TypeRawPtr::BOTTOM
4444            || Compile::current()->AliasLevel() == 0,
4445            "must be a wide memory");
4446     // AliasLevel == 0 if we are organizing the memory states manually.
4447     // See verify_memory_slice for comments on TypeRawPtr::BOTTOM.
4448   } else {
4449     // make sure the stored slice is sane
4450     #ifdef ASSERT
4451     if (VMError::is_error_reported() || Node::in_dump()) {
4452     } else if (might_be_same(n, base_memory())) {
4453       // Give it a pass:  It is a mostly harmless repetition of the base.
4454       // This can arise normally from node subsumption during optimization.
4455     } else {
4456       verify_memory_slice(this, alias_idx, n);
4457     }
4458     #endif
4459   }
4460   return n;
4461 }
4462 
4463 //---------------------------set_memory_at-------------------------------------
4464 void MergeMemNode::set_memory_at(uint alias_idx, Node *n) {
4465   verify_memory_slice(this, alias_idx, n);
4466   Node* empty_mem = empty_memory();
4467   if (n == base_memory())  n = empty_mem;  // collapse default
4468   uint need_req = alias_idx+1;
4469   if (req() < need_req) {
4470     if (n == empty_mem)  return;  // already the default, so do not grow me
4471     // grow the sparse array


< prev index next >