src/share/vm/gc_implementation/g1/heapRegion.hpp

rev 6540 : 8054819: Rename HeapRegionSeq to HeapRegionManager
Reviewed-by: jwilhelm, jmasa


  37 #if INCLUDE_ALL_GCS
  38 
  39 // A HeapRegion is the smallest piece of a G1CollectedHeap that
  40 // can be collected independently.
  41 
  42 // NOTE: Although a HeapRegion is a Space, its
  43 // Space::initDirtyCardClosure method must not be called.
  44 // The problem is that the existence of this method breaks
  45 // the independence of barrier sets from remembered sets.
  46 // The solution is to remove this method from the definition
  47 // of a Space.
  48 
  49 class HeapRegionRemSet;
  50 class HeapRegionRemSetIterator;
  51 class HeapRegion;
  52 class HeapRegionSetBase;
  53 class nmethod;
  54 
  55 #define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
  56 #define HR_FORMAT_PARAMS(_hr_) \
  57                 (_hr_)->hrs_index(), \
  58                 (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : \
  59                 (_hr_)->startsHumongous() ? "HS" : \
  60                 (_hr_)->continuesHumongous() ? "HC" : \
  61                 !(_hr_)->is_empty() ? "O" : "F", \
  62                 p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end())
  63 
  64 // sentinel value for hrs_index
  65 #define G1_NO_HRS_INDEX ((uint) -1)
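
These two macros are meant to be used together: HR_FORMAT supplies the format string and HR_FORMAT_PARAMS(_hr_) the matching arguments. A minimal usage sketch, with a hypothetical print_region helper that is not part of this file (gclog_or_tty is the usual HotSpot logging stream):

    // Hypothetical helper: prints a region as, e.g.,
    // "17:(E)[0x...,0x...,0x...]" -- index, type tag, then bottom/top/end.
    static void print_region(HeapRegion* hr) {
      gclog_or_tty->print_cr(HR_FORMAT, HR_FORMAT_PARAMS(hr));
    }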
  66 
  67 // A dirty card to oop closure for heap regions. It
  68 // knows how to get the G1 heap and how to use the bitmap
  69 // in the concurrent marker used by G1 to filter remembered
  70 // sets.
  71 
  72 class HeapRegionDCTOC : public DirtyCardToOopClosure {
  73 public:
  74   // Specification of possible DirtyCardToOopClosure filtering.
  75   enum FilterKind {
  76     NoFilterKind,
  77     IntoCSFilterKind,
  78     OutOfRegionFilterKind
  79   };
  80 
  81 protected:
  82   HeapRegion* _hr;
  83   FilterKind _fk;
  84   G1CollectedHeap* _g1;
  85 


 217 
 218 class HeapRegion: public G1OffsetTableContigSpace {
 219   friend class VMStructs;
 220  private:
 221 
 222   enum HumongousType {
 223     NotHumongous = 0,
 224     StartsHumongous,
 225     ContinuesHumongous
 226   };
 227 
 228   // The remembered set for this region.
 229   // (Might want to make this "inline" later, to avoid some alloc failure
 230   // issues.)
 231   HeapRegionRemSet* _rem_set;
 232 
 233   G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }
 234 
 235  protected:
 236   // The index of this region in the heap region sequence.
 237   uint  _hrs_index;
 238 
 239   HumongousType _humongous_type;
  240   // For a humongous region, the region in which it starts.
  241   HeapRegion* _humongous_start_region;
  242   // For the start region of a humongous sequence, its original end().
 243   HeapWord* _orig_end;
 244 
  245   // True iff the region is in the current collection_set.
 246   bool _in_collection_set;
 247 
 248   // True iff an attempt to evacuate an object in the region failed.
 249   bool _evacuation_failed;
 250 
  251   // A heap region may be a member of one of a number of special subsets,
  252   // each represented as a linked list through the field below.  Currently,
  253   // there is only one set:
  254   //   The collection set.
 255   HeapRegion* _next_in_special_set;
 256 
 257   // next region in the young "generation" region set


 313     //assert(_young_type != new_type, "setting the same type" );
 314     // TODO: add more assertions here
 315     _young_type = new_type;
 316   }
 317 
 318   // Cached attributes used in the collection set policy information
 319 
 320   // The RSet length that was added to the total value
 321   // for the collection set.
 322   size_t _recorded_rs_length;
 323 
  324   // The predicted elapsed time that was added to the total value
 325   // for the collection set.
 326   double _predicted_elapsed_time_ms;
 327 
 328   // The predicted number of bytes to copy that was added to
 329   // the total value for the collection set.
 330   size_t _predicted_bytes_to_copy;
 331 
 332  public:
 333   HeapRegion(uint hrs_index,
 334              G1BlockOffsetSharedArray* sharedOffsetArray,
 335              MemRegion mr);
 336 
 337   // Initializing the HeapRegion not only resets the data structure, but also
 338   // resets the BOT for that heap region.
  339   // The default value for clear_space means that we will do the clearing
  340   // ourselves if there's clearing to be done. We also always mangle the space.
 341   virtual void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);
 342 
 343   static int    LogOfHRGrainBytes;
 344   static int    LogOfHRGrainWords;
 345 
 346   static size_t GrainBytes;
 347   static size_t GrainWords;
 348   static size_t CardsPerRegion;
 349 
 350   static size_t align_up_to_region_byte_size(size_t sz) {
 351     return (sz + (size_t) GrainBytes - 1) &
 352                                       ~((1 << (size_t) LogOfHRGrainBytes) - 1);
 353   }
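
A worked example of the alignment arithmetic, assuming a hypothetical 1 MB region size (GrainBytes == 0x100000, LogOfHRGrainBytes == 20):

    // align_up_to_region_byte_size(1)        == 0x100000  // rounds up to one region
    // align_up_to_region_byte_size(0x100000) == 0x100000  // exact multiple, unchanged
    // align_up_to_region_byte_size(0x100001) == 0x200000  // spills into a second region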


 368     NoteEndClaimValue          = 2,
 369     ScrubRemSetClaimValue      = 3,
 370     ParVerifyClaimValue        = 4,
 371     RebuildRSClaimValue        = 5,
 372     ParEvacFailureClaimValue   = 6,
 373     AggregateCountClaimValue   = 7,
 374     VerifyCountClaimValue      = 8,
 375     ParMarkRootClaimValue      = 9
 376   };
 377 
  378   // In a HeapRegion, all allocated blocks are occupied by objects
 379   bool block_is_obj(const HeapWord* p) const;
 380 
 381   // Returns the object size for all valid block starts
  382   // and the number of unallocated words if called on top()
 383   size_t block_size(const HeapWord* p) const;
 384 
 385   inline HeapWord* par_allocate_no_bot_updates(size_t word_size);
 386   inline HeapWord* allocate_no_bot_updates(size_t word_size);
 387 
 388   // If this region is a member of a HeapRegionSeq, the index in that
 389   // sequence, otherwise -1.
 390   uint hrs_index() const { return _hrs_index; }
 391 
 392   // The number of bytes marked live in the region in the last marking phase.
 393   size_t marked_bytes()    { return _prev_marked_bytes; }
 394   size_t live_bytes() {
 395     return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
 396   }
 397 
 398   // The number of bytes counted in the next marking.
 399   size_t next_marked_bytes() { return _next_marked_bytes; }
 400   // The number of bytes live wrt the next marking.
 401   size_t next_live_bytes() {
 402     return
 403       (top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes();
 404   }
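
Both formulas count everything allocated above the corresponding top-at-mark-start as implicitly live and add the bytes that marking found below it. A hypothetical worked case for live_bytes():

    // bottom                    prev_top_at_mark_start()                 top()
    //   |-- scanned by marking; 3 MB marked live --|-- 1 MB allocated since --|
    // live_bytes() == (top() - prev_top_at_mark_start()) * HeapWordSize
    //                 + marked_bytes()
    //              == 1 MB + 3 MB == 4 MB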
 405 
 406   // A lower bound on the amount of garbage bytes in the region.
 407   size_t garbage_bytes() {
 408     size_t used_at_mark_start_bytes =
 409       (prev_top_at_mark_start() - bottom()) * HeapWordSize;
 410     assert(used_at_mark_start_bytes >= marked_bytes(),


 441   HeapRegion* humongous_start_region() const {
 442     return _humongous_start_region;
 443   }
 444 
 445   // Return the number of distinct regions that are covered by this region:
 446   // 1 if the region is not humongous, >= 1 if the region is humongous.
 447   uint region_num() const {
 448     if (!isHumongous()) {
 449       return 1U;
 450     } else {
 451       assert(startsHumongous(), "doesn't make sense on HC regions");
 452       assert(capacity() % HeapRegion::GrainBytes == 0, "sanity");
 453       return (uint) (capacity() >> HeapRegion::LogOfHRGrainBytes);
 454     }
 455   }
 456 
  457   // Return the index + 1 of the last HC region that's associated
 458   // with this HS region.
 459   uint last_hc_index() const {
 460     assert(startsHumongous(), "don't call this otherwise");
 461     return hrs_index() + region_num();
 462   }
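
Together, hrs_index(), region_num() and last_hc_index() bound a humongous series, so walking its "continues humongous" tail is a simple loop. A minimal sketch, assuming G1CollectedHeap's region_at(uint) accessor (illustrative only, not part of this file):

    static void walk_hc_tail(G1CollectedHeap* g1h, HeapRegion* hs) {
      assert(hs->startsHumongous(), "precondition");
      // Regions [hrs_index() + 1, last_hc_index()) form the HC tail.
      for (uint i = hs->hrs_index() + 1; i < hs->last_hc_index(); i++) {
        HeapRegion* hc = g1h->region_at(i);
        assert(hc->continuesHumongous(), "sanity");
        // ... process hc ...
      }
    }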
 463 
 464   // Same as Space::is_in_reserved, but will use the original size of the region.
  465   // The original size is different only for starts humongous regions. They get
  466   // their _end set up to be the end of the last continues humongous region of
  467   // the corresponding humongous object.
 468   bool is_in_reserved_raw(const void* p) const {
 469     return _bottom <= p && p < _orig_end;
 470   }
 471 
 472   // Makes the current region be a "starts humongous" region, i.e.,
 473   // the first region in a series of one or more contiguous regions
 474   // that will contain a single "humongous" object. The two parameters
 475   // are as follows:
 476   //
 477   // new_top : The new value of the top field of this region which
 478   // points to the end of the humongous object that's being
 479   // allocated. If there is more than one region in the series, top
 480   // will lie beyond this region's original end field and on the last
 481   // region in the series.


 796   // vo == UseMarkWord    -> use the mark word in the object header
 797   //
 798   // NOTE: Only the "prev" marking information is guaranteed to be
 799   // consistent most of the time, so most calls to this should use
 800   // vo == UsePrevMarking.
 801   // Currently, there is only one case where this is called with
 802   // vo == UseNextMarking, which is to verify the "next" marking
 803   // information at the end of remark.
 804   // Currently there is only one place where this is called with
 805   // vo == UseMarkWord, which is to verify the marking during a
 806   // full GC.
 807   void verify(VerifyOption vo, bool *failures) const;
 808 
 809   // Override; it uses the "prev" marking information
 810   virtual void verify() const;
 811 };
 812 
 813 // HeapRegionClosure is used for iterating over regions.
 814 // Terminates the iteration when the "doHeapRegion" method returns "true".
 815 class HeapRegionClosure : public StackObj {
 816   friend class HeapRegionSeq;
 817   friend class G1CollectedHeap;
 818 
 819   bool _complete;
 820   void incomplete() { _complete = false; }
 821 
 822  public:
 823   HeapRegionClosure(): _complete(true) {}
 824 
 825   // Typically called on each region until it returns true.
 826   virtual bool doHeapRegion(HeapRegion* r) = 0;
 827 
 828   // True after iteration if the closure was applied to all heap regions
 829   // and returned "false" in all cases.
 830   bool complete() { return _complete; }
 831 };
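
A minimal closure sketch, hypothetical and not part of this file: it counts regions and stops at the first humongous one. Returning true from doHeapRegion terminates the iteration, and the iterator then flags the closure as incomplete.

    class CountUntilHumongousClosure : public HeapRegionClosure {
      uint _count;
     public:
      CountUntilHumongousClosure() : _count(0) { }
      virtual bool doHeapRegion(HeapRegion* r) {
        if (r->isHumongous()) {
          return true;   // terminate the iteration early
        }
        _count++;        // count every non-humongous region seen
        return false;    // keep iterating
      }
      uint count() const { return _count; }
    };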
 832 
 833 #endif // INCLUDE_ALL_GCS
 834 
 835 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP

=== End of the original file; the patched version (HeapRegionSeq renamed to HeapRegionManager) follows. ===

  37 #if INCLUDE_ALL_GCS
  38 
  39 // A HeapRegion is the smallest piece of a G1CollectedHeap that
  40 // can be collected independently.
  41 
  42 // NOTE: Although a HeapRegion is a Space, its
  43 // Space::initDirtyCardClosure method must not be called.
  44 // The problem is that the existence of this method breaks
  45 // the independence of barrier sets from remembered sets.
  46 // The solution is to remove this method from the definition
  47 // of a Space.
  48 
  49 class HeapRegionRemSet;
  50 class HeapRegionRemSetIterator;
  51 class HeapRegion;
  52 class HeapRegionSetBase;
  53 class nmethod;
  54 
  55 #define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
  56 #define HR_FORMAT_PARAMS(_hr_) \
  57                 (_hr_)->hrm_index(), \
  58                 (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : \
  59                 (_hr_)->startsHumongous() ? "HS" : \
  60                 (_hr_)->continuesHumongous() ? "HC" : \
  61                 !(_hr_)->is_empty() ? "O" : "F", \
  62                 p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end())
  63 
  64 // sentinel value for hrm_index
  65 #define G1_NO_HRM_INDEX ((uint) -1)
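
The macros are used exactly as in the old version; only the macro body changes with the rename (hrs_index() becomes hrm_index()), so call sites need no edits. The same hypothetical sketch applies:

    // Hypothetical helper; unchanged by the rename because the index
    // accessor is hidden behind HR_FORMAT_PARAMS.
    static void print_region(HeapRegion* hr) {
      gclog_or_tty->print_cr(HR_FORMAT, HR_FORMAT_PARAMS(hr));
    }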
  66 
  67 // A dirty card to oop closure for heap regions. It
  68 // knows how to get the G1 heap and how to use the bitmap
  69 // in the concurrent marker used by G1 to filter remembered
  70 // sets.
  71 
  72 class HeapRegionDCTOC : public DirtyCardToOopClosure {
  73 public:
  74   // Specification of possible DirtyCardToOopClosure filtering.
  75   enum FilterKind {
  76     NoFilterKind,
  77     IntoCSFilterKind,
  78     OutOfRegionFilterKind
  79   };
  80 
  81 protected:
  82   HeapRegion* _hr;
  83   FilterKind _fk;
  84   G1CollectedHeap* _g1;
  85 


 217 
 218 class HeapRegion: public G1OffsetTableContigSpace {
 219   friend class VMStructs;
 220  private:
 221 
 222   enum HumongousType {
 223     NotHumongous = 0,
 224     StartsHumongous,
 225     ContinuesHumongous
 226   };
 227 
 228   // The remembered set for this region.
 229   // (Might want to make this "inline" later, to avoid some alloc failure
 230   // issues.)
 231   HeapRegionRemSet* _rem_set;
 232 
 233   G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }
 234 
 235  protected:
 236   // The index of this region in the heap region sequence.
 237   uint  _hrm_index;
 238 
 239   HumongousType _humongous_type;
  240   // For a humongous region, the region in which it starts.
  241   HeapRegion* _humongous_start_region;
  242   // For the start region of a humongous sequence, its original end().
 243   HeapWord* _orig_end;
 244 
  245   // True iff the region is in the current collection_set.
 246   bool _in_collection_set;
 247 
 248   // True iff an attempt to evacuate an object in the region failed.
 249   bool _evacuation_failed;
 250 
  251   // A heap region may be a member of one of a number of special subsets,
  252   // each represented as a linked list through the field below.  Currently,
  253   // there is only one set:
  254   //   The collection set.
 255   HeapRegion* _next_in_special_set;
 256 
 257   // next region in the young "generation" region set


 313     //assert(_young_type != new_type, "setting the same type" );
 314     // TODO: add more assertions here
 315     _young_type = new_type;
 316   }
 317 
 318   // Cached attributes used in the collection set policy information
 319 
 320   // The RSet length that was added to the total value
 321   // for the collection set.
 322   size_t _recorded_rs_length;
 323 
  324   // The predicted elapsed time that was added to the total value
 325   // for the collection set.
 326   double _predicted_elapsed_time_ms;
 327 
 328   // The predicted number of bytes to copy that was added to
 329   // the total value for the collection set.
 330   size_t _predicted_bytes_to_copy;
 331 
 332  public:
 333   HeapRegion(uint hrm_index,
 334              G1BlockOffsetSharedArray* sharedOffsetArray,
 335              MemRegion mr);
 336 
 337   // Initializing the HeapRegion not only resets the data structure, but also
 338   // resets the BOT for that heap region.
  339   // The default value for clear_space means that we will do the clearing
  340   // ourselves if there's clearing to be done. We also always mangle the space.
 341   virtual void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);
 342 
 343   static int    LogOfHRGrainBytes;
 344   static int    LogOfHRGrainWords;
 345 
 346   static size_t GrainBytes;
 347   static size_t GrainWords;
 348   static size_t CardsPerRegion;
 349 
 350   static size_t align_up_to_region_byte_size(size_t sz) {
 351     return (sz + (size_t) GrainBytes - 1) &
 352                                       ~((1 << (size_t) LogOfHRGrainBytes) - 1);
 353   }
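
The alignment arithmetic is untouched by the rename; the same worked example applies, again assuming a hypothetical 1 MB region size:

    // align_up_to_region_byte_size(1)        == 0x100000  // rounds up to one region
    // align_up_to_region_byte_size(0x100000) == 0x100000  // exact multiple, unchanged
    // align_up_to_region_byte_size(0x100001) == 0x200000  // spills into a second region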


 368     NoteEndClaimValue          = 2,
 369     ScrubRemSetClaimValue      = 3,
 370     ParVerifyClaimValue        = 4,
 371     RebuildRSClaimValue        = 5,
 372     ParEvacFailureClaimValue   = 6,
 373     AggregateCountClaimValue   = 7,
 374     VerifyCountClaimValue      = 8,
 375     ParMarkRootClaimValue      = 9
 376   };
 377 
  378   // In a HeapRegion, all allocated blocks are occupied by objects
 379   bool block_is_obj(const HeapWord* p) const;
 380 
 381   // Returns the object size for all valid block starts
  382   // and the number of unallocated words if called on top()
 383   size_t block_size(const HeapWord* p) const;
 384 
 385   inline HeapWord* par_allocate_no_bot_updates(size_t word_size);
 386   inline HeapWord* allocate_no_bot_updates(size_t word_size);
 387 
 388   // If this region is a member of a HeapRegionManager, the index in that
 389   // sequence, otherwise -1.
 390   uint hrm_index() const { return _hrm_index; }
 391 
 392   // The number of bytes marked live in the region in the last marking phase.
 393   size_t marked_bytes()    { return _prev_marked_bytes; }
 394   size_t live_bytes() {
 395     return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
 396   }
 397 
 398   // The number of bytes counted in the next marking.
 399   size_t next_marked_bytes() { return _next_marked_bytes; }
 400   // The number of bytes live wrt the next marking.
 401   size_t next_live_bytes() {
 402     return
 403       (top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes();
 404   }
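
As before, everything allocated above the corresponding top-at-mark-start counts as implicitly live; the worked case from the old version applies unchanged:

    // live_bytes() == (top() - prev_top_at_mark_start()) * HeapWordSize
    //                 + marked_bytes()
    //              == 1 MB + 3 MB == 4 MB, for 3 MB marked plus 1 MB allocated since.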
 405 
 406   // A lower bound on the amount of garbage bytes in the region.
 407   size_t garbage_bytes() {
 408     size_t used_at_mark_start_bytes =
 409       (prev_top_at_mark_start() - bottom()) * HeapWordSize;
 410     assert(used_at_mark_start_bytes >= marked_bytes(),


 441   HeapRegion* humongous_start_region() const {
 442     return _humongous_start_region;
 443   }
 444 
 445   // Return the number of distinct regions that are covered by this region:
 446   // 1 if the region is not humongous, >= 1 if the region is humongous.
 447   uint region_num() const {
 448     if (!isHumongous()) {
 449       return 1U;
 450     } else {
 451       assert(startsHumongous(), "doesn't make sense on HC regions");
 452       assert(capacity() % HeapRegion::GrainBytes == 0, "sanity");
 453       return (uint) (capacity() >> HeapRegion::LogOfHRGrainBytes);
 454     }
 455   }
 456 
  457   // Return the index + 1 of the last HC region that's associated
 458   // with this HS region.
 459   uint last_hc_index() const {
 460     assert(startsHumongous(), "don't call this otherwise");
 461     return hrm_index() + region_num();
 462   }
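
The humongous-series walk from the old version carries over with only the accessor renamed. A minimal sketch, again assuming G1CollectedHeap's region_at(uint) accessor (illustrative only):

    static void walk_hc_tail(G1CollectedHeap* g1h, HeapRegion* hs) {
      assert(hs->startsHumongous(), "precondition");
      // Regions [hrm_index() + 1, last_hc_index()) form the HC tail.
      for (uint i = hs->hrm_index() + 1; i < hs->last_hc_index(); i++) {
        HeapRegion* hc = g1h->region_at(i);
        assert(hc->continuesHumongous(), "sanity");
        // ... process hc ...
      }
    }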
 463 
 464   // Same as Space::is_in_reserved, but will use the original size of the region.
  465   // The original size is different only for starts humongous regions. They get
  466   // their _end set up to be the end of the last continues humongous region of
  467   // the corresponding humongous object.
 468   bool is_in_reserved_raw(const void* p) const {
 469     return _bottom <= p && p < _orig_end;
 470   }
 471 
 472   // Makes the current region be a "starts humongous" region, i.e.,
 473   // the first region in a series of one or more contiguous regions
 474   // that will contain a single "humongous" object. The two parameters
 475   // are as follows:
 476   //
 477   // new_top : The new value of the top field of this region which
 478   // points to the end of the humongous object that's being
 479   // allocated. If there is more than one region in the series, top
 480   // will lie beyond this region's original end field and on the last
 481   // region in the series.


 796   // vo == UseMarkWord    -> use the mark word in the object header
 797   //
 798   // NOTE: Only the "prev" marking information is guaranteed to be
 799   // consistent most of the time, so most calls to this should use
 800   // vo == UsePrevMarking.
 801   // Currently, there is only one case where this is called with
 802   // vo == UseNextMarking, which is to verify the "next" marking
 803   // information at the end of remark.
 804   // Currently there is only one place where this is called with
 805   // vo == UseMarkWord, which is to verify the marking during a
 806   // full GC.
 807   void verify(VerifyOption vo, bool *failures) const;
 808 
 809   // Override; it uses the "prev" marking information
 810   virtual void verify() const;
 811 };
 812 
 813 // HeapRegionClosure is used for iterating over regions.
 814 // Terminates the iteration when the "doHeapRegion" method returns "true".
 815 class HeapRegionClosure : public StackObj {
 816   friend class HeapRegionManager;
 817   friend class G1CollectedHeap;
 818 
 819   bool _complete;
 820   void incomplete() { _complete = false; }
 821 
 822  public:
 823   HeapRegionClosure(): _complete(true) {}
 824 
 825   // Typically called on each region until it returns true.
 826   virtual bool doHeapRegion(HeapRegion* r) = 0;
 827 
 828   // True after iteration if the closure was applied to all heap regions
 829   // and returned "false" in all cases.
 830   bool complete() { return _complete; }
 831 };
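
Only the friend declaration changes here, so closure subclasses are unaffected. A hypothetical usage sketch, reusing the CountUntilHumongousClosure example from above:

    // Illustrative only; given a G1CollectedHeap* g1h, heap_region_iterate
    // hands each region to the closure in turn.
    CountUntilHumongousClosure cl;
    g1h->heap_region_iterate(&cl);
    if (cl.complete()) {
      // every region was visited; none was humongous
    }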
 832 
 833 #endif // INCLUDE_ALL_GCS
 834 
 835 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP