src/share/vm/gc_implementation/g1/heapRegion.hpp

rev 6589 : 8047818: G1 HeapRegions can no longer be ContiguousSpaces
Reviewed-by:

--- old/src/share/vm/gc_implementation/g1/heapRegion.hpp

   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
  27 
  28 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
  29 #include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
  30 #include "gc_implementation/g1/survRateGroup.hpp"
  31 #include "gc_implementation/shared/ageTable.hpp"
  32 #include "gc_implementation/shared/spaceDecorator.hpp"
  33 #include "memory/space.inline.hpp"
  34 #include "memory/watermark.hpp"
  35 #include "utilities/macros.hpp"
  36 
  37 #if INCLUDE_ALL_GCS
  38 
  39 // A HeapRegion is the smallest piece of a G1CollectedHeap that
  40 // can be collected independently.
  41 
  42 // NOTE: Although a HeapRegion is a Space, its
  43 // Space::initDirtyCardClosure method must not be called.
  44 // The problem is that the existence of this method breaks
  45 // the independence of barrier sets from remembered sets.
  46 // The solution is to remove this method from the definition
  47 // of a Space.
  48 
  49 class CompactibleSpace;
  50 class ContiguousSpace;
  51 class HeapRegionRemSet;
  52 class HeapRegionRemSetIterator;
  53 class HeapRegion;
  54 class HeapRegionSetBase;
  55 class nmethod;
  56 
  57 #define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
  58 #define HR_FORMAT_PARAMS(_hr_) \
  59                 (_hr_)->hrs_index(), \
  60                 (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : \
  61                 (_hr_)->startsHumongous() ? "HS" : \
  62                 (_hr_)->continuesHumongous() ? "HC" : \
  63                 !(_hr_)->is_empty() ? "O" : "F", \
  64                 p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end())
  65 
  66 // sentinel value for hrs_index
  67 #define G1_NULL_HRS_INDEX ((uint) -1)
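
// Usage sketch (illustrative, not part of this change): the two macros
// above are designed to be used together in one format call, e.g.
// (hr is a HeapRegion*):
//
//   gclog_or_tty->print_cr("retiring region "HR_FORMAT, HR_FORMAT_PARAMS(hr));
//
// which prints something like: 12:(E)[0xf1000000,0xf1040000,0xf1100000]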
  68 
  69 // A dirty card to oop closure for heap regions. It
  70 // knows how to get the G1 heap and how to use the bitmap


 108 
 109 // The idea behind time stamps is the following. Doing a save_marks on
 110 // all regions at every GC pause is time-consuming (if I remember
 111 // correctly, 10ms or so). So, we would like to do that only for regions
 112 // that are GC alloc regions. To achieve this, we use time
 113 // stamps. For every evacuation pause, G1CollectedHeap generates a
 114 // unique time stamp (essentially a counter that gets
 115 // incremented). Every time we want to call save_marks on a region,
 116 // we set the saved_mark_word to top and also copy the current GC
 117 // time stamp to the time stamp field of the space. Reading the
 118 // saved_mark_word involves checking the time stamp of the
 119 // region. If it is the same as the current GC time stamp, then we
 120 // can safely read the saved_mark_word field, as it is valid. If the
 121 // time stamp of the region is not the same as the current GC time
 122 // stamp, then we instead read top, as the saved_mark_word field is
 123 // invalid. Time stamps (on the regions and also on the
 124 // G1CollectedHeap) are reset at every cleanup (we iterate over
 125 // the regions anyway) and at the end of a Full GC. The current scheme
 126 // that uses sequential unsigned ints will fail only if we have 4 billion
 127 // evacuation pauses between two cleanups, which is _highly_ unlikely.
 128 
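// A minimal sketch of both sides of the timestamp protocol described
// above. Illustrative only: the real bodies live in heapRegion.cpp and
// may differ in detail (the assert and the storestore barrier here are
// assumptions, not quotes from the patch).
//
//   void G1OffsetTableContigSpace::record_top_and_timestamp() {
//     G1CollectedHeap* g1h = G1CollectedHeap::heap();
//     unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();
//     if (_gc_time_stamp < curr_gc_time_stamp) {
//       _saved_mark_word = top();          // the per-region "save_marks"
//       OrderAccess::storestore();         // publish top before the stamp
//       _gc_time_stamp = curr_gc_time_stamp;
//     }
//   }
//
//   HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
//     G1CollectedHeap* g1h = G1CollectedHeap::heap();
//     assert(_gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant");
//     if (_gc_time_stamp < g1h->get_gc_time_stamp()) {
//       return top();                      // stale stamp: saved mark invalid
//     }
//     return Space::saved_mark_word();     // current stamp: saved mark valid
//   }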
 129 class G1OffsetTableContigSpace: public ContiguousSpace {
 130   friend class VMStructs;
 131  protected:
 132   G1BlockOffsetArrayContigSpace _offsets;
 133   Mutex _par_alloc_lock;
 134   volatile unsigned _gc_time_stamp;
 135   // When we need to retire an allocation region, while other threads
 136   // are also concurrently trying to allocate into it, we typically
 137   // allocate a dummy object at the end of the region to ensure that
 138   // no more allocations can take place in it. However, sometimes we
 139   // want to know where the end of the last "real" object we allocated
 140   // into the region was, and that is what this field keeps track of.
 141   HeapWord* _pre_dummy_top;
 142 
 143  public:
 144   G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
 145                            MemRegion mr);
 146 
 147   void set_bottom(HeapWord* value);
 148   void set_end(HeapWord* value);
 149 
 150   virtual HeapWord* saved_mark_word() const;
 151   void record_top_and_timestamp();
 152   void reset_gc_time_stamp() { _gc_time_stamp = 0; }
 153   unsigned get_gc_time_stamp() { return _gc_time_stamp; }
 154 
 155   // See the comment above in the declaration of _pre_dummy_top for an
 156   // explanation of what it is.
 157   void set_pre_dummy_top(HeapWord* pre_dummy_top) {
 158     assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
 159     _pre_dummy_top = pre_dummy_top;
 160   }
 161   HeapWord* pre_dummy_top() {
 162     return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
 163   }
 164   void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
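  // Hypothetical caller sketch (names assumed, not from this change):
  // how a thread retiring a region might use these hooks. It plugs the
  // unused tail with a dummy object so concurrent allocators fail, but
  // remembers where the last real object ended:
  //
  //   void retire(G1OffsetTableContigSpace* sp, size_t remaining) {
  //     HeapWord* real_end = sp->top();        // end of last real object
  //     if (remaining > 0) {
  //       HeapWord* dummy = sp->allocate(remaining);
  //       CollectedHeap::fill_with_object(dummy, remaining);
  //     }
  //     sp->set_pre_dummy_top(real_end);
  //   }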
 165 
 166   virtual void clear(bool mangle_space);
 167 
 168   HeapWord* block_start(const void* p);
 169   HeapWord* block_start_const(const void* p) const;
 170 
 171   // Add offset table update.
 172   virtual HeapWord* allocate(size_t word_size);
 173   HeapWord* par_allocate(size_t word_size);
 174 
 175   // MarkSweep support (phase 3)
 176   virtual HeapWord* initialize_threshold();
 177   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
 178 
 179   virtual void print() const;
 180 
 181   void reset_bot() {
 182     _offsets.zero_bottom_entry();
 183     _offsets.initialize_threshold();
 184   }
 185 
 186   void update_bot_for_object(HeapWord* start, size_t word_size) {
 187     _offsets.alloc_block(start, word_size);
 188   }
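  // Sketch of the intended use (assumed caller, e.g. an evacuation path
  // that bumps top without going through allocate()): the BOT must be
  // told about each object so block_start() can later map a card
  // address back to the object covering it:
  //
  //   HeapWord* obj = ...;   // word_size words carved out of the region
  //   hr->update_bot_for_object(obj, word_size);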
 189 
 190   void print_bot_on(outputStream* out) {


 336   // well as other related fields that are based on the heap region
 337   // size (LogOfHRGrainBytes / LogOfHRGrainWords /
 338   // CardsPerRegion). All those fields are considered constant
 339   // throughout the JVM's execution, therefore they should only be set
 340   // up once, during initialization.
 341   static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);
 342 
 343   enum ClaimValues {
 344     InitialClaimValue          = 0,
 345     FinalCountClaimValue       = 1,
 346     NoteEndClaimValue          = 2,
 347     ScrubRemSetClaimValue      = 3,
 348     ParVerifyClaimValue        = 4,
 349     RebuildRSClaimValue        = 5,
 350     ParEvacFailureClaimValue   = 6,
 351     AggregateCountClaimValue   = 7,
 352     VerifyCountClaimValue      = 8,
 353     ParMarkRootClaimValue      = 9
 354   };
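  // Sketch of how the claim values are used (assuming the
  // claimHeapRegion(jint) API declared elsewhere in this class):
  // parallel workers race to claim a region for a given phase, and
  // exactly one of them wins:
  //
  //   if (hr->claimHeapRegion(HeapRegion::FinalCountClaimValue)) {
  //     // this worker now owns hr for the final-count phase
  //   }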
 355 
 356   inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
 357     assert(is_young(), "we can only skip BOT updates on young regions");
 358     return ContiguousSpace::par_allocate(word_size);
 359   }
 360   inline HeapWord* allocate_no_bot_updates(size_t word_size) {
 361     assert(is_young(), "we can only skip BOT updates on young regions");
 362     return ContiguousSpace::allocate(word_size);
 363   }
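  // Skipping BOT updates is safe here because the block offset table is
  // only consulted when scanning cards, and young regions are never
  // card-scanned. Hypothetical fast-path caller (names assumed):
  //
  //   HeapWord* obj = hr->par_allocate_no_bot_updates(word_size);
  //   if (obj == NULL) { /* region full: retire it, take a fresh one */ }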
 364 
 365   // If this region is a member of a HeapRegionSeq, the index in that
 366   // sequence, otherwise G1_NULL_HRS_INDEX ((uint) -1).
 367   uint hrs_index() const { return _hrs_index; }
 368 
 369   // The number of bytes marked live in the region in the last marking phase.
 370   size_t marked_bytes()    { return _prev_marked_bytes; }
 371   size_t live_bytes() {
 372     return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
 373   }
 374 
 375   // The number of bytes counted in the next marking.
 376   size_t next_marked_bytes() { return _next_marked_bytes; }
 377   // The number of bytes live wrt the next marking.
 378   size_t next_live_bytes() {
 379     return
 380       (top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes();
 381   }
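  // Worked example (illustrative numbers): with 8-byte HeapWords, if
  // top() is 512 words past prev_top_at_mark_start() and marked_bytes()
  // returns 4096, then live_bytes() == 512 * 8 + 4096 == 8192. Objects
  // allocated above the top-at-mark-start are implicitly live; those
  // below it count only if the marking found them live.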
 382 
 383   // A lower bound on the amount of garbage bytes in the region.

+++ new/src/share/vm/gc_implementation/g1/heapRegion.hpp

   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
  27 
  28 #include "gc_implementation/g1/g1BlockOffsetTable.hpp"
  29 #include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
  30 #include "gc_implementation/g1/survRateGroup.hpp"
  31 #include "gc_implementation/shared/ageTable.hpp"
  32 #include "gc_implementation/shared/spaceDecorator.hpp"
  33 #include "memory/space.inline.hpp"
  34 #include "memory/watermark.hpp"
  35 #include "utilities/macros.hpp"
  36 
  37 #if INCLUDE_ALL_GCS
  38 
  39 // A HeapRegion is the smallest piece of a G1CollectedHeap that
  40 // can be collected independently.
  41 
  42 // NOTE: Although a HeapRegion is a Space, its
  43 // Space::initDirtyCardClosure method must not be called.
  44 // The problem is that the existence of this method breaks
  45 // the independence of barrier sets from remembered sets.
  46 // The solution is to remove this method from the definition
  47 // of a Space.
  48 
  49 class HeapRegionRemSet;
  50 class HeapRegionRemSetIterator;
  51 class HeapRegion;
  52 class HeapRegionSetBase;
  53 class nmethod;
  54 
  55 #define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
  56 #define HR_FORMAT_PARAMS(_hr_) \
  57                 (_hr_)->hrs_index(), \
  58                 (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : \
  59                 (_hr_)->startsHumongous() ? "HS" : \
  60                 (_hr_)->continuesHumongous() ? "HC" : \
  61                 !(_hr_)->is_empty() ? "O" : "F", \
  62                 p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end())
  63 
  64 // sentinel value for hrs_index
  65 #define G1_NULL_HRS_INDEX ((uint) -1)
  66 
  67 // A dirty card to oop closure for heap regions. It
  68 // knows how to get the G1 heap and how to use the bitmap


 106 
 107 // The idea behind time stamps is the following. Doing a save_marks on
 108 // all regions at every GC pause is time-consuming (if I remember
 109 // correctly, 10ms or so). So, we would like to do that only for regions
 110 // that are GC alloc regions. To achieve this, we use time
 111 // stamps. For every evacuation pause, G1CollectedHeap generates a
 112 // unique time stamp (essentially a counter that gets
 113 // incremented). Every time we want to call save_marks on a region,
 114 // we set the saved_mark_word to top and also copy the current GC
 115 // time stamp to the time stamp field of the space. Reading the
 116 // saved_mark_word involves checking the time stamp of the
 117 // region. If it is the same as the current GC time stamp, then we
 118 // can safely read the saved_mark_word field, as it is valid. If the
 119 // time stamp of the region is not the same as the current GC time
 120 // stamp, then we instead read top, as the saved_mark_word field is
 121 // invalid. Time stamps (on the regions and also on the
 122 // G1CollectedHeap) are reset at every cleanup (we iterate over
 123 // the regions anyway) and at the end of a Full GC. The current scheme
 124 // that uses sequential unsigned ints will fail only if we have 4 billion
 125 // evacuation pauses between two cleanups, which is _highly_ unlikely.
 126 class G1OffsetTableContigSpace: public CompactibleSpace {
 127   friend class VMStructs;
 128   HeapWord* _top;
 129  protected:
 130   inline HeapWord* cas_allocate_inner(size_t size);
 131   inline HeapWord* allocate_inner(size_t size);
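  // Plausible sketch of cas_allocate_inner now that _top is owned by
  // this class rather than inherited from ContiguousSpace (the real
  // bodies are in heapRegion.inline.hpp and may differ): lock-free
  // bump-pointer allocation via CAS on _top.
  //
  //   inline HeapWord*
  //   G1OffsetTableContigSpace::cas_allocate_inner(size_t size) {
  //     HeapWord* obj = top();
  //     do {
  //       if (pointer_delta(end(), obj) < size) {
  //         return NULL;                               // no room left
  //       }
  //       HeapWord* new_top = obj + size;
  //       HeapWord* prev =
  //         (HeapWord*)Atomic::cmpxchg_ptr(new_top, &_top, obj);
  //       if (prev == obj) {
  //         return obj;             // CAS won: [obj, new_top) is ours
  //       }
  //       obj = prev;               // lost the race: retry from new top
  //     } while (true);
  //   }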
 132   G1BlockOffsetArrayContigSpace _offsets;
 133   Mutex _par_alloc_lock;
 134   volatile unsigned _gc_time_stamp;
 135   // When we need to retire an allocation region, while other threads
 136   // are also concurrently trying to allocate into it, we typically
 137   // allocate a dummy object at the end of the region to ensure that
 138   // no more allocations can take place in it. However, sometimes we
 139   // want to know where the end of the last "real" object we allocated
 140   // into the region was, and that is what this field keeps track of.
 141   HeapWord* _pre_dummy_top;
 142 
 143  public:
 144   G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
 145                            MemRegion mr);
 146 
 147   inline void set_top(HeapWord* value) { _top = value; }
 148   HeapWord* top() const { return _top; }
 149   void reset_after_compaction() { set_top(compaction_top()); }
 150 
 151   size_t used() const { return byte_size(bottom(), top()); }
 152   size_t free() const { return byte_size(top(), end()); }
 153   bool is_free_block(const HeapWord* p) const { return p >= top(); }
 154 
 155   MemRegion used_region() const { return MemRegion(bottom(), top()); }
 156 
 157   void object_iterate(ObjectClosure* blk);
 158   void safe_object_iterate(ObjectClosure* blk);
 159 
 160   void set_bottom(HeapWord* value);
 161   void set_end(HeapWord* value);
 162 
 163   virtual HeapWord* saved_mark_word() const;
 164   void record_top_and_timestamp();
 165   void reset_gc_time_stamp() { _gc_time_stamp = 0; }
 166   unsigned get_gc_time_stamp() { return _gc_time_stamp; }
 167 
 168   // See the comment above in the declaration of _pre_dummy_top for an
 169   // explanation of what it is.
 170   void set_pre_dummy_top(HeapWord* pre_dummy_top) {
 171     assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
 172     _pre_dummy_top = pre_dummy_top;
 173   }
 174   HeapWord* pre_dummy_top() {
 175     return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
 176   }
 177   void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
 178 
 179   virtual void clear(bool mangle_space);
 180 
 181   HeapWord* block_start(const void* p);
 182   HeapWord* block_start_const(const void* p) const;
 183 
 184   void prepare_for_compaction(CompactPoint* cp);
 185 
 186   // Add offset table update.
 187   virtual HeapWord* allocate(size_t word_size);
 188   HeapWord* par_allocate(size_t word_size);
 189 
 190   // MarkSweep support (phase 3)
 191   virtual HeapWord* initialize_threshold();
 192   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
 193 
 194   virtual void print() const;
 195 
 196   void reset_bot() {
 197     _offsets.zero_bottom_entry();
 198     _offsets.initialize_threshold();
 199   }
 200 
 201   void update_bot_for_object(HeapWord* start, size_t word_size) {
 202     _offsets.alloc_block(start, word_size);
 203   }
 204 
 205   void print_bot_on(outputStream* out) {


 351   // well as other related fields that are based on the heap region
 352   // size (LogOfHRGrainBytes / LogOfHRGrainWords /
 353   // CardsPerRegion). All those fields are considered constant
 354   // throughout the JVM's execution, therefore they should only be set
 355   // up once, during initialization.
 356   static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);
 357 
 358   enum ClaimValues {
 359     InitialClaimValue          = 0,
 360     FinalCountClaimValue       = 1,
 361     NoteEndClaimValue          = 2,
 362     ScrubRemSetClaimValue      = 3,
 363     ParVerifyClaimValue        = 4,
 364     RebuildRSClaimValue        = 5,
 365     ParEvacFailureClaimValue   = 6,
 366     AggregateCountClaimValue   = 7,
 367     VerifyCountClaimValue      = 8,
 368     ParMarkRootClaimValue      = 9
 369   };
 370 
 371   bool block_is_obj(const HeapWord* p) const;
 372   size_t block_size(const HeapWord* p) const;
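  // (sketch) These let CompactibleSpace-style iteration walk a region
  // without ContiguousSpace's assumption that everything below top() is
  // a parseable object. An assumed caller would look like:
  //
  //   HeapWord* p = hr->bottom();
  //   while (p < hr->top()) {
  //     if (hr->block_is_obj(p)) {
  //       cl->do_object(oop(p));
  //     }
  //     p += hr->block_size(p);
  //   }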
 373 
 374   inline HeapWord* par_allocate_no_bot_updates(size_t word_size);
 375   inline HeapWord* allocate_no_bot_updates(size_t word_size);
 376 
 377   // If this region is a member of a HeapRegionSeq, the index in that
 378   // sequence, otherwise G1_NULL_HRS_INDEX ((uint) -1).
 379   uint hrs_index() const { return _hrs_index; }
 380 
 381   // The number of bytes marked live in the region in the last marking phase.
 382   size_t marked_bytes()    { return _prev_marked_bytes; }
 383   size_t live_bytes() {
 384     return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
 385   }
 386 
 387   // The number of bytes counted in the next marking.
 388   size_t next_marked_bytes() { return _next_marked_bytes; }
 389   // The number of bytes live wrt the next marking.
 390   size_t next_live_bytes() {
 391     return
 392       (top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes();
 393   }
 394 
 395   // A lower bound on the amount of garbage bytes in the region.