src/share/vm/gc/g1/heapRegion.hpp

@@ -113,16 +113,16 @@
 // time stamp. This is because a region may become a gc alloc region at
 // some point after we've read the timestamp value as being < the current time stamp.
 // The time stamps are re-initialized to zero at cleanup and at Full GCs.
 // The current scheme of sequential unsigned ints will fail only if we have 4 billion
 // evacuation pauses between two cleanups, which is _highly_ unlikely.
-class G1OffsetTableContigSpace: public CompactibleSpace {
+class G1ContiguousSpace: public CompactibleSpace {
   friend class VMStructs;
   HeapWord* volatile _top;
   HeapWord* volatile _scan_top;
  protected:
-  G1BlockOffsetArrayContigSpace _offsets;
+  G1BlockOffsetTablePart _bot_part;
   Mutex _par_alloc_lock;
   volatile unsigned _gc_time_stamp;
   // When we need to retire an allocation region, while other threads
   // are also concurrently trying to allocate into it, we typically
   // allocate a dummy object at the end of the region to ensure that

@@ -130,18 +130,17 @@
 // want to know where the end of the last "real" object we allocated
 // into the region was, and this is what this field keeps track of.
   HeapWord* _pre_dummy_top;
 
  public:
-  G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
-                           MemRegion mr);
+  G1ContiguousSpace(G1BlockOffsetTable* bot);
 
   void set_top(HeapWord* value) { _top = value; }
   HeapWord* top() const { return _top; }
 
  protected:
-  // Reset the G1OffsetTableContigSpace.
+  // Reset the G1ContiguousSpace.
   virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
 
   HeapWord* volatile* top_addr() { return &_top; }
   // Try to allocate at least min_word_size and up to desired_size from this Space.
   // Returns NULL if not possible, otherwise sets actual_word_size to the amount of

@@ -165,13 +164,10 @@
   MemRegion used_region() const { return MemRegion(bottom(), top()); }
 
   void object_iterate(ObjectClosure* blk);
   void safe_object_iterate(ObjectClosure* blk);
 
-  void set_bottom(HeapWord* value);
-  void set_end(HeapWord* value);
-
   void mangle_unused_area() PRODUCT_RETURN;
   void mangle_unused_area_complete() PRODUCT_RETURN;
 
   HeapWord* scan_top() const;
   void record_timestamp();

@@ -211,19 +207,19 @@
   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
 
   virtual void print() const;
 
   void reset_bot() {
-    _offsets.reset_bot();
+    _bot_part.reset_bot();
   }
 
   void print_bot_on(outputStream* out) {
-    _offsets.print_on(out);
+    _bot_part.print_on(out);
   }
 };
 
-class HeapRegion: public G1OffsetTableContigSpace {
+class HeapRegion: public G1ContiguousSpace {
   friend class VMStructs;
   // Allow scan_and_forward to call (private) overrides for auxiliary functions on this class
   template <typename SpaceType>
   friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
  private:

@@ -231,12 +227,10 @@
   // The remembered set for this region.
   // (Might want to make this "inline" later, to avoid some alloc failure
   // issues.)
   HeapRegionRemSet* _rem_set;
 
-  G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }
-
   // Auxiliary functions for scan_and_forward support.
   // See comments for CompactibleSpace for more information.
   inline HeapWord* scan_limit() const {
     return top();
   }

@@ -328,11 +322,11 @@
   // the total value for the collection set.
   size_t _predicted_bytes_to_copy;
 
  public:
   HeapRegion(uint hrm_index,
-             G1BlockOffsetSharedArray* sharedOffsetArray,
+             G1BlockOffsetTable* bot,
              MemRegion mr);
 
   // Initializing the HeapRegion not only resets the data structure, but also
   // resets the BOT for that heap region.
  // The default value for clear_space means that we will do the clearing if