84 // looking for objects and applying the oop closure (_cl) to
85 // them. The base implementation of this treats the area as
86 // blocks, where a block may or may not be an object. Sub-
87 // classes should override this to provide more accurate
88 // or possibly more efficient walking.
89 void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);
90
91 public:
92 HeapRegionDCTOC(G1CollectedHeap* g1,
93 HeapRegion* hr, ExtendedOopClosure* cl,
94 CardTableModRefBS::PrecisionStyle precision,
95 FilterKind fk);
96 };
97
98 // The complicating factor is that BlockOffsetTable diverged
99 // significantly, and we need functionality that is only in the G1 version.
100 // So I copied that code, which led to an alternate G1 version of
101 // OffsetTableContigSpace. If the two versions of BlockOffsetTable could
102 // be reconciled, then G1OffsetTableContigSpace could go away.
103
104 // The idea behind time stamps is the following. Doing a save_marks on
105 // all regions at every GC pause is time consuming (if I remember
106 // correctly, 10ms or so). So, we would like to do that only for regions
107 // that are GC alloc regions. To achieve this, we use time
108 // stamps. For every evacuation pause, G1CollectedHeap generates a
109 // unique time stamp (essentially a counter that gets
110 // incremented). Every time we want to call save_marks on a region,
111 // we set the saved_mark_word to top and also copy the current GC
112 // time stamp to the time stamp field of the space. Reading the
113 // saved_mark_word involves checking the time stamp of the
114 // region. If it is the same as the current GC time stamp, then we
115 // can safely read the saved_mark_word field, as it is valid. If the
116 // time stamp of the region is not the same as the current GC time
117 // stamp, then we instead read top, as the saved_mark_word field is
118 // invalid. Time stamps (on the regions and also on the
119 // G1CollectedHeap) are reset at every cleanup (we iterate over
120 // the regions anyway) and at the end of a Full GC. The current scheme
121 // that uses sequential unsigned ints will fail only if we have 4 billion
122 // evacuation pauses between two cleanups, which is _highly_ unlikely.
123 class G1OffsetTableContigSpace: public CompactibleSpace {
124   friend class VMStructs;
125   HeapWord* _top;  // current allocation point: [bottom(), _top) holds allocated data
126  protected:
127   G1BlockOffsetArrayContigSpace _offsets;  // block offset table backing block_start{,_const}()
128   Mutex _par_alloc_lock;  // presumably guards the slow path of par_allocate() -- confirm in .cpp
129   volatile unsigned _gc_time_stamp;  // validates saved_mark_word; see the class comment above
130   // When we need to retire an allocation region, while other threads
131   // are also concurrently trying to allocate into it, we typically
132   // allocate a dummy object at the end of the region to ensure that
133   // no more allocations can take place in it. However, sometimes we
134   // want to know where the end of the last "real" object we allocated
135   // into the region was and this is what this field keeps track of.
136   HeapWord* _pre_dummy_top;
137
138  public:
139   G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
140                            MemRegion mr);
141
142   void set_top(HeapWord* value) { _top = value; }
143   HeapWord* top() const { return _top; }
144
145  protected:
149   HeapWord** top_addr() { return &_top; }  // raw access for the parallel allocation helpers below
150   // Allocation helpers (return NULL if full).
151   inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
152   inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
153
154  public:
155   void reset_after_compaction() { set_top(compaction_top()); }  // full-GC compaction support
156
157   size_t used() const { return byte_size(bottom(), top()); }
158   size_t free() const { return byte_size(top(), end()); }
159   bool is_free_block(const HeapWord* p) const { return p >= top(); }
160
161   MemRegion used_region() const { return MemRegion(bottom(), top()); }
162
163   void object_iterate(ObjectClosure* blk);
164   void safe_object_iterate(ObjectClosure* blk);
165
166   void set_bottom(HeapWord* value);
167   void set_end(HeapWord* value);
168
169   virtual HeapWord* saved_mark_word() const;  // valid only if _gc_time_stamp is current, else top(); see class comment
170   void record_top_and_timestamp();  // saves top and stamps the region with the current GC time stamp
171   void reset_gc_time_stamp() { _gc_time_stamp = 0; }  // done at cleanup and at the end of a Full GC
172   unsigned get_gc_time_stamp() { return _gc_time_stamp; }
173
174   // See the comment above in the declaration of _pre_dummy_top for an
175   // explanation of what it is.
176   void set_pre_dummy_top(HeapWord* pre_dummy_top) {
177     assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
178     _pre_dummy_top = pre_dummy_top;
179   }
180   HeapWord* pre_dummy_top() {
181     return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;  // if unset, top() is the real end
182   }
183   void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
184
185   virtual void clear(bool mangle_space);
186
187   HeapWord* block_start(const void* p);  // start of the block containing p (a block may not be an object)
188   HeapWord* block_start_const(const void* p) const;
189
190   // Add offset table update.
191   virtual HeapWord* allocate(size_t word_size);
192   HeapWord* par_allocate(size_t word_size);  // multi-threaded variant; presumably uses _par_alloc_lock
193
194   // MarkSweep support phase3
195   virtual HeapWord* initialize_threshold();
196   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
197
198   virtual void print() const;
199
200   void reset_bot() {  // reset the block offset table
201     _offsets.reset_bot();
202   }
203
204   void print_bot_on(outputStream* out) {  // print the block offset table for debugging
205     _offsets.print_on(out);
206   }
207 };
208
209 class HeapRegion: public G1OffsetTableContigSpace {
210 friend class VMStructs;
211 // Allow scan_and_forward to call (private) overrides for auxiliary functions on this class
212 template <typename SpaceType>
|
84 // looking for objects and applying the oop closure (_cl) to
85 // them. The base implementation of this treats the area as
86 // blocks, where a block may or may not be an object. Sub-
87 // classes should override this to provide more accurate
88 // or possibly more efficient walking.
89 void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);
90
91 public:
92 HeapRegionDCTOC(G1CollectedHeap* g1,
93 HeapRegion* hr, ExtendedOopClosure* cl,
94 CardTableModRefBS::PrecisionStyle precision,
95 FilterKind fk);
96 };
97
98 // The complicating factor is that BlockOffsetTable diverged
99 // significantly, and we need functionality that is only in the G1 version.
100 // So I copied that code, which led to an alternate G1 version of
101 // OffsetTableContigSpace. If the two versions of BlockOffsetTable could
102 // be reconciled, then G1OffsetTableContigSpace could go away.
103
104 // The idea behind time stamps is the following. We want to keep track of
105 // the highest address where it's safe to scan objects for each region.
106 // This is only relevant for current GC alloc regions so we keep a time stamp
107 // per region to determine if the region has been allocated during the current
108 // GC or not. If the time stamp is current we report a scan_top value which
109 // was saved at the end of the previous GC for retained alloc regions and which is
110 // equal to the bottom for all other regions.
111 // There is a race between card scanners and allocating gc workers where we must ensure
112 // that card scanners do not read the memory allocated by the gc workers.
113 // In order to enforce that, we must not return a value of _top which is more recent than the
114 // time stamp. This is due to the fact that a region may become a gc alloc region at
115 // some point after we've read the timestamp value as being < the current time stamp.
116 // The time stamps are re-initialized to zero at cleanup and at Full GCs.
117 // The current scheme that uses sequential unsigned ints will fail only if we have 4 billion
118 // evacuation pauses between two cleanups, which is _highly_ unlikely.
119 class G1OffsetTableContigSpace: public CompactibleSpace {
120   friend class VMStructs;
121   HeapWord* _top;  // current allocation point: [bottom(), _top) holds allocated data
122   HeapWord* volatile _scan_top;  // highest address safe to scan for objects; see the class comment above
123  protected:
124   G1BlockOffsetArrayContigSpace _offsets;  // block offset table backing block_start{,_const}()
125   Mutex _par_alloc_lock;  // presumably guards the slow path of par_allocate() -- confirm in .cpp
126   volatile unsigned _gc_time_stamp;  // tells whether this region is a current-GC alloc region; see class comment
127   // When we need to retire an allocation region, while other threads
128   // are also concurrently trying to allocate into it, we typically
129   // allocate a dummy object at the end of the region to ensure that
130   // no more allocations can take place in it. However, sometimes we
131   // want to know where the end of the last "real" object we allocated
132   // into the region was and this is what this field keeps track of.
133   HeapWord* _pre_dummy_top;
134
135  public:
136   G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
137                            MemRegion mr);
138
139   void set_top(HeapWord* value) { _top = value; }
140   HeapWord* top() const { return _top; }
141
142  protected:
146   HeapWord** top_addr() { return &_top; }  // raw access for the parallel allocation helpers below
147   // Allocation helpers (return NULL if full).
148   inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
149   inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
150
151  public:
152   void reset_after_compaction() { set_top(compaction_top()); }  // full-GC compaction support
153
154   size_t used() const { return byte_size(bottom(), top()); }
155   size_t free() const { return byte_size(top(), end()); }
156   bool is_free_block(const HeapWord* p) const { return p >= top(); }
157
158   MemRegion used_region() const { return MemRegion(bottom(), top()); }
159
160   void object_iterate(ObjectClosure* blk);
161   void safe_object_iterate(ObjectClosure* blk);
162
163   void set_bottom(HeapWord* value);
164   void set_end(HeapWord* value);
165
166   HeapWord* scan_top() const;  // limit up to which card scanners may safely read; see class comment
167   void record_timestamp();  // stamps the region with the current GC time stamp
168   void reset_gc_time_stamp() { _gc_time_stamp = 0; }  // done at cleanup and at Full GCs
169   unsigned get_gc_time_stamp() { return _gc_time_stamp; }
170   void record_retained_region();  // saves scan info for an alloc region retained across GCs; see class comment
171
172   // See the comment above in the declaration of _pre_dummy_top for an
173   // explanation of what it is.
174   void set_pre_dummy_top(HeapWord* pre_dummy_top) {
175     assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
176     _pre_dummy_top = pre_dummy_top;
177   }
178   HeapWord* pre_dummy_top() {
179     return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;  // if unset, top() is the real end
180   }
181   void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
182
183   virtual void clear(bool mangle_space);
184
185   HeapWord* block_start(const void* p);  // start of the block containing p (a block may not be an object)
186   HeapWord* block_start_const(const void* p) const;
187
188   // Add offset table update.
189   virtual HeapWord* allocate(size_t word_size);
190   HeapWord* par_allocate(size_t word_size);  // multi-threaded variant; presumably uses _par_alloc_lock
191
192   HeapWord* saved_mark_word() const { ShouldNotReachHere(); return NULL; }  // unsupported for this space
193
194   // MarkSweep support phase3
195   virtual HeapWord* initialize_threshold();
196   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
197
198   virtual void print() const;
199
200   void reset_bot() {  // reset the block offset table
201     _offsets.reset_bot();
202   }
203
204   void print_bot_on(outputStream* out) {  // print the block offset table for debugging
205     _offsets.print_on(out);
206   }
207 };
208
209 class HeapRegion: public G1OffsetTableContigSpace {
210 friend class VMStructs;
211 // Allow scan_and_forward to call (private) overrides for auxiliary functions on this class
212 template <typename SpaceType>
|