 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_HEAPREGION_HPP
#define SHARE_GC_G1_HEAPREGION_HPP

#include "gc/g1/g1BlockOffsetTable.hpp"
#include "gc/g1/g1HeapRegionTraceType.hpp"
#include "gc/g1/heapRegionTracer.hpp"
#include "gc/g1/heapRegionType.hpp"
#include "gc/g1/survRateGroup.hpp"
#include "gc/shared/ageTable.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/verifyOption.hpp"
#include "runtime/mutex.hpp"
#include "utilities/macros.hpp"

class G1CollectedHeap;
class G1CMBitMap;
class HeapRegionRemSet;
class HeapRegion;
class HeapRegionSetBase;
class nmethod;

#define HR_FORMAT "%u:(%s)[" PTR_FORMAT "," PTR_FORMAT "," PTR_FORMAT "]"
#define HR_FORMAT_PARAMS(_hr_) \
  (_hr_)->hrm_index(), \
  (_hr_)->get_short_type_str(), \
  p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end())
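
// For example (hypothetical logging call; a log_debug(gc)-style macro is
// assumed to be available at the call site), the two macros combine as:
//
//   log_debug(gc)("retiring region " HR_FORMAT, HR_FORMAT_PARAMS(region));
//
// which prints something like "17:(O)[0x...,0x...,0x...]".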

// sentinel value for hrm_index
#define G1_NO_HRM_INDEX ((uint) -1)

// A HeapRegion is the smallest piece of a G1CollectedHeap that
// can be collected independently.

// Each heap region is self-contained. top() and end() can never
// be set beyond the end of the region. For humongous objects,
// the first region is a StartsHumongous region. If the humongous
// object is larger than a heap region, the following regions will
// be of type ContinuesHumongous. In this case the top() of the
// StartsHumongous region and all ContinuesHumongous regions except
// the last will point to their own end. The last ContinuesHumongous
// region may have top() equal to the end of the object if there isn't
// room for filler objects to pad out to the end of the region.
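//
// Schematically (a sketch derived from the description above, not drawn
// from the implementation), a humongous object spanning three regions:
//
//   StartsHumongous:    bottom [object start .................] top == end
//   ContinuesHumongous: bottom [.............................] top == end
//   ContinuesHumongous: bottom [.... object end] top       (top <= end)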
class HeapRegion : public CHeapObj<mtGC> {
  friend class VMStructs;

  HeapWord* _bottom;
  HeapWord* _end;

  HeapWord* volatile _top;
  HeapWord* _compaction_top;

  G1BlockOffsetTablePart _bot_part;
  Mutex _par_alloc_lock;
  // When we need to retire an allocation region, while other threads
  // are also concurrently trying to allocate into it, we typically
  // allocate a dummy object at the end of the region to ensure that
  // no more allocations can take place in it. However, sometimes we
  // want to know where the end of the last "real" object we allocated
  // into the region was, and that is what this field keeps track of.
  HeapWord* _pre_dummy_top;

public:
  void set_bottom(HeapWord* value) { _bottom = value; }
  HeapWord* bottom() const         { return _bottom; }

  void set_end(HeapWord* value) { _end = value; }
  HeapWord* end() const         { return _end; }

  void set_compaction_top(HeapWord* compaction_top) { _compaction_top = compaction_top; }
  HeapWord* compaction_top() const { return _compaction_top; }

  void set_top(HeapWord* value) { _top = value; }
  HeapWord* top() const { return _top; }

  // Returns true iff the heap region contains the given address as part
  // of an allocated object. This may be a potentially costly operation,
  // so we restrict its use to assertion checks only.
  bool is_in(const void* p) const {
    return is_in_reserved(p);
  }
  bool is_in(oop obj) const {
    return is_in((void*)obj);
  }
  // Returns true iff the given reserved memory of the space contains the
  // given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }

  size_t capacity() const { return byte_size(bottom(), end()); }
  size_t used() const     { return byte_size(bottom(), top()); }
  size_t free() const     { return byte_size(top(), end()); }

  bool is_empty() const { return used() == 0; }

private:
  void reset_after_compaction() { set_top(compaction_top()); }

  // Try to allocate at least min_word_size and up to desired_word_size from
  // this HeapRegion. Returns NULL if not possible, otherwise sets
  // actual_word_size to the amount of space allocated.
  // This version assumes that all allocation requests to this HeapRegion are
  // properly synchronized.
  inline HeapWord* allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
  // Try to allocate at least min_word_size and up to desired_word_size from
  // this HeapRegion. Returns NULL if not possible, otherwise sets
  // actual_word_size to the amount of space allocated.
  // This version synchronizes with other calls to par_allocate_impl().
  inline HeapWord* par_allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
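
  // A minimal sketch of the lock-free bump-the-pointer scheme that
  // par_allocate_impl() is expected to follow (illustration only; the real
  // code lives in the .inline.hpp file, and the exact Atomic::cmpxchg
  // argument order shown here is an assumption):
  //
  //   do {
  //     HeapWord* obj = top();
  //     size_t available = pointer_delta(end(), obj);
  //     size_t want = MIN2(available, desired_word_size);
  //     if (want < min_word_size) {
  //       return NULL;                        // not enough room left
  //     }
  //     HeapWord* new_top = obj + want;
  //     if (Atomic::cmpxchg(new_top, &_top, obj) == obj) {
  //       *actual_word_size = want;           // we won the race
  //       return obj;
  //     }
  //   } while (true);                         // lost the race; retry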

  void mangle_unused_area() PRODUCT_RETURN;

public:
  void object_iterate(ObjectClosure* blk);

  // See the comment above in the declaration of _pre_dummy_top for an
  // explanation of what it is.
  void set_pre_dummy_top(HeapWord* pre_dummy_top) {
    assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
    _pre_dummy_top = pre_dummy_top;
  }

  HeapWord* pre_dummy_top() {
    return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
  }
  void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
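
  // Retirement then looks roughly like this (hypothetical caller-side
  // sketch, ignoring minimum-fill-size and racing-allocator details;
  // fill_with_object() is the shared CollectedHeap helper):
  //
  //   size_t remaining = pointer_delta(region->end(), region->top());
  //   if (remaining > 0) {
  //     HeapWord* dummy = region->allocate(remaining);  // plug the region
  //     CollectedHeap::fill_with_object(dummy, remaining);
  //   }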

  void clear(bool mangle_space);

  HeapWord* block_start(const void* p);
  HeapWord* block_start_const(const void* p) const;

  // Allocation (returns NULL if full). Assumes the caller has established
  // mutually exclusive access to the HeapRegion.
  HeapWord* allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
  // Allocation (returns NULL if full). Enforces mutual exclusion internally.
  HeapWord* par_allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);

  HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);

  HeapWord* saved_mark_word() const { ShouldNotReachHere(); return NULL; }

  // MarkSweep support phase3
  HeapWord* initialize_threshold();
  HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  void reset_bot() {
    _bot_part.reset_bot();
  }

  void print_bot_on(outputStream* out) {
    _bot_part.print_on(out);
  }

private:
  // The remembered set for this region.
  HeapRegionRemSet* _rem_set;

  void report_region_type_change(G1HeapRegionTraceType::Type to);

  // Returns whether the given object address refers to a dead object, and
  // either the size of the object (if live) or the size of the block (if
  // dead) in *size.
  // May only be called with obj < top(), and must not be called on humongous
  // objects or archive regions.
  inline bool is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const;

  // The index of this region in the heap region sequence.
  uint _hrm_index;

  HeapRegionType _type;

  // For a humongous region, the region in which the humongous object starts.
  HeapRegion* _humongous_start_region;

  // True iff an attempt to evacuate an object in the region failed.
  bool _evacuation_failed;

  // Fields used by the HeapRegionSetBase class and subclasses.
  HeapRegion* _next;
  HeapRegion* _prev;
#ifdef ASSERT
  HeapRegionSetBase* _containing_set;
#endif // ASSERT

  // We use concurrent marking to determine the amount of live data
  // in each heap region.
  size_t _prev_marked_bytes;    // Bytes known to be live via the last completed marking.
  size_t _next_marked_bytes;    // Bytes known to be live via the in-progress marking.

  static const uint InvalidCSetIndex = UINT_MAX;

  // The index in the optional regions array, if this region
  // is considered optional during a mixed collection.
  uint _index_in_opt_cset;

  // Data for young region survivor prediction.
  uint _young_index_in_cset;
  SurvRateGroup* _surv_rate_group;
  int _age_index;

  // The start of the unmarked area. The unmarked area extends from this
  // word until the top and/or end of the region, and is the part
  // of the region for which no marking was done, i.e. objects may
  // have been allocated in this part since the last mark phase.
  // "prev" is the top at the start of the last completed marking.
  // "next" is the top at the start of the in-progress marking (if any).
  HeapWord* _prev_top_at_mark_start;
  HeapWord* _next_top_at_mark_start;
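
  // Consequently, liveness wrt the previous marking can be decided as
  // follows: an object at or above _prev_top_at_mark_start was allocated
  // after that marking started and is implicitly considered live; an object
  // below it is live iff it is marked on the "prev" bitmap.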

  void init_top_at_mark_start() {
    assert(_prev_marked_bytes == 0 &&
           _next_marked_bytes == 0,
           "Must be called after zero_marked_bytes.");
    HeapWord* bot = bottom();
    _prev_top_at_mark_start = bot;
    _next_top_at_mark_start = bot;
  }

  // Cached attributes used in the collection set policy information.

  // The RSet length that was added to the total value
  // for the collection set.
  size_t _recorded_rs_length;

  // The predicted elapsed time that was added to the total value
  // for the collection set.
  double _predicted_elapsed_time_ms;

  // Iterate over the references covered by the given MemRegion in a humongous
  // object and apply the given closure to them.
  // Humongous objects are allocated directly in the old-gen. So we need special
  // handling for concurrent processing encountering an in-progress allocation.
  // Returns the address after the last word actually scanned, or NULL if the
  // area could not be scanned (that should only happen when invoked
  // concurrently with the mutator).
  template <class Closure, bool is_gc_active>
  inline HeapWord* do_oops_on_memregion_in_humongous(MemRegion mr,
                                                     Closure* cl,
                                                     G1CollectedHeap* g1h);

  // Returns the block size of the given (dead, potentially having its class unloaded) object
  // starting at p, extending to at most the prev TAMS, using the given mark bitmap.
  inline size_t block_size_using_bitmap(const HeapWord* p, const G1CMBitMap* const prev_bitmap) const;
public:
  HeapRegion(uint hrm_index, G1BlockOffsetTable* bot, MemRegion mr);

  // Initializing the HeapRegion not only resets the data structure, but also
  // resets the BOT for that heap region.
  // The default value of clear_space means that we will do the clearing, if
  // there is clearing to be done, ourselves. We also always mangle the space.
  void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);

  static int    LogOfHRGrainBytes;
  static int    LogOfHRGrainWords;
  static int    LogCardsPerRegion;

  static size_t GrainBytes;
  static size_t GrainWords;
  static size_t CardsPerRegion;

  static size_t align_up_to_region_byte_size(size_t sz) {
    return (sz + (size_t) GrainBytes - 1) &
           ~((1 << (size_t) LogOfHRGrainBytes) - 1);
  }
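
  // For example, with a 1 MB region size (GrainBytes == 1*M,
  // LogOfHRGrainBytes == 20): align_up_to_region_byte_size(1) == 1*M,
  // and align_up_to_region_byte_size(1*M + 1) == 2*M.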

  // Returns whether a field is in the same region as the obj it points to.
  template <typename T>
  static bool is_in_same_region(T* p, oop obj) {
    assert(p != NULL, "p can't be NULL");
    assert(obj != NULL, "obj can't be NULL");
    return (((uintptr_t) p ^ cast_from_oop<uintptr_t>(obj)) >> LogOfHRGrainBytes) == 0;
  }

  static size_t max_region_size();
  static size_t min_region_size_in_words();

  // Sets up the heap region size (GrainBytes / GrainWords), as
  // well as other related fields that are based on the heap region
  // size (LogOfHRGrainBytes / LogOfHRGrainWords /
  // CardsPerRegion). All those fields are considered constant
  // throughout the JVM's execution, therefore they should only be set
  // up once during initialization time.
  static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);

  // All allocated blocks are occupied by objects in a HeapRegion.
  bool block_is_obj(const HeapWord* p) const;

  // Returns whether the given object is dead based on TAMS and bitmap.
  bool is_obj_dead(const oop obj, const G1CMBitMap* const prev_bitmap) const;

  // Returns the object size for all valid block starts,
  // and the amount of unallocated words if called on top().
  size_t block_size(const HeapWord* p) const;

  // Scans through the region using the bitmap to determine what
  // objects to call size_t ApplyToMarkedClosure::apply(oop) for.
  template<typename ApplyToMarkedClosure>
  inline void apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure);
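
  // A hypothetical closure shape accepted by apply_to_marked_objects();
  // the size_t return value of apply() is presumably used to advance the
  // scan past the object just processed:
  //
  //   class CountLiveWordsClosure {
  //     size_t _live_words;
  //   public:
  //     CountLiveWordsClosure() : _live_words(0) {}
  //     size_t apply(oop obj) {
  //       size_t words = obj->size();
  //       _live_words += words;
  //       return words;
  //     }
  //   };
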
  // Update heap region to be consistent after compaction.
  void complete_compaction();

  inline HeapWord* par_allocate_no_bot_updates(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
  inline HeapWord* allocate_no_bot_updates(size_t word_size);
  inline HeapWord* allocate_no_bot_updates(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);

  // If this region is a member of a HeapRegionManager, the index in that
  // sequence, otherwise G1_NO_HRM_INDEX.
  uint hrm_index() const { return _hrm_index; }

  // The number of bytes marked live in the region in the last marking phase.
  size_t marked_bytes() { return _prev_marked_bytes; }
  size_t live_bytes() {
    return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
  }

  // The number of bytes counted in the next marking.
  size_t next_marked_bytes() { return _next_marked_bytes; }
  // The number of bytes live wrt the next marking.
  size_t next_live_bytes() {
    return (top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes();
  }

  void verify_strong_code_roots(VerifyOption vo, bool* failures) const;

  void print() const;
  void print_on(outputStream* st) const;

  // vo == UsePrevMarking -> use "prev" marking information,
  // vo == UseNextMarking -> use "next" marking information
  // vo == UseFullMarking -> use "next" marking bitmap but no TAMS
  //
  // NOTE: Only the "prev" marking information is guaranteed to be
  // consistent most of the time, so most calls to this should use
  // vo == UsePrevMarking.
  // Currently, there is only one case where this is called with
  // vo == UseNextMarking, which is to verify the "next" marking
  // information at the end of remark.
  // Currently there is only one place where this is called with
  // vo == UseFullMarking, which is to verify the marking during a
  // full GC.
  void verify(VerifyOption vo, bool* failures) const;

  // Verify using the "prev" marking information.
  void verify() const;

  void verify_rem_set(VerifyOption vo, bool* failures) const;
  void verify_rem_set() const;
};

// HeapRegionClosure is used for iterating over regions.
// Terminates the iteration when the "do_heap_region" method returns "true".
class HeapRegionClosure : public StackObj {
  friend class HeapRegionManager;
  friend class G1CollectionSet;
  friend class G1CollectionSetCandidates;

  bool _is_complete;
  void set_incomplete() { _is_complete = false; }

public:
  HeapRegionClosure() : _is_complete(true) {}

  // Typically called on each region until it returns true.
  virtual bool do_heap_region(HeapRegion* r) = 0;

  // True after iteration if the closure was applied to all heap regions
  // and returned "false" in all cases.
  bool is_complete() { return _is_complete; }
};
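
// A minimal example (hypothetical, for illustration only; it assumes an
// evacuation_failed() accessor on HeapRegion and an iterator such as
// G1CollectedHeap::heap_region_iterate()) of a closure that counts regions
// on which evacuation failed:
//
//   class EvacFailureRegionCounter : public HeapRegionClosure {
//     uint _count;
//   public:
//     EvacFailureRegionCounter() : HeapRegionClosure(), _count(0) {}
//     virtual bool do_heap_region(HeapRegion* r) {
//       if (r->evacuation_failed()) {
//         _count++;
//       }
//       return false;  // "false" means: continue the iteration
//     }
//     uint count() const { return _count; }
//   };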

#endif // SHARE_GC_G1_HEAPREGION_HPP