39 // can be collected independently.
40
41 // NOTE: Although a HeapRegion is a Space, its
42 // Space::initDirtyCardClosure method must not be called.
43 // The problem is that the existence of this method breaks
44 // the independence of barrier sets from remembered sets.
45 // The solution is to remove this method from the definition
46 // of a Space.
47
48 // Each heap region is self contained. top() and end() can never
49 // be set beyond the end of the region. For humongous objects,
50 // the first region is a StartsHumongous region. If the humongous
51 // object is larger than a heap region, the following regions will
52 // be of type ContinuesHumongous. In this case the top() of the
53 // StartsHumongous region and all ContinuesHumongous regions except
54 // the last will point to their own end. The last ContinuesHumongous
55 // region may have top() equal to the end of the object if there isn't
56 // room for filler objects to pad out to the end of the region.
57
58 class G1CollectedHeap;
59 class HeapRegionRemSet;
60 class HeapRegionRemSetIterator;
61 class HeapRegion;
62 class HeapRegionSetBase;
63 class nmethod;
64
65 #define HR_FORMAT "%u:(%s)[" PTR_FORMAT "," PTR_FORMAT "," PTR_FORMAT "]"
66 #define HR_FORMAT_PARAMS(_hr_) \
67 (_hr_)->hrm_index(), \
68 (_hr_)->get_short_type_str(), \
69 p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end())
70
71 // sentinel value for hrm_index
72 #define G1_NO_HRM_INDEX ((uint) -1)
73
74 // A dirty card to oop closure for heap regions. It
75 // knows how to get the G1 heap and how to use the bitmap
76 // in the concurrent marker used by G1 to filter remembered
77 // sets.
78
231 // (Might want to make this "inline" later, to avoid some alloc failure
232 // issues.)
233 HeapRegionRemSet* _rem_set;
234
235 // Auxiliary functions for scan_and_forward support.
236 // See comments for CompactibleSpace for more information.
// Scanning during scan_and_forward stops at top(): all words below it are parsable.
237 inline HeapWord* scan_limit() const {
238 return top();
239 }
240
// Every block below scan_limit() (== top()) in a HeapRegion is an object.
241 inline bool scanned_block_is_obj(const HeapWord* addr) const {
242 return true; // Always true, since scan_limit is top
243 }
244
// Size of the block starting at addr, using the statically-bound
// HeapRegion::block_size to avoid a virtual dispatch.
245 inline size_t scanned_block_size(const HeapWord* addr) const {
246 return HeapRegion::block_size(addr); // Avoid virtual call
247 }
248
249 void report_region_type_change(G1HeapRegionTraceType::Type to);
250
251 protected:
252 // The index of this region in the heap region sequence.
253 uint _hrm_index;
254
255 AllocationContext_t _allocation_context;
256
257 HeapRegionType _type;
258
259 // For a humongous region, region in which it starts.
260 HeapRegion* _humongous_start_region;
261
262 // True iff an attempt to evacuate an object in the region failed.
263 bool _evacuation_failed;
264
265 // Fields used by the HeapRegionSetBase class and subclasses.
266 HeapRegion* _next;
267 HeapRegion* _prev;
268 #ifdef ASSERT
269 HeapRegionSetBase* _containing_set;
270 #endif // ASSERT
294
295 void init_top_at_mark_start() {
// Reset both TAMS ("top at mark start") pointers back to bottom(), so the
// whole region counts as allocated since the previous/next marking
// (see obj_allocated_since_*_marking).
296 assert(_prev_marked_bytes == 0 &&
297 _next_marked_bytes == 0,
298 "Must be called after zero_marked_bytes.");
299 HeapWord* bot = bottom();
300 _prev_top_at_mark_start = bot;
301 _next_top_at_mark_start = bot;
302 }
303
304 // Cached attributes used in the collection set policy information
305
306 // The RSet length that was added to the total value
307 // for the collection set.
308 size_t _recorded_rs_length;
309
310 // The predicted elapsed time that was added to total value
311 // for the collection set.
312 double _predicted_elapsed_time_ms;
313
314 public:
315 HeapRegion(uint hrm_index,
316 G1BlockOffsetTable* bot,
317 MemRegion mr);
318
319 // Initializing the HeapRegion not only resets the data structure, but also
320 // resets the BOT for that heap region.
321 // The default values for clear_space means that we will do the clearing if
322 // there's clearing to be done ourselves. We also always mangle the space.
323 virtual void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);
324
325 static int LogOfHRGrainBytes;
326 static int LogOfHRGrainWords;
327
328 static size_t GrainBytes;
329 static size_t GrainWords;
330 static size_t CardsPerRegion;
331
332 static size_t align_up_to_region_byte_size(size_t sz) {
333 return (sz + (size_t) GrainBytes - 1) &
340 static bool is_in_same_region(T* p, oop obj) {
341 assert(p != NULL, "p can't be NULL");
342 assert(obj != NULL, "obj can't be NULL");
343 return (((uintptr_t) p ^ cast_from_oop<uintptr_t>(obj)) >> LogOfHRGrainBytes) == 0;
344 }
345
346 static size_t max_region_size();
347 static size_t min_region_size_in_words();
348
349 // It sets up the heap region size (GrainBytes / GrainWords), as
350 // well as other related fields that are based on the heap region
351 // size (LogOfHRGrainBytes / LogOfHRGrainWords /
352 // CardsPerRegion). All those fields are considered constant
353 // throughout the JVM's execution, therefore they should only be set
354 // up once during initialization time.
355 static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);
356
357 // All allocated blocks are occupied by objects in a HeapRegion
358 bool block_is_obj(const HeapWord* p) const;
359
360 // Returns the object size for all valid block starts
361 // and the amount of unallocated words if called on top()
362 size_t block_size(const HeapWord* p) const;
363
364 // Override for scan_and_forward support.
365 void prepare_for_compaction(CompactPoint* cp);
366
367 inline HeapWord* par_allocate_no_bot_updates(size_t min_word_size, size_t desired_word_size, size_t* word_size);
368 inline HeapWord* allocate_no_bot_updates(size_t word_size);
369 inline HeapWord* allocate_no_bot_updates(size_t min_word_size, size_t desired_word_size, size_t* actual_size);
370
371 // If this region is a member of a HeapRegionManager, the index in that
372 // sequence, otherwise -1.
373 uint hrm_index() const { return _hrm_index; }
374
375 // The number of bytes marked live in the region in the last marking phase.
376 size_t marked_bytes() { return _prev_marked_bytes; }
// Live bytes: bytes marked live below the prev TAMS, plus every word
// allocated after the prev TAMS (implicitly live since the last marking).
377 size_t live_bytes() {
378 return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
379 }
635 return (HeapWord *) obj >= prev_top_at_mark_start();
636 }
637 bool obj_allocated_since_next_marking(oop obj) const {
// An object at or above the next TAMS was allocated after the
// next/in-progress marking started.
638 return (HeapWord *) obj >= next_top_at_mark_start();
639 }
640
641 // Returns the "evacuation_failed" property of the region.
642 bool evacuation_failed() { return _evacuation_failed; }
643
644 // Sets the "evacuation_failed" property of the region.
645 void set_evacuation_failed(bool b) {
646 _evacuation_failed = b;
647
648 if (b) {
// On failure, discard the live-byte count gathered so far for the
// next marking.
649 _next_marked_bytes = 0;
650 }
651 }
652
653 // Iterate over the objects overlapping part of a card, applying cl
654 // to all references in the region. This is a helper for
655 // G1RemSet::refine_card, and is tightly coupled with it.
656 // mr: the memory region covered by the card, trimmed to the
657 // allocated space for this region. Must not be empty.
658 // This region must be old or humongous.
659 // Returns true if the designated objects were successfully
660 // processed, false if an unparsable part of the heap was
661 // encountered; that only happens when invoked concurrently with the
662 // mutator.
663 bool oops_on_card_seq_iterate_careful(MemRegion mr,
664 G1UpdateRSOrPushRefOopClosure* cl);
665
// Accessors for the cached collection-set policy values declared above
// (_recorded_rs_length / _predicted_elapsed_time_ms).
666 size_t recorded_rs_length() const { return _recorded_rs_length; }
667 double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
668
669 void set_recorded_rs_length(size_t rs_length) {
670 _recorded_rs_length = rs_length;
671 }
672
673 void set_predicted_elapsed_time_ms(double ms) {
674 _predicted_elapsed_time_ms = ms;
675 }
676
677 virtual CompactibleSpace* next_compaction_space() const;
678
679 virtual void reset_after_compaction();
680
681 // Routines for managing a list of code roots (attached to the
682 // this region's RSet) that point into this heap region.
683 void add_strong_code_root(nmethod* nm);
684 void add_strong_code_root_locked(nmethod* nm);
|
39 // can be collected independently.
40
41 // NOTE: Although a HeapRegion is a Space, its
42 // Space::initDirtyCardClosure method must not be called.
43 // The problem is that the existence of this method breaks
44 // the independence of barrier sets from remembered sets.
45 // The solution is to remove this method from the definition
46 // of a Space.
47
48 // Each heap region is self contained. top() and end() can never
49 // be set beyond the end of the region. For humongous objects,
50 // the first region is a StartsHumongous region. If the humongous
51 // object is larger than a heap region, the following regions will
52 // be of type ContinuesHumongous. In this case the top() of the
53 // StartsHumongous region and all ContinuesHumongous regions except
54 // the last will point to their own end. The last ContinuesHumongous
55 // region may have top() equal to the end of the object if there isn't
56 // room for filler objects to pad out to the end of the region.
57
58 class G1CollectedHeap;
59 class G1CMBitMapRO;
60 class HeapRegionRemSet;
61 class HeapRegionRemSetIterator;
62 class HeapRegion;
63 class HeapRegionSetBase;
64 class nmethod;
65
66 #define HR_FORMAT "%u:(%s)[" PTR_FORMAT "," PTR_FORMAT "," PTR_FORMAT "]"
67 #define HR_FORMAT_PARAMS(_hr_) \
68 (_hr_)->hrm_index(), \
69 (_hr_)->get_short_type_str(), \
70 p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end())
71
72 // sentinel value for hrm_index
73 #define G1_NO_HRM_INDEX ((uint) -1)
74
75 // A dirty card to oop closure for heap regions. It
76 // knows how to get the G1 heap and how to use the bitmap
77 // in the concurrent marker used by G1 to filter remembered
78 // sets.
79
232 // (Might want to make this "inline" later, to avoid some alloc failure
233 // issues.)
234 HeapRegionRemSet* _rem_set;
235
236 // Auxiliary functions for scan_and_forward support.
237 // See comments for CompactibleSpace for more information.
// Scanning during scan_and_forward stops at top(): all words below it are parsable.
238 inline HeapWord* scan_limit() const {
239 return top();
240 }
241
// Every block below scan_limit() (== top()) in a HeapRegion is an object.
242 inline bool scanned_block_is_obj(const HeapWord* addr) const {
243 return true; // Always true, since scan_limit is top
244 }
245
// Size of the block starting at addr, using the statically-bound
// HeapRegion::block_size to avoid a virtual dispatch.
246 inline size_t scanned_block_size(const HeapWord* addr) const {
247 return HeapRegion::block_size(addr); // Avoid virtual call
248 }
249
250 void report_region_type_change(G1HeapRegionTraceType::Type to);
251
252 // Returns whether the given object address refers to a dead object, and either the
253 // size of the object (if live) or the size of the block (if dead) in *size.
254 // May
255 // - only be called with obj < top()
256 // - not be called on humongous objects or archive regions
257 inline bool is_obj_dead_with_size(const oop obj, G1CMBitMapRO* prev_bitmap, size_t* size) const;
258
259 protected:
260 // The index of this region in the heap region sequence.
261 uint _hrm_index;
262
263 AllocationContext_t _allocation_context;
264
265 HeapRegionType _type;
266
267 // For a humongous region, region in which it starts.
268 HeapRegion* _humongous_start_region;
269
270 // True iff an attempt to evacuate an object in the region failed.
271 bool _evacuation_failed;
272
273 // Fields used by the HeapRegionSetBase class and subclasses.
274 HeapRegion* _next;
275 HeapRegion* _prev;
276 #ifdef ASSERT
277 HeapRegionSetBase* _containing_set;
278 #endif // ASSERT
302
303 void init_top_at_mark_start() {
// Reset both TAMS ("top at mark start") pointers back to bottom(), so the
// whole region counts as allocated since the previous/next marking
// (see obj_allocated_since_*_marking).
304 assert(_prev_marked_bytes == 0 &&
305 _next_marked_bytes == 0,
306 "Must be called after zero_marked_bytes.");
307 HeapWord* bot = bottom();
308 _prev_top_at_mark_start = bot;
309 _next_top_at_mark_start = bot;
310 }
311
312 // Cached attributes used in the collection set policy information
313
314 // The RSet length that was added to the total value
315 // for the collection set.
316 size_t _recorded_rs_length;
317
318 // The predicted elapsed time that was added to total value
319 // for the collection set.
320 double _predicted_elapsed_time_ms;
321
322 // Iterate over the references in a humongous objects and apply the given closure
323 // to them.
324 // Humongous objects are allocated directly in the old-gen. So we need special
325 // handling for concurrent processing encountering an in-progress allocation.
326 template <class Closure, bool is_gc_active>
327 inline bool do_oops_on_card_in_humongous(MemRegion mr,
328 Closure* cl,
329 G1CollectedHeap* g1h);
330
331 // Returns the block size of the given (dead, potentially having its class unloaded) object
332 // starting at p extending to at most the prev TAMS using the given mark bitmap.
333 inline size_t block_size_using_bitmap(const HeapWord* p, const G1CMBitMapRO* prev_bitmap) const;
334 public:
335 HeapRegion(uint hrm_index,
336 G1BlockOffsetTable* bot,
337 MemRegion mr);
338
339 // Initializing the HeapRegion not only resets the data structure, but also
340 // resets the BOT for that heap region.
341 // The default values for clear_space means that we will do the clearing if
342 // there's clearing to be done ourselves. We also always mangle the space.
343 virtual void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);
344
345 static int LogOfHRGrainBytes;
346 static int LogOfHRGrainWords;
347
348 static size_t GrainBytes;
349 static size_t GrainWords;
350 static size_t CardsPerRegion;
351
352 static size_t align_up_to_region_byte_size(size_t sz) {
353 return (sz + (size_t) GrainBytes - 1) &
360 static bool is_in_same_region(T* p, oop obj) {
361 assert(p != NULL, "p can't be NULL");
362 assert(obj != NULL, "obj can't be NULL");
363 return (((uintptr_t) p ^ cast_from_oop<uintptr_t>(obj)) >> LogOfHRGrainBytes) == 0;
364 }
365
366 static size_t max_region_size();
367 static size_t min_region_size_in_words();
368
369 // It sets up the heap region size (GrainBytes / GrainWords), as
370 // well as other related fields that are based on the heap region
371 // size (LogOfHRGrainBytes / LogOfHRGrainWords /
372 // CardsPerRegion). All those fields are considered constant
373 // throughout the JVM's execution, therefore they should only be set
374 // up once during initialization time.
375 static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);
376
377 // All allocated blocks are occupied by objects in a HeapRegion
378 bool block_is_obj(const HeapWord* p) const;
379
380 // Returns whether the given object is dead based on TAMS and bitmap.
381 bool is_obj_dead(const oop obj, const G1CMBitMapRO* prev_bitmap) const;
382
383 // Returns the object size for all valid block starts
384 // and the amount of unallocated words if called on top()
385 size_t block_size(const HeapWord* p) const;
386
387 // Override for scan_and_forward support.
388 void prepare_for_compaction(CompactPoint* cp);
389
390 inline HeapWord* par_allocate_no_bot_updates(size_t min_word_size, size_t desired_word_size, size_t* word_size);
391 inline HeapWord* allocate_no_bot_updates(size_t word_size);
392 inline HeapWord* allocate_no_bot_updates(size_t min_word_size, size_t desired_word_size, size_t* actual_size);
393
394 // If this region is a member of a HeapRegionManager, the index in that
395 // sequence, otherwise -1.
396 uint hrm_index() const { return _hrm_index; }
397
398 // The number of bytes marked live in the region in the last marking phase.
399 size_t marked_bytes() { return _prev_marked_bytes; }
// Live bytes: bytes marked live below the prev TAMS, plus every word
// allocated after the prev TAMS (implicitly live since the last marking).
400 size_t live_bytes() {
401 return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
402 }
658 return (HeapWord *) obj >= prev_top_at_mark_start();
659 }
660 bool obj_allocated_since_next_marking(oop obj) const {
661 return (HeapWord *) obj >= next_top_at_mark_start();
662 }
663
664 // Returns the "evacuation_failed" property of the region.
665 bool evacuation_failed() { return _evacuation_failed; }
666
667 // Sets the "evacuation_failed" property of the region.
668 void set_evacuation_failed(bool b) {
669 _evacuation_failed = b;
670
671 if (b) {
// On failure, discard the live-byte count gathered so far for the
// next marking.
672 _next_marked_bytes = 0;
673 }
674 }
675
676 // Iterate over the objects overlapping part of a card, applying cl
677 // to all references in the region. This is a helper for
678 // G1RemSet::refine_card*, and is tightly coupled with them.
679 // mr is the memory region covered by the card, trimmed to the
680 // allocated space for this region. Must not be empty.
681 // This region must be old or humongous.
682 // Returns true if the designated objects were successfully
683 // processed, false if an unparsable part of the heap was
684 // encountered; that only happens when invoked concurrently with the
685 // mutator.
686 template <bool is_gc_active, class Closure>
687 inline bool oops_on_card_seq_iterate_careful(MemRegion mr, Closure* cl);
688
// Accessors for the cached collection-set policy values declared above
// (_recorded_rs_length / _predicted_elapsed_time_ms).
689 size_t recorded_rs_length() const { return _recorded_rs_length; }
690 double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
691
692 void set_recorded_rs_length(size_t rs_length) {
693 _recorded_rs_length = rs_length;
694 }
695
696 void set_predicted_elapsed_time_ms(double ms) {
697 _predicted_elapsed_time_ms = ms;
698 }
699
700 virtual CompactibleSpace* next_compaction_space() const;
701
702 virtual void reset_after_compaction();
703
704 // Routines for managing a list of code roots (attached to the
705 // this region's RSet) that point into this heap region.
706 void add_strong_code_root(nmethod* nm);
707 void add_strong_code_root_locked(nmethod* nm);
|