35 #include "utilities/macros.hpp"
36
37 #if INCLUDE_ALL_GCS
38
39 // A HeapRegion is the smallest piece of a G1CollectedHeap that
40 // can be collected independently.
41
42 // NOTE: Although a HeapRegion is a Space, its
43 // Space::initDirtyCardClosure method must not be called.
44 // The problem is that the existence of this method breaks
45 // the independence of barrier sets from remembered sets.
46 // The solution is to remove this method from the definition
47 // of a Space.
48
49 class CompactibleSpace;
50 class ContiguousSpace;
51 class HeapRegionRemSet;
52 class HeapRegionRemSetIterator;
53 class HeapRegion;
54 class HeapRegionSetBase;
55
// Format string (and matching argument-list macro) for printing a heap
// region: index, one-letter type tag, and [bottom, top, end) addresses.
// NOTE: C++11 parses "foo"BAR as a user-defined literal, so each string
// piece must be separated from the PTR_FORMAT macros by whitespace.
#define HR_FORMAT "%u:(%s)[" PTR_FORMAT "," PTR_FORMAT "," PTR_FORMAT "]"
#define HR_FORMAT_PARAMS(_hr_)  \
                (_hr_)->hrs_index(), \
                (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : \
                (_hr_)->startsHumongous() ? "HS" : \
                (_hr_)->continuesHumongous() ? "HC" : \
                !(_hr_)->is_empty() ? "O" : "F", \
                (_hr_)->bottom(), (_hr_)->top(), (_hr_)->end()
64
65 // sentinel value for hrs_index
66 #define G1_NULL_HRS_INDEX ((uint) -1)
67
68 // A dirty card to oop closure for heap regions. It
69 // knows how to get the G1 heap and how to use the bitmap
70 // in the concurrent marker used by G1 to filter remembered
71 // sets.
72
73 class HeapRegionDCTOC : public ContiguousSpaceDCTOC {
74 public:
  // Sets this region's young type (eden / survivor / not-young).
  // Performs no validation beyond the (currently disabled) assert.
  void set_young_type(YoungType new_type) {
    //assert(_young_type != new_type, "setting the same type" );
    // TODO: add more assertions here
    _young_type = new_type;
  }

  // Cached attributes used in the collection set policy information

  // The RSet length that was added to the total value
  // for the collection set.
  size_t _recorded_rs_length;

  // The predicted elapsed time that was added to total value
  // for the collection set.
  double _predicted_elapsed_time_ms;

  // The predicted number of bytes to copy that was added to
  // the total value for the collection set.
  size_t _predicted_bytes_to_copy;
339
 public:
  // Constructs the region with the given index, covering memory region
  // mr and using sharedOffsetArray for block-offset-table lookups.
  HeapRegion(uint hrs_index,
             G1BlockOffsetSharedArray* sharedOffsetArray,
             MemRegion mr);

  // log2 of the region size, in bytes and in HeapWords respectively.
  static int LogOfHRGrainBytes;
  static int LogOfHRGrainWords;

  // Region size in bytes / HeapWords, and the number of cards covering
  // one region. Set once, by setup_heap_region_size() (see below).
  static size_t GrainBytes;
  static size_t GrainWords;
  static size_t CardsPerRegion;
351
352 static size_t align_up_to_region_byte_size(size_t sz) {
353 return (sz + (size_t) GrainBytes - 1) &
354 ~((1 << (size_t) LogOfHRGrainBytes) - 1);
355 }
356
  // It sets up the heap region size (GrainBytes / GrainWords), as
  // well as other related fields that are based on the heap region
  // size (LogOfHRGrainBytes / LogOfHRGrainWords /
  // CardsPerRegion). All those fields are considered constant
  // throughout the JVM's execution, therefore they should only be set
  // up once during initialization time.
  static void setup_heap_region_size(uintx min_heap_size);

  // Distinct per-operation claim values; presumably each parallel
  // phase uses its own value to claim a region exactly once --
  // confirm against the claim-value users in the G1 collector.
  enum ClaimValues {
    InitialClaimValue          = 0,
    FinalCountClaimValue       = 1,
    NoteEndClaimValue          = 2,
    ScrubRemSetClaimValue      = 3,
    ParVerifyClaimValue        = 4,
    RebuildRSClaimValue        = 5,
    ParEvacFailureClaimValue   = 6,
    AggregateCountClaimValue   = 7,
    VerifyCountClaimValue      = 8
  };
376
  // Allocate word_size HeapWords in this region, skipping the block
  // offset table (BOT) update; the assert restricts this to young
  // regions. The par_ variant delegates to ContiguousSpace's parallel
  // allocator, the other to the serial one.
  inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
    assert(is_young(), "we can only skip BOT updates on young regions");
    return ContiguousSpace::par_allocate(word_size);
  }
  inline HeapWord* allocate_no_bot_updates(size_t word_size) {
    assert(is_young(), "we can only skip BOT updates on young regions");
    return ContiguousSpace::allocate(word_size);
  }

  // If this region is a member of a HeapRegionSeq, the index in that
  // sequence, otherwise -1.
  uint hrs_index() const { return _hrs_index; }

  // The number of bytes marked live in the region in the last marking phase.
  size_t marked_bytes() { return _prev_marked_bytes; }
392 size_t live_bytes() {
393 return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
394 }
  // Cache the RSet length recorded for this region when it was added
  // to the collection set.
  void set_recorded_rs_length(size_t rs_length) {
    _recorded_rs_length = rs_length;
  }

  // Cache the predicted elapsed time (in ms) recorded for this region
  // when it was added to the collection set.
  void set_predicted_elapsed_time_ms(double ms) {
    _predicted_elapsed_time_ms = ms;
  }

  // Cache the predicted number of bytes to copy recorded for this
  // region when it was added to the collection set.
  void set_predicted_bytes_to_copy(size_t bytes) {
    _predicted_bytes_to_copy = bytes;
  }
790
// Declares the specialized oop_since_save_marks_iterate variants for
// each closure type listed in SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES.
#define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)

  // CompactibleSpace override; behavior defined in the .cpp file.
  virtual CompactibleSpace* next_compaction_space() const;

  // CompactibleSpace override, run after a full-GC compaction.
  virtual void reset_after_compaction();

  // Printing support.
  void print() const;
  void print_on(outputStream* st) const;

  // vo == UsePrevMarking -> use "prev" marking information,
  // vo == UseNextMarking -> use "next" marking information
  // vo == UseMarkWord -> use the mark word in the object header
  //
  // NOTE: Only the "prev" marking information is guaranteed to be
  // consistent most of the time, so most calls to this should use
  // vo == UsePrevMarking.
  // Currently, there is only one case where this is called with
  // vo == UseNextMarking, which is to verify the "next" marking
  // information at the end of remark.
  // Currently there is only one place where this is called with
  // vo == UseMarkWord, which is to verify the marking during a
  // full GC.
  void verify(VerifyOption vo, bool *failures) const;

  // Override; it uses the "prev" marking information
  virtual void verify() const;
|
35 #include "utilities/macros.hpp"
36
37 #if INCLUDE_ALL_GCS
38
39 // A HeapRegion is the smallest piece of a G1CollectedHeap that
40 // can be collected independently.
41
42 // NOTE: Although a HeapRegion is a Space, its
43 // Space::initDirtyCardClosure method must not be called.
44 // The problem is that the existence of this method breaks
45 // the independence of barrier sets from remembered sets.
46 // The solution is to remove this method from the definition
47 // of a Space.
48
49 class CompactibleSpace;
50 class ContiguousSpace;
51 class HeapRegionRemSet;
52 class HeapRegionRemSetIterator;
53 class HeapRegion;
54 class HeapRegionSetBase;
55 class nmethod;
56
// Format string (and matching argument-list macro) for printing a heap
// region: index, one-letter type tag, and [bottom, top, end) addresses.
// NOTE: C++11 parses "foo"BAR as a user-defined literal, so each string
// piece must be separated from the PTR_FORMAT macros by whitespace.
#define HR_FORMAT "%u:(%s)[" PTR_FORMAT "," PTR_FORMAT "," PTR_FORMAT "]"
#define HR_FORMAT_PARAMS(_hr_)  \
                (_hr_)->hrs_index(), \
                (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : \
                (_hr_)->startsHumongous() ? "HS" : \
                (_hr_)->continuesHumongous() ? "HC" : \
                !(_hr_)->is_empty() ? "O" : "F", \
                (_hr_)->bottom(), (_hr_)->top(), (_hr_)->end()
65
66 // sentinel value for hrs_index
67 #define G1_NULL_HRS_INDEX ((uint) -1)
68
69 // A dirty card to oop closure for heap regions. It
70 // knows how to get the G1 heap and how to use the bitmap
71 // in the concurrent marker used by G1 to filter remembered
72 // sets.
73
74 class HeapRegionDCTOC : public ContiguousSpaceDCTOC {
75 public:
  // Sets this region's young type (eden / survivor / not-young).
  // Performs no validation beyond the (currently disabled) assert.
  void set_young_type(YoungType new_type) {
    //assert(_young_type != new_type, "setting the same type" );
    // TODO: add more assertions here
    _young_type = new_type;
  }

  // Cached attributes used in the collection set policy information

  // The RSet length that was added to the total value
  // for the collection set.
  size_t _recorded_rs_length;

  // The predicted elapsed time that was added to total value
  // for the collection set.
  double _predicted_elapsed_time_ms;

  // The predicted number of bytes to copy that was added to
  // the total value for the collection set.
  size_t _predicted_bytes_to_copy;

  // A list of code blobs (nmethods) whose code contains pointers into this region
  GrowableArray<nmethod*>* _strong_code_root_list;
343
 public:
  // Constructs the region with the given index, covering memory region
  // mr and using sharedOffsetArray for block-offset-table lookups.
  HeapRegion(uint hrs_index,
             G1BlockOffsetSharedArray* sharedOffsetArray,
             MemRegion mr);

  // log2 of the region size, in bytes and in HeapWords respectively.
  static int LogOfHRGrainBytes;
  static int LogOfHRGrainWords;

  // Region size in bytes / HeapWords, and the number of cards covering
  // one region. Set once, by setup_heap_region_size() (see below).
  static size_t GrainBytes;
  static size_t GrainWords;
  static size_t CardsPerRegion;
355
356 static size_t align_up_to_region_byte_size(size_t sz) {
357 return (sz + (size_t) GrainBytes - 1) &
358 ~((1 << (size_t) LogOfHRGrainBytes) - 1);
359 }
360
  // It sets up the heap region size (GrainBytes / GrainWords), as
  // well as other related fields that are based on the heap region
  // size (LogOfHRGrainBytes / LogOfHRGrainWords /
  // CardsPerRegion). All those fields are considered constant
  // throughout the JVM's execution, therefore they should only be set
  // up once during initialization time.
  static void setup_heap_region_size(uintx min_heap_size);

  // Distinct per-operation claim values; presumably each parallel
  // phase uses its own value to claim a region exactly once --
  // confirm against the claim-value users in the G1 collector.
  enum ClaimValues {
    InitialClaimValue          = 0,
    FinalCountClaimValue       = 1,
    NoteEndClaimValue          = 2,
    ScrubRemSetClaimValue      = 3,
    ParVerifyClaimValue        = 4,
    RebuildRSClaimValue        = 5,
    ParEvacFailureClaimValue   = 6,
    AggregateCountClaimValue   = 7,
    VerifyCountClaimValue      = 8,
    ParMarkRootClaimValue      = 9
  };
381
  // Allocate word_size HeapWords in this region, skipping the block
  // offset table (BOT) update; the assert restricts this to young
  // regions. The par_ variant delegates to ContiguousSpace's parallel
  // allocator, the other to the serial one.
  inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
    assert(is_young(), "we can only skip BOT updates on young regions");
    return ContiguousSpace::par_allocate(word_size);
  }
  inline HeapWord* allocate_no_bot_updates(size_t word_size) {
    assert(is_young(), "we can only skip BOT updates on young regions");
    return ContiguousSpace::allocate(word_size);
  }

  // If this region is a member of a HeapRegionSeq, the index in that
  // sequence, otherwise -1.
  uint hrs_index() const { return _hrs_index; }

  // The number of bytes marked live in the region in the last marking phase.
  size_t marked_bytes() { return _prev_marked_bytes; }
397 size_t live_bytes() {
398 return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
399 }
  // Cache the RSet length recorded for this region when it was added
  // to the collection set.
  void set_recorded_rs_length(size_t rs_length) {
    _recorded_rs_length = rs_length;
  }

  // Cache the predicted elapsed time (in ms) recorded for this region
  // when it was added to the collection set.
  void set_predicted_elapsed_time_ms(double ms) {
    _predicted_elapsed_time_ms = ms;
  }

  // Cache the predicted number of bytes to copy recorded for this
  // region when it was added to the collection set.
  void set_predicted_bytes_to_copy(size_t bytes) {
    _predicted_bytes_to_copy = bytes;
  }
795
// Declares the specialized oop_since_save_marks_iterate variants for
// each closure type listed in SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES.
#define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)

  // CompactibleSpace override; behavior defined in the .cpp file.
  virtual CompactibleSpace* next_compaction_space() const;

  // CompactibleSpace override, run after a full-GC compaction.
  virtual void reset_after_compaction();

  // Routines for managing the list of code roots that point into
  // this heap region.
  void add_strong_code_root(nmethod* nm);
  void remove_strong_code_root(nmethod* nm);

  // Direct access to the backing list; may be NULL until the first
  // root is added -- confirm against add_strong_code_root's impl.
  GrowableArray<nmethod*>* strong_code_root_list() {
    return _strong_code_root_list;
  }

  // During a collection, migrate successfully evacuated strong
  // code roots attached to this region to the new regions that
  // they point into. Unsuccessfully evacuated code roots are
  // not migrated.
  void migrate_strong_code_roots();

  // Applies blk->do_code_blob() to each of the entries in
  // the strong code roots list.
  void strong_code_roots_do(CodeBlobClosure* blk) const;

  // Returns the amount of memory, in bytes, currently
  // consumed by the strong code roots.
  size_t strong_code_root_mem_size();

  // Verify that the entries on the strong code root list are live and
  // include at least one pointer into this region.
  void verify_strong_code_roots(VerifyOption vo, bool* failures) const;

  // Printing support.
  void print() const;
  void print_on(outputStream* st) const;

  // vo == UsePrevMarking -> use "prev" marking information,
  // vo == UseNextMarking -> use "next" marking information
  // vo == UseMarkWord -> use the mark word in the object header
  //
  // NOTE: Only the "prev" marking information is guaranteed to be
  // consistent most of the time, so most calls to this should use
  // vo == UsePrevMarking.
  // Currently, there is only one case where this is called with
  // vo == UseNextMarking, which is to verify the "next" marking
  // information at the end of remark.
  // Currently there is only one place where this is called with
  // vo == UseMarkWord, which is to verify the marking during a
  // full GC.
  void verify(VerifyOption vo, bool *failures) const;

  // Override; it uses the "prev" marking information
  virtual void verify() const;
|