 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP

#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1Allocator.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/evacuationInfo.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1BiasedArray.hpp"
#include "gc_implementation/g1/g1HRPrinter.hpp"
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
#include "gc_implementation/g1/heapRegionManager.hpp"
#include "gc_implementation/g1/heapRegionSet.hpp"
#include "gc_implementation/shared/hSpaceCounters.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "memory/barrierSet.hpp"
#include "memory/memRegion.hpp"
#include "memory/sharedHeap.hpp"
#include "utilities/stack.hpp"

// A "G1CollectedHeap" is an implementation of a Java heap for HotSpot.
// It uses the "Garbage First" heap organization and algorithm, which
// may combine concurrent marking with parallel, incremental compaction of
// heap subsets that will yield large amounts of garbage.

// Forward declarations
class HeapRegion;
class HRRSCleanupTask;
                                              int* gclocker_retry_count_ret);

  // Allocation attempt that should be called during safepoints (e.g.,
  // at the end of a successful GC). expect_null_mutator_alloc_region
  // specifies whether the mutator alloc region is expected to be NULL
  // or not.
  HeapWord* attempt_allocation_at_safepoint(size_t word_size,
                                            AllocationContext_t context,
                                            bool expect_null_mutator_alloc_region);
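
  // Illustrative sketch only (hypothetical caller, not part of this class):
  //
  //   HeapWord* result =
  //     attempt_allocation_at_safepoint(word_size, context,
  //                                     false /* expect_null_mutator_alloc_region */);
  //   if (result == NULL) {
  //     // e.g. expand the heap and retry, or report the failure
  //   }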

  // Dirties the cards that cover the block so that the post
  // write barrier never queues anything when updating objects on this
  // block. It is assumed (and in fact we assert) that the block
  // belongs to a young region.
  inline void dirty_young_block(HeapWord* start, size_t word_size);
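
  // As a rough sketch of the idea (the actual definition lives in the
  // .inline.hpp file; the g1_mark_as_young() barrier-set call is assumed):
  //
  //   void G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  //     MemRegion mr(start, word_size);
  //     g1_barrier_set()->g1_mark_as_young(mr);  // pre-dirty the covered cards
  //   }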

  // Allocate blocks during garbage collection. Will ensure an
  // allocation region, either by picking one or expanding the
  // heap, and then allocate a block of the given size. The block
  // may not be humongous; it must fit into a single heap region.
  HeapWord* par_allocate_during_gc(GCAllocPurpose purpose,
                                   size_t word_size,
                                   AllocationContext_t context);

  HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
                                    HeapRegion* alloc_region,
                                    bool par,
                                    size_t word_size);
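
  // Illustrative sketch only (hypothetical GC-worker call site):
  //
  //   HeapWord* buf = par_allocate_during_gc(GCAllocForSurvived, word_size, context);
  //   if (buf == NULL) {
  //     // no space even after picking or expanding an alloc region
  //   }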

  // Ensure that no further allocations can happen in "r", bearing in mind
  // that parallel threads might be attempting allocations.
  void par_allocate_remaining_space(HeapRegion* r);

  // Allocation attempt during GC for a survivor object / PLAB.
  inline HeapWord* survivor_attempt_allocation(size_t word_size,
                                               AllocationContext_t context);

  // Allocation attempt during GC for an old object / PLAB.
  inline HeapWord* old_attempt_allocation(size_t word_size,
                                          AllocationContext_t context);

  // These methods are the "callbacks" from the G1AllocRegion class.

  // For mutator alloc regions.
  HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
  void retire_mutator_alloc_region(HeapRegion* alloc_region,
                                   size_t allocated_bytes);

  // For GC alloc regions.
  HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
                                  GCAllocPurpose ap);
  void retire_gc_alloc_region(HeapRegion* alloc_region,
                              size_t allocated_bytes, GCAllocPurpose ap);

  // - if explicit_gc is true, the GC is for a System.gc() or a heap
  //   inspection request and should collect the entire heap
  // - if clear_all_soft_refs is true, all soft references should be
  //   cleared during the GC
  // - if explicit_gc is false, word_size describes the allocation that
  //   the GC should attempt (at least) to satisfy
  // - it returns false if it is unable to do the collection due to the
  //   GC locker being active, true otherwise
  bool do_collection(bool explicit_gc,
                     bool clear_all_soft_refs,
                     size_t word_size);
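
  // Illustrative sketch only: the two typical invocation shapes
  // (hypothetical call sites):
  //
  //   // System.gc() / heap inspection: collect the entire heap.
  //   bool succeeded = do_collection(true  /* explicit_gc */,
  //                                  clear_all_soft_refs,
  //                                  0     /* word_size */);
  //
  //   // Allocation-driven full GC: satisfy at least word_size.
  //   bool succeeded = do_collection(false /* explicit_gc */,
  //                                  false /* clear_all_soft_refs */,
  //                                  word_size);
  //
  //   // succeeded == false means the GC locker was active and no GC ran.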

  // Callback from the VM_G1CollectFull operation.
  // Perform a full collection.
  virtual void do_full_collection(bool clear_all_soft_refs);

  // Resize the heap if necessary after a full collection. If this is
  // after a collect-for-allocation, "word_size" is the allocation size,
  // and will be considered part of the used portion of the heap.
  // after processing.
  void enqueue_discovered_references(uint no_of_gc_workers);

 public:

  G1Allocator* allocator() {
    return _allocator;
  }

  G1MonitoringSupport* g1mm() {
    assert(_g1mm != NULL, "should have been initialized");
    return _g1mm;
  }

  // Expand the garbage-first heap by at least the given size (in bytes!).
  // Returns true if the heap was expanded by the requested amount;
  // false otherwise.
  // (Rounds up to a HeapRegion boundary.)
  bool expand(size_t expand_bytes);
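
  // The rounding, as a sketch (HeapRegion::GrainBytes is the region size;
  // the real code also page-aligns the request first):
  //
  //   size_t aligned_expand_bytes = align_size_up(expand_bytes,
  //                                               HeapRegion::GrainBytes);
  //   uint num_regions = (uint) (aligned_expand_bytes / HeapRegion::GrainBytes);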

  // Returns the PLAB statistics for the given purpose.
  PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
    PLABStats* stats = NULL;

    switch (purpose) {
    case GCAllocForSurvived:
      stats = &_survivor_plab_stats;
      break;
    case GCAllocForTenured:
      stats = &_old_plab_stats;
      break;
    default:
      assert(false, "unrecognized GCAllocPurpose");
    }

    return stats;
  }
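
  // Illustrative sketch only: a typical end-of-GC use, resizing future PLABs
  // from the collected statistics (hypothetical call site):
  //
  //   stats_for_purpose(GCAllocForSurvived)->adjust_desired_plab_sz(n_workers);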

  // Determines PLAB size for a particular allocation purpose.
  size_t desired_plab_sz(GCAllocPurpose purpose);

  inline AllocationContextStats& allocation_context_stats();

  // Do anything common to GCs.
  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);
670 inline void set_humongous_is_live(oop obj);
671
672 bool humongous_is_live(uint region) {
673 return _humongous_is_live.is_live(region);
674 }
675
676 // Returns whether the given region (which must be a humongous (start) region)
677 // is to be considered conservatively live regardless of any other conditions.
678 bool humongous_region_is_always_live(uint index);
679 // Register the given region to be part of the collection set.
680 inline void register_humongous_region_with_in_cset_fast_test(uint index);
681 // Register regions with humongous objects (actually on the start region) in
682 // the in_cset_fast_test table.
683 void register_humongous_regions_with_in_cset_fast_test();
  // We register a region with the fast "in collection set" test: we
  // simply set the array slot corresponding to this region to the
  // in-cset state.
  void register_region_with_in_cset_fast_test(HeapRegion* r) {
    _in_cset_fast_test.set_in_cset(r->hrm_index());
  }
689
690 // This is a fast test on whether a reference points into the
691 // collection set or not. Assume that the reference
692 // points into the heap.
693 inline bool in_cset_fast_test(oop obj);
694
695 void clear_cset_fast_test() {
696 _in_cset_fast_test.clear();
697 }
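
  // Illustrative sketch only (hypothetical call site in an oop closure):
  //
  //   if (in_cset_fast_test(obj)) {
  //     // obj lives in a region being evacuated: forward/copy it
  //   }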

  // This is called at the start of either a concurrent cycle or a Full
  // GC to update the number of old marking cycles started.
  void increment_old_marking_cycles_started();

  // This is called at the end of either a concurrent cycle or a Full
  // GC to update the number of old marking cycles completed. Those two
  // can happen in a nested fashion, i.e., we start a concurrent
  // cycle, a Full GC happens half-way through it which ends first,
  // and then the cycle notices that a Full GC happened and ends
  // or "next".
  bool verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
                                HeapWord* from, HeapWord* limit);

  // Verify that the prev / next bitmap range [tams,end) for the given
  // region has no marks. Return true if all is well, false if errors
  // are detected.
  bool verify_bitmaps(const char* caller, HeapRegion* hr);
#endif // PRODUCT

  // If G1VerifyBitmaps is set, verify that the marking bitmaps for
  // the given region do not have any spurious marks. If errors are
  // detected, print appropriate error messages and crash.
  void check_bitmaps(const char* caller, HeapRegion* hr) PRODUCT_RETURN;

  // If G1VerifyBitmaps is set, verify that the marking bitmaps do not
  // have any spurious marks. If errors are detected, print
  // appropriate error messages and crash.
  void check_bitmaps(const char* caller) PRODUCT_RETURN;

  // verify_region_sets() performs verification over the region
  // lists. It will be compiled in the product code to be used when
  // necessary (i.e., during heap verification).
  void verify_region_sets();

  // verify_region_sets_optional() is planted in the code for
  // list verification in non-product builds (and it can be enabled in
  // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
#if HEAP_REGION_SET_FORCE_VERIFY
  void verify_region_sets_optional() {
    verify_region_sets();
  }
#else // HEAP_REGION_SET_FORCE_VERIFY
  void verify_region_sets_optional() { }
#endif // HEAP_REGION_SET_FORCE_VERIFY
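
  // Illustrative only: enabling this in a product build means making the
  // macro evaluate to 1, e.g. (location of the definition assumed):
  //
  //   // heapRegionSet.hpp (or an equivalent build flag):
  //   #define HEAP_REGION_SET_FORCE_VERIFY 1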

#ifdef ASSERT
  bool is_on_master_free_list(HeapRegion* hr) {
    return _hrm.is_free(hr);
  }
  void remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,
                            const HeapRegionSetCount& humongous_regions_removed);
  void prepend_to_freelist(FreeRegionList* list);
  void decrement_summary_bytes(size_t bytes);

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  virtual bool is_in(const void* p) const;
#ifdef ASSERT
  // Returns whether p is in one of the available areas of the heap. Slow but
  // extensive version.
  bool is_in_exact(const void* p) const;
#endif

  // Return "TRUE" iff the given object address is within the collection
  // set. Slow implementation.
  inline bool obj_in_cs(oop obj);

  inline bool is_in_cset(oop obj);

  inline bool is_in_cset_or_humongous(const oop obj);

  enum in_cset_state_t {
    InNeither,   // neither in collection set nor humongous
    InCSet,      // region is in collection set only
    IsHumongous  // region is a humongous start region
  };
 private:
  // Instances of this class are used for quick tests on whether a reference points
  // into the collection set or is a humongous object (points into a humongous
  // object).
  // Each of the array's elements denotes whether the corresponding region is in
  // the collection set or a humongous region.
  // We use this to quickly reclaim humongous objects: by making a humongous region
  // succeed this test, we sort-of add it to the collection set. During the reference
  // iteration closures, when we see a humongous region, we simply mark it as
  // referenced, i.e. live.
  class G1FastCSetBiasedMappedArray : public G1BiasedMappedArray<char> {
   protected:
    char default_value() const { return G1CollectedHeap::InNeither; }
   public:
    void set_humongous(uintptr_t index) {
      assert(get_by_index(index) != InCSet, "Should not overwrite InCSet values");
      set_by_index(index, G1CollectedHeap::IsHumongous);
    }

    void clear_humongous(uintptr_t index) {
      set_by_index(index, G1CollectedHeap::InNeither);
    }

    void set_in_cset(uintptr_t index) {
      assert(get_by_index(index) != G1CollectedHeap::IsHumongous, "Should not overwrite IsHumongous value");
      set_by_index(index, G1CollectedHeap::InCSet);
    }

    bool is_in_cset_or_humongous(HeapWord* addr) const { return get_by_address(addr) != G1CollectedHeap::InNeither; }
    bool is_in_cset(HeapWord* addr) const { return get_by_address(addr) == G1CollectedHeap::InCSet; }
    G1CollectedHeap::in_cset_state_t at(HeapWord* addr) const { return (G1CollectedHeap::in_cset_state_t)get_by_address(addr); }
    void clear() { G1BiasedMappedArray<char>::clear(); }
  };
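
  // What the biased array buys us, sketched (indexing details assumed from
  // G1BiasedMappedArray): one char entry per region, so the in-cset test is
  // a single dependent load:
  //
  //   // get_by_address(addr) is roughly
  //   //   _entries[((uintptr_t) addr >> HeapRegion::LogOfHRGrainBytes) - _bias]
  //   bool in = _in_cset_fast_test.is_in_cset((HeapWord*) obj);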

  // This array is used for a quick test on whether a reference points into
  // the collection set or not. Each of the array's elements denotes whether the
  // corresponding region is in the collection set or not.
  G1FastCSetBiasedMappedArray _in_cset_fast_test;

 public:

  inline in_cset_state_t in_cset_state(const oop obj);

  // Return "TRUE" iff the given object address is in the reserved
  // region of g1.
  bool is_in_g1_reserved(const void* p) const {
    return _hrm.reserved().contains(p);
  }

  // Returns a MemRegion that corresponds to the space that has been
  // reserved for the heap.
  MemRegion g1_reserved() const {
    return _hrm.reserved();
  }

  virtual bool is_in_closed_subset(const void* p) const;

  G1SATBCardTableLoggingModRefBS* g1_barrier_set() {
    return (G1SATBCardTableLoggingModRefBS*) barrier_set();
  }

  // This resets the card table to all zeros. It is used after

// ============================================================================
// Updated version of the same header: the "new" side of this change, in which
// GCAllocPurpose has been replaced by InCSetState (see g1InCSetState.hpp).
// ============================================================================

 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP

#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1Allocator.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/evacuationInfo.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1BiasedArray.hpp"
#include "gc_implementation/g1/g1HRPrinter.hpp"
#include "gc_implementation/g1/g1InCSetState.hpp"
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
#include "gc_implementation/g1/heapRegionManager.hpp"
#include "gc_implementation/g1/heapRegionSet.hpp"
#include "gc_implementation/shared/hSpaceCounters.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "memory/barrierSet.hpp"
#include "memory/memRegion.hpp"
#include "memory/sharedHeap.hpp"
#include "utilities/stack.hpp"

47
48 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
49 // It uses the "Garbage First" heap organization and algorithm, which
50 // may combine concurrent marking with parallel, incremental compaction of
51 // heap subsets that will yield large amounts of garbage.
52
53 // Forward declarations
54 class HeapRegion;
55 class HRRSCleanupTask;
                                              int* gclocker_retry_count_ret);

  // Allocation attempt that should be called during safepoints (e.g.,
  // at the end of a successful GC). expect_null_mutator_alloc_region
  // specifies whether the mutator alloc region is expected to be NULL
  // or not.
  HeapWord* attempt_allocation_at_safepoint(size_t word_size,
                                            AllocationContext_t context,
                                            bool expect_null_mutator_alloc_region);

  // Dirties the cards that cover the block so that the post
  // write barrier never queues anything when updating objects on this
  // block. It is assumed (and in fact we assert) that the block
  // belongs to a young region.
  inline void dirty_young_block(HeapWord* start, size_t word_size);

  // Allocate blocks during garbage collection. Will ensure an
  // allocation region, either by picking one or expanding the
  // heap, and then allocate a block of the given size. The block
  // may not be humongous; it must fit into a single heap region.
  inline HeapWord* par_allocate_during_gc(InCSetState dest,
                                          size_t word_size,
                                          AllocationContext_t context);
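
  // Illustrative sketch only: the InCSetState-based equivalent of the old
  // per-purpose call; 'dest' is the copy destination decided during
  // evacuation (hypothetical call site):
  //
  //   HeapWord* buf = par_allocate_during_gc(dest, word_size, context);
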
  // Ensure that no further allocations can happen in "r", bearing in mind
  // that parallel threads might be attempting allocations.
  void par_allocate_remaining_space(HeapRegion* r);

  // Allocation attempt during GC for a survivor object / PLAB.
  inline HeapWord* survivor_attempt_allocation(size_t word_size,
                                               AllocationContext_t context);

  // Allocation attempt during GC for an old object / PLAB.
  inline HeapWord* old_attempt_allocation(size_t word_size,
                                          AllocationContext_t context);

  // These methods are the "callbacks" from the G1AllocRegion class.

  // For mutator alloc regions.
  HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
  void retire_mutator_alloc_region(HeapRegion* alloc_region,
                                   size_t allocated_bytes);

  // For GC alloc regions.
  HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
                                  InCSetState dest);
  void retire_gc_alloc_region(HeapRegion* alloc_region,
                              size_t allocated_bytes, InCSetState dest);

  // - if explicit_gc is true, the GC is for a System.gc() or a heap
  //   inspection request and should collect the entire heap
  // - if clear_all_soft_refs is true, all soft references should be
  //   cleared during the GC
  // - if explicit_gc is false, word_size describes the allocation that
  //   the GC should attempt (at least) to satisfy
  // - it returns false if it is unable to do the collection due to the
  //   GC locker being active, true otherwise
  bool do_collection(bool explicit_gc,
                     bool clear_all_soft_refs,
                     size_t word_size);

  // Callback from the VM_G1CollectFull operation.
  // Perform a full collection.
  virtual void do_full_collection(bool clear_all_soft_refs);

  // Resize the heap if necessary after a full collection. If this is
  // after a collect-for-allocation, "word_size" is the allocation size,
  // and will be considered part of the used portion of the heap.
  // after processing.
  void enqueue_discovered_references(uint no_of_gc_workers);

 public:

  G1Allocator* allocator() {
    return _allocator;
  }

  G1MonitoringSupport* g1mm() {
    assert(_g1mm != NULL, "should have been initialized");
    return _g1mm;
  }

  // Expand the garbage-first heap by at least the given size (in bytes!).
  // Returns true if the heap was expanded by the requested amount;
  // false otherwise.
  // (Rounds up to a HeapRegion boundary.)
  bool expand(size_t expand_bytes);

  // Returns the PLAB statistics for a given destination.
  inline PLABStats* alloc_buffer_stats(InCSetState dest);

  // Determines PLAB size for a given destination.
  inline size_t desired_plab_sz(InCSetState dest);
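
  // A plausible shape for these inlines, keyed by the destination state
  // (sketch; the real bodies would live in the .inline.hpp file):
  //
  //   PLABStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
  //     switch (dest.value()) {
  //       case InCSetState::Young: return &_survivor_plab_stats;
  //       case InCSetState::Old:   return &_old_plab_stats;
  //       default: ShouldNotReachHere(); return NULL;
  //     }
  //   }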

  inline AllocationContextStats& allocation_context_stats();

  // Do anything common to GCs.
  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);

  inline void set_humongous_is_live(oop obj);

  bool humongous_is_live(uint region) {
    return _humongous_is_live.is_live(region);
  }

  // Returns whether the given region (which must be a humongous (start) region)
  // is to be considered conservatively live regardless of any other conditions.
  bool humongous_region_is_always_live(uint index);
  // Register the given region to be part of the collection set.
  inline void register_humongous_region_with_in_cset_fast_test(uint index);
  // Register regions with humongous objects (actually on the start region) in
  // the in_cset_fast_test table.
  void register_humongous_regions_with_in_cset_fast_test();
  // We register a region with the fast "in collection set" test: we
  // simply set the array slot corresponding to this region to the
  // appropriate state (young or old).
  void register_young_region_with_in_cset_fast_test(HeapRegion* r) {
    _in_cset_fast_test.set_in_young(r->hrm_index());
  }
  void register_old_region_with_in_cset_fast_test(HeapRegion* r) {
    _in_cset_fast_test.set_in_old(r->hrm_index());
  }
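
  // Illustrative sketch only (hypothetical call sites while building the
  // collection set):
  //
  //   register_young_region_with_in_cset_fast_test(hr);  // eden / survivor
  //   register_old_region_with_in_cset_fast_test(hr);    // old regions added by cost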

  // This is a fast test on whether a reference points into the
  // collection set or not. Assume that the reference
  // points into the heap.
  inline bool in_cset_fast_test(oop obj);

  void clear_cset_fast_test() {
    _in_cset_fast_test.clear();
  }

  // This is called at the start of either a concurrent cycle or a Full
  // GC to update the number of old marking cycles started.
  void increment_old_marking_cycles_started();

  // This is called at the end of either a concurrent cycle or a Full
  // GC to update the number of old marking cycles completed. Those two
  // can happen in a nested fashion, i.e., we start a concurrent
  // cycle, a Full GC happens half-way through it which ends first,
  // and then the cycle notices that a Full GC happened and ends
  // or "next".
  bool verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
                                HeapWord* from, HeapWord* limit);

  // Verify that the prev / next bitmap range [tams,end) for the given
  // region has no marks. Return true if all is well, false if errors
  // are detected.
  bool verify_bitmaps(const char* caller, HeapRegion* hr);
#endif // PRODUCT

  // If G1VerifyBitmaps is set, verify that the marking bitmaps for
  // the given region do not have any spurious marks. If errors are
  // detected, print appropriate error messages and crash.
  void check_bitmaps(const char* caller, HeapRegion* hr) PRODUCT_RETURN;

  // If G1VerifyBitmaps is set, verify that the marking bitmaps do not
  // have any spurious marks. If errors are detected, print
  // appropriate error messages and crash.
  void check_bitmaps(const char* caller) PRODUCT_RETURN;

  // Do sanity check on the contents of the in-cset fast test table.
  bool check_cset_fast_test() PRODUCT_RETURN_( return true; );
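
  // Note: PRODUCT_RETURN_(code) (see utilities/macros.hpp) turns the line
  // above into, roughly:
  //
  //   #ifdef PRODUCT
  //     bool check_cset_fast_test() { return true; }  // no-op in product builds
  //   #else
  //     bool check_cset_fast_test();                  // real check defined elsewhere
  //   #endif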

  // verify_region_sets() performs verification over the region
  // lists. It will be compiled in the product code to be used when
  // necessary (i.e., during heap verification).
  void verify_region_sets();

  // verify_region_sets_optional() is planted in the code for
  // list verification in non-product builds (and it can be enabled in
  // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
#if HEAP_REGION_SET_FORCE_VERIFY
  void verify_region_sets_optional() {
    verify_region_sets();
  }
#else // HEAP_REGION_SET_FORCE_VERIFY
  void verify_region_sets_optional() { }
#endif // HEAP_REGION_SET_FORCE_VERIFY

#ifdef ASSERT
  bool is_on_master_free_list(HeapRegion* hr) {
    return _hrm.is_free(hr);
  }
  void remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,
                            const HeapRegionSetCount& humongous_regions_removed);
  void prepend_to_freelist(FreeRegionList* list);
  void decrement_summary_bytes(size_t bytes);

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  virtual bool is_in(const void* p) const;
#ifdef ASSERT
  // Returns whether p is in one of the available areas of the heap. Slow but
  // extensive version.
  bool is_in_exact(const void* p) const;
#endif

  // Return "TRUE" iff the given object address is within the collection
  // set. Slow implementation.
  inline bool obj_in_cs(oop obj);

  inline bool is_in_cset(oop obj);

  inline bool is_in_cset_or_humongous(const oop obj);

 private:
  // This array is used for a quick test on whether a reference points into
  // the collection set or not. Each of the array's elements denotes whether the
  // corresponding region is in the collection set or not.
  G1InCSetStateFastTestBiasedMappedArray _in_cset_fast_test;

 public:

  inline InCSetState in_cset_state(const oop obj);
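
  // Illustrative sketch only; accessor names assumed from g1InCSetState.hpp:
  //
  //   InCSetState state = in_cset_state(obj);
  //   if (state.is_in_cset()) {
  //     // copy obj to its destination (young or old)
  //   } else if (state.is_humongous()) {
  //     // keep the humongous object alive instead of copying it
  //   }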

  // Return "TRUE" iff the given object address is in the reserved
  // region of g1.
  bool is_in_g1_reserved(const void* p) const {
    return _hrm.reserved().contains(p);
  }

  // Returns a MemRegion that corresponds to the space that has been
  // reserved for the heap.
  MemRegion g1_reserved() const {
    return _hrm.reserved();
  }

  virtual bool is_in_closed_subset(const void* p) const;

  G1SATBCardTableLoggingModRefBS* g1_barrier_set() {
    return (G1SATBCardTableLoggingModRefBS*) barrier_set();
  }

  // This resets the card table to all zeros. It is used after