16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
27
28 #include "gc_implementation/g1/concurrentMark.hpp"
29 #include "gc_implementation/g1/evacuationInfo.hpp"
30 #include "gc_implementation/g1/g1AllocRegion.hpp"
31 #include "gc_implementation/g1/g1BiasedArray.hpp"
32 #include "gc_implementation/g1/g1HRPrinter.hpp"
33 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
34 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
35 #include "gc_implementation/g1/g1YCTypes.hpp"
36 #include "gc_implementation/g1/heapRegionSeq.hpp"
37 #include "gc_implementation/g1/heapRegionSet.hpp"
38 #include "gc_implementation/shared/hSpaceCounters.hpp"
39 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
40 #include "memory/barrierSet.hpp"
41 #include "memory/memRegion.hpp"
42 #include "memory/sharedHeap.hpp"
43 #include "utilities/stack.hpp"
44
45 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
46 // It uses the "Garbage First" heap organization and algorithm, which
47 // may combine concurrent marking with parallel, incremental compaction of
48 // heap subsets that will yield large amounts of garbage.
49
50 // Forward declarations
51 class HeapRegion;
52 class HRRSCleanupTask;
53 class GenerationSpec;
54 class OopsInHeapRegionClosure;
55 class G1KlassScanClosure;
56 class G1ScanHeapEvacClosure;
274 // Tears down the region sets / lists so that they are empty and the
275 // regions on the heap do not belong to a region set / list. The
276 // only exception is the humongous set which we leave unaltered. If
277 // free_list_only is true, it will only tear down the master free
278 // list. It is called before a Full GC (free_list_only == false) or
279 // before heap shrinking (free_list_only == true).
280 void tear_down_region_sets(bool free_list_only);
281
282 // Rebuilds the region sets / lists so that they are repopulated to
283 // reflect the contents of the heap. The only exception is the
284 // humongous set which was not torn down in the first place. If
285 // free_list_only is true, it will only rebuild the master free
286 // list. It is called after a Full GC (free_list_only == false) or
287 // after heap shrinking (free_list_only == true).
288 void rebuild_region_sets(bool free_list_only);
289
290 // Callback for region mapping changed events.
291 G1RegionMappingChangedListener _listener;
292
293 // The sequence of all heap regions in the heap.
294 HeapRegionSeq _hrs;
295
296 // Alloc region used to satisfy mutator allocation requests.
297 MutatorAllocRegion _mutator_alloc_region;
298
299 // Alloc region used to satisfy allocation requests by the GC for
300 // survivor objects.
301 SurvivorGCAllocRegion _survivor_gc_alloc_region;
302
303 // PLAB sizing policy for survivors.
304 PLABStats _survivor_plab_stats;
305
306 // Alloc region used to satisfy allocation requests by the GC for
307 // old objects.
308 OldGCAllocRegion _old_gc_alloc_region;
309
310 // PLAB sizing policy for tenured objects.
311 PLABStats _old_plab_stats;
312
313 PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
314 PLABStats* stats = NULL;
412
413 // Keeps track of how many "old marking cycles" (i.e., Full GCs or
414 // concurrent cycles) we have completed.
415 volatile unsigned int _old_marking_cycles_completed;
416
417 bool _concurrent_cycle_started;
418
419 // This is a non-product method that is helpful for testing. It is
420 // called at the end of a GC and artificially expands the heap by
421 // allocating a number of dead regions. This way we can induce very
422 // frequent marking cycles and stress the cleanup / concurrent
423 // cleanup code more (as all the regions that will be allocated by
424 // this method will be found dead by the marking cycle).
425 void allocate_dummy_regions() PRODUCT_RETURN;
426
427 // Clear RSets after a compaction. It also resets the GC time stamps.
428 void clear_rsets_post_compaction();
429
430 // If the HR printer is active, dump the state of the regions in the
431 // heap after a compaction.
432 void print_hrs_post_compaction();
433
434 double verify(bool guard, const char* msg);
435 void verify_before_gc();
436 void verify_after_gc();
437
438 void log_gc_header();
439 void log_gc_footer(double pause_time_sec);
440
441 // These are macros so that, if the assert fires, we get the correct
442 // line number, file, etc.
443
444 #define heap_locking_asserts_err_msg(_extra_message_) \
445 err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s", \
446 (_extra_message_), \
447 BOOL_TO_STR(Heap_lock->owned_by_self()), \
448 BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()), \
449 BOOL_TO_STR(Thread::current()->is_VM_thread()))
450
451 #define assert_heap_locked() \
452 do { \
698 virtual void gc_prologue(bool full);
699 virtual void gc_epilogue(bool full);
700
701 inline void set_humongous_is_live(oop obj);
702
// Whether the humongous object in the given region is (still)
// considered live, as recorded in the _humongous_is_live table.
bool humongous_is_live(uint region) {
  return _humongous_is_live.is_live(region);
}
706
707 // Returns whether the given region (which must be a humongous (start) region)
708 // is to be considered conservatively live regardless of any other conditions.
709 bool humongous_region_is_always_live(uint index);
710 // Register the given region to be part of the collection set.
711 inline void register_humongous_region_with_in_cset_fast_test(uint index);
712 // Register regions with humongous objects (actually on the start region) in
713 // the in_cset_fast_test table.
714 void register_humongous_regions_with_in_cset_fast_test();
715 // We register a region with the fast "in collection set" test. We
716 // simply set to true the array slot corresponding to this region.
void register_region_with_in_cset_fast_test(HeapRegion* r) {
  // Mark the table slot for r's region index as "in collection set".
  _in_cset_fast_test.set_in_cset(r->hrs_index());
}
720
721 // This is a fast test on whether a reference points into the
722 // collection set or not. Assume that the reference
723 // points into the heap.
724 inline bool in_cset_fast_test(oop obj);
725
// Resets every entry of the fast "in collection set" table.
void clear_cset_fast_test() {
  _in_cset_fast_test.clear();
}
729
730 // This is called at the start of either a concurrent cycle or a Full
731 // GC to update the number of old marking cycles started.
732 void increment_old_marking_cycles_started();
733
734 // This is called at the end of either a concurrent cycle or a Full
735 // GC to update the number of old marking cycles completed. Those two
736 // can happen in a nested fashion, i.e., we start a concurrent
737 // cycle, a Full GC happens half-way through it which ends first,
738 // and then the cycle notices that a Full GC happened and ends
1154 // The Concurrent Marking reference processor...
1155 ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
1156
1157 ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
1158 G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }
1159
1160 virtual size_t capacity() const;
1161 virtual size_t used() const;
1162 // This should be called when we're not holding the heap lock. The
1163 // result might be a bit inaccurate.
1164 size_t used_unlocked() const;
1165 size_t recalculate_used() const;
1166
1167 // These virtual functions do the actual allocation.
1168 // Some heaps may offer a contiguous region for shared non-blocking
1169 // allocation, via inlined code (by exporting the address of the top and
1170 // end fields defining the extent of the contiguous allocation region.)
1171 // But G1CollectedHeap doesn't yet support this.
1172
// The heap cannot grow any further without a GC once the region
// sequence reports that no regions are available.
virtual bool is_maximal_no_gc() const {
  return _hrs.available() == 0;
}
1176
// Region counting accessors; all counts are in whole heap regions
// and delegate to the region sequence (_hrs).

// The current number of regions in the heap.
uint num_regions() const { return _hrs.length(); }

// The max number of regions in the heap.
uint max_regions() const { return _hrs.max_length(); }

// The number of regions that are completely free.
uint num_free_regions() const { return _hrs.num_free_regions(); }

// The number of regions that are not completely free.
uint num_used_regions() const { return num_regions() - num_free_regions(); }
1188
1189 void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
1190 void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
1191 void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
1192 void verify_dirty_young_regions() PRODUCT_RETURN;
1193
1194 #ifndef PRODUCT
1195 // Make sure that the given bitmap has no marked objects in the
1196 // range [from,limit). If it does, print an error message and return
1197 // false. Otherwise, just return true. bitmap_name should be "prev"
1198 // or "next".
1199 bool verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
1200 HeapWord* from, HeapWord* limit);
1201
1202 // Verify that the prev / next bitmap range [tams,end) for the given
1203 // region has no marks. Return true if all is well, false if errors
1204 // are detected.
1216 void check_bitmaps(const char* caller) PRODUCT_RETURN;
1217
1218 // verify_region_sets() performs verification over the region
1219 // lists. It will be compiled in the product code to be used when
1220 // necessary (i.e., during heap verification).
1221 void verify_region_sets();
1222
1223 // verify_region_sets_optional() is planted in the code for
1224 // list verification in non-product builds (and it can be enabled in
1225 // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
#if HEAP_REGION_SET_FORCE_VERIFY
// Forced verification: run the full region set checks.
void verify_region_sets_optional() {
  verify_region_sets();
}
#else // HEAP_REGION_SET_FORCE_VERIFY
// Without forced verification this compiles to a no-op.
void verify_region_sets_optional() { }
#endif // HEAP_REGION_SET_FORCE_VERIFY
1233
#ifdef ASSERT
// Debug-only query: is hr currently on the master free list managed
// by the region sequence?
bool is_on_master_free_list(HeapRegion* hr) {
  return _hrs.is_free(hr);
}
#endif // ASSERT
1239
1240 // Wrapper for the region list operations that can be called from
1241 // methods outside this class.
1242
void secondary_free_list_add(FreeRegionList* list) {
  // Splice list into the secondary free list, preserving ordering.
  // NOTE(review): presumably callers hold SecondaryFreeList_lock —
  // confirm against the call sites.
  _secondary_free_list.add_ordered(list);
}
1246
void append_secondary_free_list() {
  // Move everything on the secondary free list into the master free
  // list kept by the region sequence.
  _hrs.insert_list_into_free_list(&_secondary_free_list);
}
1250
1251 void append_secondary_free_list_if_not_empty_with_lock() {
1252 // If the secondary free list looks empty there's no reason to
1253 // take the lock and then try to append it.
1254 if (!_secondary_free_list.is_empty()) {
1255 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
1256 append_secondary_free_list();
1257 }
1258 }
1259
1260 inline void old_set_remove(HeapRegion* hr);
1261
1262 size_t non_young_capacity_bytes() {
1263 return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
1264 }
1265
1266 void set_free_regions_coming();
1267 void reset_free_regions_coming();
1268 bool free_regions_coming() { return _free_regions_coming; }
1339 }
1340
// TRUE iff the region covering addr has any state other than InNeither.
bool is_in_cset_or_humongous(HeapWord* addr) const { return get_by_address(addr) != G1CollectedHeap::InNeither; }
// TRUE iff the region covering addr is in the collection set proper.
bool is_in_cset(HeapWord* addr) const { return get_by_address(addr) == G1CollectedHeap::InCSet; }
// The raw per-region state value for the region covering addr.
G1CollectedHeap::in_cset_state_t at(HeapWord* addr) const { return (G1CollectedHeap::in_cset_state_t)get_by_address(addr); }
// Reset the whole array (delegates to the base class).
void clear() { G1BiasedMappedArray<char>::clear(); }
1345 };
1346
1347 // This array is used for a quick test on whether a reference points into
1348 // the collection set or not. Each of the array's elements denotes whether the
1349 // corresponding region is in the collection set or not.
1350 G1FastCSetBiasedMappedArray _in_cset_fast_test;
1351
1352 public:
1353
1354 inline in_cset_state_t in_cset_state(const oop obj);
1355
1356 // Return "TRUE" iff the given object address is in the reserved
1357 // region of g1.
1358 bool is_in_g1_reserved(const void* p) const {
1359 return _hrs.reserved().contains(p);
1360 }
1361
1362 // Returns a MemRegion that corresponds to the space that has been
1363 // reserved for the heap
1364 MemRegion g1_reserved() const {
1365 return _hrs.reserved();
1366 }
1367
1368 virtual bool is_in_closed_subset(const void* p) const;
1369
G1SATBCardTableLoggingModRefBS* g1_barrier_set() {
  // NOTE(review): unchecked downcast — assumes the heap was set up
  // with a G1SATBCardTableLoggingModRefBS barrier set; confirm at the
  // heap construction site.
  return (G1SATBCardTableLoggingModRefBS*) barrier_set();
}
1373
1374 // This resets the card table to all zeros. It is used after
1375 // a collection pause which used the card table to claim cards.
1376 void cleanUpCardTable();
1377
1378 // Iteration functions.
1379
1380 // Iterate over all the ref-containing fields of all objects, calling
1381 // "cl.do_oop" on each.
1382 virtual void oop_iterate(ExtendedOopClosure* cl);
1383
1384 // Iterate over all objects, calling "cl.do_object" on each.
1385 virtual void object_iterate(ObjectClosure* cl);
|
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
27
28 #include "gc_implementation/g1/concurrentMark.hpp"
29 #include "gc_implementation/g1/evacuationInfo.hpp"
30 #include "gc_implementation/g1/g1AllocRegion.hpp"
31 #include "gc_implementation/g1/g1BiasedArray.hpp"
32 #include "gc_implementation/g1/g1HRPrinter.hpp"
33 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
34 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
35 #include "gc_implementation/g1/g1YCTypes.hpp"
36 #include "gc_implementation/g1/heapRegionManager.hpp"
37 #include "gc_implementation/g1/heapRegionSet.hpp"
38 #include "gc_implementation/shared/hSpaceCounters.hpp"
39 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
40 #include "memory/barrierSet.hpp"
41 #include "memory/memRegion.hpp"
42 #include "memory/sharedHeap.hpp"
43 #include "utilities/stack.hpp"
44
45 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
46 // It uses the "Garbage First" heap organization and algorithm, which
47 // may combine concurrent marking with parallel, incremental compaction of
48 // heap subsets that will yield large amounts of garbage.
49
50 // Forward declarations
51 class HeapRegion;
52 class HRRSCleanupTask;
53 class GenerationSpec;
54 class OopsInHeapRegionClosure;
55 class G1KlassScanClosure;
56 class G1ScanHeapEvacClosure;
274 // Tears down the region sets / lists so that they are empty and the
275 // regions on the heap do not belong to a region set / list. The
276 // only exception is the humongous set which we leave unaltered. If
277 // free_list_only is true, it will only tear down the master free
278 // list. It is called before a Full GC (free_list_only == false) or
279 // before heap shrinking (free_list_only == true).
280 void tear_down_region_sets(bool free_list_only);
281
282 // Rebuilds the region sets / lists so that they are repopulated to
283 // reflect the contents of the heap. The only exception is the
284 // humongous set which was not torn down in the first place. If
285 // free_list_only is true, it will only rebuild the master free
286 // list. It is called after a Full GC (free_list_only == false) or
287 // after heap shrinking (free_list_only == true).
288 void rebuild_region_sets(bool free_list_only);
289
290 // Callback for region mapping changed events.
291 G1RegionMappingChangedListener _listener;
292
293 // The sequence of all heap regions in the heap.
294 HeapRegionManager _hrm;
295
296 // Alloc region used to satisfy mutator allocation requests.
297 MutatorAllocRegion _mutator_alloc_region;
298
299 // Alloc region used to satisfy allocation requests by the GC for
300 // survivor objects.
301 SurvivorGCAllocRegion _survivor_gc_alloc_region;
302
303 // PLAB sizing policy for survivors.
304 PLABStats _survivor_plab_stats;
305
306 // Alloc region used to satisfy allocation requests by the GC for
307 // old objects.
308 OldGCAllocRegion _old_gc_alloc_region;
309
310 // PLAB sizing policy for tenured objects.
311 PLABStats _old_plab_stats;
312
313 PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
314 PLABStats* stats = NULL;
412
413 // Keeps track of how many "old marking cycles" (i.e., Full GCs or
414 // concurrent cycles) we have completed.
415 volatile unsigned int _old_marking_cycles_completed;
416
417 bool _concurrent_cycle_started;
418
419 // This is a non-product method that is helpful for testing. It is
420 // called at the end of a GC and artificially expands the heap by
421 // allocating a number of dead regions. This way we can induce very
422 // frequent marking cycles and stress the cleanup / concurrent
423 // cleanup code more (as all the regions that will be allocated by
424 // this method will be found dead by the marking cycle).
425 void allocate_dummy_regions() PRODUCT_RETURN;
426
427 // Clear RSets after a compaction. It also resets the GC time stamps.
428 void clear_rsets_post_compaction();
429
430 // If the HR printer is active, dump the state of the regions in the
431 // heap after a compaction.
432 void print_hrm_post_compaction();
433
434 double verify(bool guard, const char* msg);
435 void verify_before_gc();
436 void verify_after_gc();
437
438 void log_gc_header();
439 void log_gc_footer(double pause_time_sec);
440
441 // These are macros so that, if the assert fires, we get the correct
442 // line number, file, etc.
443
444 #define heap_locking_asserts_err_msg(_extra_message_) \
445 err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s", \
446 (_extra_message_), \
447 BOOL_TO_STR(Heap_lock->owned_by_self()), \
448 BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()), \
449 BOOL_TO_STR(Thread::current()->is_VM_thread()))
450
451 #define assert_heap_locked() \
452 do { \
698 virtual void gc_prologue(bool full);
699 virtual void gc_epilogue(bool full);
700
701 inline void set_humongous_is_live(oop obj);
702
// Whether the humongous object in the given region is (still)
// considered live, as recorded in the _humongous_is_live table.
bool humongous_is_live(uint region) {
  return _humongous_is_live.is_live(region);
}
706
707 // Returns whether the given region (which must be a humongous (start) region)
708 // is to be considered conservatively live regardless of any other conditions.
709 bool humongous_region_is_always_live(uint index);
710 // Register the given region to be part of the collection set.
711 inline void register_humongous_region_with_in_cset_fast_test(uint index);
712 // Register regions with humongous objects (actually on the start region) in
713 // the in_cset_fast_test table.
714 void register_humongous_regions_with_in_cset_fast_test();
715 // We register a region with the fast "in collection set" test. We
716 // simply set to true the array slot corresponding to this region.
void register_region_with_in_cset_fast_test(HeapRegion* r) {
  // Mark the table slot for r's region index as "in collection set".
  _in_cset_fast_test.set_in_cset(r->hrm_index());
}
720
721 // This is a fast test on whether a reference points into the
722 // collection set or not. Assume that the reference
723 // points into the heap.
724 inline bool in_cset_fast_test(oop obj);
725
// Resets every entry of the fast "in collection set" table.
void clear_cset_fast_test() {
  _in_cset_fast_test.clear();
}
729
730 // This is called at the start of either a concurrent cycle or a Full
731 // GC to update the number of old marking cycles started.
732 void increment_old_marking_cycles_started();
733
734 // This is called at the end of either a concurrent cycle or a Full
735 // GC to update the number of old marking cycles completed. Those two
736 // can happen in a nested fashion, i.e., we start a concurrent
737 // cycle, a Full GC happens half-way through it which ends first,
738 // and then the cycle notices that a Full GC happened and ends
1154 // The Concurrent Marking reference processor...
1155 ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
1156
1157 ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
1158 G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }
1159
1160 virtual size_t capacity() const;
1161 virtual size_t used() const;
1162 // This should be called when we're not holding the heap lock. The
1163 // result might be a bit inaccurate.
1164 size_t used_unlocked() const;
1165 size_t recalculate_used() const;
1166
1167 // These virtual functions do the actual allocation.
1168 // Some heaps may offer a contiguous region for shared non-blocking
1169 // allocation, via inlined code (by exporting the address of the top and
1170 // end fields defining the extent of the contiguous allocation region.)
1171 // But G1CollectedHeap doesn't yet support this.
1172
// The heap cannot grow any further without a GC once the heap region
// manager reports that no regions are available.
virtual bool is_maximal_no_gc() const {
  return _hrm.available() == 0;
}
1176
// Region counting accessors; all counts are in whole heap regions
// and delegate to the heap region manager (_hrm).

// The current number of regions in the heap.
uint num_regions() const { return _hrm.length(); }

// The max number of regions in the heap.
uint max_regions() const { return _hrm.max_length(); }

// The number of regions that are completely free.
uint num_free_regions() const { return _hrm.num_free_regions(); }

// The number of regions that are not completely free.
uint num_used_regions() const { return num_regions() - num_free_regions(); }
1188
1189 void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
1190 void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
1191 void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
1192 void verify_dirty_young_regions() PRODUCT_RETURN;
1193
1194 #ifndef PRODUCT
1195 // Make sure that the given bitmap has no marked objects in the
1196 // range [from,limit). If it does, print an error message and return
1197 // false. Otherwise, just return true. bitmap_name should be "prev"
1198 // or "next".
1199 bool verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
1200 HeapWord* from, HeapWord* limit);
1201
1202 // Verify that the prev / next bitmap range [tams,end) for the given
1203 // region has no marks. Return true if all is well, false if errors
1204 // are detected.
1216 void check_bitmaps(const char* caller) PRODUCT_RETURN;
1217
1218 // verify_region_sets() performs verification over the region
1219 // lists. It will be compiled in the product code to be used when
1220 // necessary (i.e., during heap verification).
1221 void verify_region_sets();
1222
1223 // verify_region_sets_optional() is planted in the code for
1224 // list verification in non-product builds (and it can be enabled in
1225 // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
#if HEAP_REGION_SET_FORCE_VERIFY
// Forced verification: run the full region set checks.
void verify_region_sets_optional() {
  verify_region_sets();
}
#else // HEAP_REGION_SET_FORCE_VERIFY
// Without forced verification this compiles to a no-op.
void verify_region_sets_optional() { }
#endif // HEAP_REGION_SET_FORCE_VERIFY
1233
#ifdef ASSERT
// Debug-only query: is hr currently on the master free list managed
// by the heap region manager?
bool is_on_master_free_list(HeapRegion* hr) {
  return _hrm.is_free(hr);
}
#endif // ASSERT
1239
1240 // Wrapper for the region list operations that can be called from
1241 // methods outside this class.
1242
void secondary_free_list_add(FreeRegionList* list) {
  // Splice list into the secondary free list, preserving ordering.
  // NOTE(review): presumably callers hold SecondaryFreeList_lock —
  // confirm against the call sites.
  _secondary_free_list.add_ordered(list);
}
1246
void append_secondary_free_list() {
  // Move everything on the secondary free list into the master free
  // list kept by the heap region manager.
  _hrm.insert_list_into_free_list(&_secondary_free_list);
}
1250
1251 void append_secondary_free_list_if_not_empty_with_lock() {
1252 // If the secondary free list looks empty there's no reason to
1253 // take the lock and then try to append it.
1254 if (!_secondary_free_list.is_empty()) {
1255 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
1256 append_secondary_free_list();
1257 }
1258 }
1259
1260 inline void old_set_remove(HeapRegion* hr);
1261
1262 size_t non_young_capacity_bytes() {
1263 return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
1264 }
1265
1266 void set_free_regions_coming();
1267 void reset_free_regions_coming();
1268 bool free_regions_coming() { return _free_regions_coming; }
1339 }
1340
// TRUE iff the region covering addr has any state other than InNeither.
bool is_in_cset_or_humongous(HeapWord* addr) const { return get_by_address(addr) != G1CollectedHeap::InNeither; }
// TRUE iff the region covering addr is in the collection set proper.
bool is_in_cset(HeapWord* addr) const { return get_by_address(addr) == G1CollectedHeap::InCSet; }
// The raw per-region state value for the region covering addr.
G1CollectedHeap::in_cset_state_t at(HeapWord* addr) const { return (G1CollectedHeap::in_cset_state_t)get_by_address(addr); }
// Reset the whole array (delegates to the base class).
void clear() { G1BiasedMappedArray<char>::clear(); }
1345 };
1346
1347 // This array is used for a quick test on whether a reference points into
1348 // the collection set or not. Each of the array's elements denotes whether the
1349 // corresponding region is in the collection set or not.
1350 G1FastCSetBiasedMappedArray _in_cset_fast_test;
1351
1352 public:
1353
1354 inline in_cset_state_t in_cset_state(const oop obj);
1355
1356 // Return "TRUE" iff the given object address is in the reserved
1357 // region of g1.
1358 bool is_in_g1_reserved(const void* p) const {
1359 return _hrm.reserved().contains(p);
1360 }
1361
1362 // Returns a MemRegion that corresponds to the space that has been
1363 // reserved for the heap
1364 MemRegion g1_reserved() const {
1365 return _hrm.reserved();
1366 }
1367
1368 virtual bool is_in_closed_subset(const void* p) const;
1369
G1SATBCardTableLoggingModRefBS* g1_barrier_set() {
  // NOTE(review): unchecked downcast — assumes the heap was set up
  // with a G1SATBCardTableLoggingModRefBS barrier set; confirm at the
  // heap construction site.
  return (G1SATBCardTableLoggingModRefBS*) barrier_set();
}
1373
1374 // This resets the card table to all zeros. It is used after
1375 // a collection pause which used the card table to claim cards.
1376 void cleanUpCardTable();
1377
1378 // Iteration functions.
1379
1380 // Iterate over all the ref-containing fields of all objects, calling
1381 // "cl.do_oop" on each.
1382 virtual void oop_iterate(ExtendedOopClosure* cl);
1383
1384 // Iterate over all objects, calling "cl.do_object" on each.
1385 virtual void object_iterate(ObjectClosure* cl);
|