                                         int* gclocker_retry_count_ret);

  // Allocation attempt that should be called during safepoints (e.g.,
  // at the end of a successful GC). expect_null_mutator_alloc_region
  // specifies whether the mutator alloc region is expected to be NULL
  // or not.
  HeapWord* attempt_allocation_at_safepoint(size_t word_size,
                                            AllocationContext_t context,
                                            bool expect_null_mutator_alloc_region);

  // It dirties the cards that cover the block so that the post
  // write barrier never queues anything when updating objects on this
  // block. It is assumed (and in fact we assert) that the block
  // belongs to a young region.
  inline void dirty_young_block(HeapWord* start, size_t word_size);

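  // Usage sketch (illustrative; not a member of this class): a mutator
  // slow-path allocation that lands in a young region dirties the new block
  // right after getting it back, so subsequent field updates on it skip the
  // post write barrier:
  //
  //   HeapWord* result = attempt_allocation_at_safepoint(word_size, context,
  //                                                      false /* expect_null_mutator_alloc_region */);
  //   if (result != NULL) {
  //     dirty_young_block(result, word_size);
  //   }
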
  // Allocate blocks during garbage collection. Will ensure an
  // allocation region, either by picking one or expanding the
  // heap, and then allocate a block of the given size. The block
  // may not be humongous - it must fit into a single heap region.
  HeapWord* par_allocate_during_gc(in_cset_state_t dest,
                                   size_t word_size,
                                   AllocationContext_t context) {
    switch (dest) {
      case InCSetState::Young:
        return survivor_attempt_allocation(word_size, context);
      case InCSetState::Old:
        return old_attempt_allocation(word_size, context);
      default:
        assert(false, err_msg("Unknown dest: %d", dest));
        break;
    }
    // keep some compilers happy
    return NULL;
  }

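  // Usage sketch (hypothetical caller; dest_state, word_sz and context stand
  // in for a GC worker's copy state): when a per-thread PLAB cannot satisfy a
  // copy, the worker falls back to this direct allocation:
  //
  //   HeapWord* obj_ptr = par_allocate_during_gc(dest_state, word_sz, context);
  //   if (obj_ptr == NULL) {
  //     // neither an existing GC alloc region nor heap expansion helped;
  //     // the evacuation of this object will fail
  //   }
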
  // Ensure that no further allocations can happen in "r", bearing in mind
  // that parallel threads might be attempting allocations.
  void par_allocate_remaining_space(HeapRegion* r);

  // Allocation attempt during GC for a survivor object / PLAB.
  inline HeapWord* survivor_attempt_allocation(size_t word_size,
                                               AllocationContext_t context);

  // Allocation attempt during GC for an old object / PLAB.
  inline HeapWord* old_attempt_allocation(size_t word_size,
                                          AllocationContext_t context);

  // These methods are the "callbacks" from the G1AllocRegion class.

  // For mutator alloc regions.
  HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
  void retire_mutator_alloc_region(HeapRegion* alloc_region,
                                   size_t allocated_bytes);

  // For GC alloc regions.
  HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
                                  in_cset_state_t dest);
  void retire_gc_alloc_region(HeapRegion* alloc_region,
                              size_t allocated_bytes, in_cset_state_t dest);

  // - if explicit_gc is true, the GC is for a System.gc() or a heap
  //   inspection request and should collect the entire heap
  // - if clear_all_soft_refs is true, all soft references should be
  //   cleared during the GC
  // - if explicit_gc is false, word_size describes the allocation that
  //   the GC should attempt (at least) to satisfy
  // - it returns false if it is unable to do the collection due to the
  //   GC locker being active, true otherwise
  bool do_collection(bool explicit_gc,
                     bool clear_all_soft_refs,
                     size_t word_size);

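  // Example (illustrative): do_full_collection() below can forward here by
  // treating the request as explicit, with no particular allocation to
  // satisfy:
  //
  //   bool success = do_collection(true,                /* explicit_gc */
  //                                clear_all_soft_refs, /* clear_all_soft_refs */
  //                                0);                  /* word_size */
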
  // Callback from VM_G1CollectFull operation.
  // Perform a full collection.
  virtual void do_full_collection(bool clear_all_soft_refs);

  // Resize the heap if necessary after a full collection. If this is
  // after a collect-for allocation, "word_size" is the allocation size,
  // and will be considered part of the used portion of the heap.
  void resize_if_necessary_after_full_collection(size_t word_size);

  // Enqueue any remaining discovered references
  // after processing.
  void enqueue_discovered_references(uint no_of_gc_workers);

public:

  G1Allocator* allocator() {
    return _allocator;
  }

  G1MonitoringSupport* g1mm() {
    assert(_g1mm != NULL, "should have been initialized");
    return _g1mm;
  }

  // Expand the garbage-first heap by at least the given size (in bytes!).
  // Returns true if the heap was expanded by the requested amount;
  // false otherwise.
  // (Rounds up to a HeapRegion boundary.)
  bool expand(size_t expand_bytes);

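  // Worked example (illustrative; assumes 1M heap regions): expand(100)
  // rounds the request up to a region boundary and commits one full 1M
  // region; expand(2*M + 1) would commit three regions. A typical guarded
  // call:
  //
  //   if (!expand(HeapRegion::GrainBytes)) {
  //     // could not commit even one more region
  //   }
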
  // Returns the PLAB statistics for a given destination.
  PLABStats* alloc_buffer_stats(in_cset_state_t dest) {
    switch (dest) {
      case InCSetState::Young:
        return &_survivor_plab_stats;
      case InCSetState::Old:
        return &_old_plab_stats;
      default:
        assert(false, err_msg("unknown dest: %d", dest));
        break;
    }
    // keep some compilers happy
    return NULL;
  }

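  // Usage sketch (illustrative; assumes the PLABStats::adjust_desired_plab_sz(uint)
  // entry point): at the end of a pause, the accumulator for each destination
  // can be asked to recompute the desired PLAB size for the next GC:
  //
  //   alloc_buffer_stats(InCSetState::Young)->adjust_desired_plab_sz(no_of_gc_workers);
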
  // Determines PLAB size for a given destination.
  size_t desired_plab_sz(in_cset_state_t dest) {
    size_t gclab_word_size = 0;
    switch (dest) {
      case InCSetState::Young:
        gclab_word_size = _survivor_plab_stats.desired_plab_sz();
        break;
      case InCSetState::Old:
        gclab_word_size = _old_plab_stats.desired_plab_sz();
        break;
      default:
        assert(false, err_msg("Unknown dest: %d", dest));
        break;
    }

    // Prevent humongous PLAB sizes for two reasons:
    // * PLABs are allocated using a similar path as oops, but should
    //   never be in a humongous region
    // * Allowing humongous PLABs needlessly churns the region free lists
    return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
  }

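  // Worked example (illustrative; assumes 1M regions and 8-byte words):
  // HeapRegion::GrainWords is 131072, so _humongous_object_threshold_in_words
  // is 65536 (objects larger than half a region are humongous). A desired
  // PLAB size of, say, 100000 words would therefore be clamped to 65536 words
  // here, keeping PLABs out of humongous territory.
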
  inline AllocationContextStats& allocation_context_stats();

  // Do anything common to GCs.
  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);

  inline void set_humongous_is_live(oop obj);

  bool humongous_is_live(uint region) {
    return _humongous_is_live.is_live(region);
  }

  // Returns whether the given region (which must be a humongous (start) region)
  // is to be considered conservatively live regardless of any other conditions.
  bool humongous_region_is_always_live(uint index);
  // Register the given region to be part of the collection set.
  inline void register_humongous_region_with_in_cset_fast_test(uint index);
  // Register regions with humongous objects (actually on the start region) in
  // the in_cset_fast_test table.
  void register_humongous_regions_with_in_cset_fast_test();
  // We register a region with the fast "in collection set" test. We
  // record, in the array slot corresponding to the region, whether it
  // is a young or an old collection set region.
  void register_young_region_with_in_cset_fast_test(HeapRegion* r) {
    _in_cset_fast_test.set_in_young(r->hrm_index());
  }
  void register_old_region_with_in_cset_fast_test(HeapRegion* r) {
    _in_cset_fast_test.set_in_old(r->hrm_index());
  }

  // This is a fast test on whether a reference points into the
  // collection set or not. Assume that the reference
  // points into the heap.
  inline bool in_cset_fast_test(oop obj);

  void clear_cset_fast_test() {
    _in_cset_fast_test.clear();
  }

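  // Lifecycle sketch (illustrative; hr and obj are placeholders): while the
  // collection set is being built, each chosen region is registered; during
  // the pause, per-reference checks become a single array lookup; the table
  // is reset at the end of the pause:
  //
  //   register_young_region_with_in_cset_fast_test(hr);  // cset construction
  //   ...
  //   if (in_cset_fast_test(obj)) { /* copy obj to its destination */ }
  //   ...
  //   clear_cset_fast_test();                            // end of pause
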
  // This is called at the start of either a concurrent cycle or a Full
  // GC to update the number of old marking cycles started.
  void increment_old_marking_cycles_started();

  // This is called at the end of either a concurrent cycle or a Full
  // GC to update the number of old marking cycles completed. Those two
  // can happen in a nested fashion, i.e., we start a concurrent
  // cycle, a Full GC happens half-way through it which ends first,
  // and then the cycle notices that a Full GC happened and ends
  // too.
  void increment_old_marking_cycles_completed(bool concurrent);

#ifndef PRODUCT
  // Make sure that the given bitmap has no marked objects in the
  // range [from,limit). If it does, print an error message and return
  // false. Otherwise, just return true. bitmap_name should be "prev"
  // or "next".
  bool verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
                                HeapWord* from, HeapWord* limit);

  // Verify that the prev / next bitmap range [tams,end) for the given
  // region has no marks. Return true if all is well, false if errors
  // are detected.
  bool verify_bitmaps(const char* caller, HeapRegion* hr);
#endif // PRODUCT

  // If G1VerifyBitmaps is set, verify that the marking bitmaps for
  // the given region do not have any spurious marks. If errors are
  // detected, print appropriate error messages and crash.
  void check_bitmaps(const char* caller, HeapRegion* hr) PRODUCT_RETURN;

  // If G1VerifyBitmaps is set, verify that the marking bitmaps do not
  // have any spurious marks. If errors are detected, print
  // appropriate error messages and crash.
  void check_bitmaps(const char* caller) PRODUCT_RETURN;

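  // Usage sketch (illustrative): call sites bracket phases that must leave no
  // stray marks, passing a tag that shows up in the error output, e.g.:
  //
  //   check_bitmaps("Full GC End");
  //
  // In product builds PRODUCT_RETURN turns these into empty inline bodies.
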
  // Do a sanity check on the contents of the in-cset fast test table.
  bool check_cset_fast_test();

  // verify_region_sets() performs verification over the region
  // lists. It will be compiled in the product code to be used when
  // necessary (i.e., during heap verification).
  void verify_region_sets();

  // verify_region_sets_optional() is planted in the code for
  // list verification in non-product builds (and it can be enabled in
  // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
#if HEAP_REGION_SET_FORCE_VERIFY
  void verify_region_sets_optional() {
    verify_region_sets();
  }
#else // HEAP_REGION_SET_FORCE_VERIFY
  void verify_region_sets_optional() { }
#endif // HEAP_REGION_SET_FORCE_VERIFY

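  // Example (illustrative): region-set verification is on by default only in
  // non-product builds; to force it into a product build, define the macro on
  // the compile line:
  //
  //   -DHEAP_REGION_SET_FORCE_VERIFY=1
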
#ifdef ASSERT
  bool is_on_master_free_list(HeapRegion* hr) {
    return _hrm.is_free(hr);
  }
#endif // ASSERT

  void remove_from_old_sets(const HeapRegionSetCount& old_regions_removed, const HeapRegionSetCount& humongous_regions_removed);
  void prepend_to_freelist(FreeRegionList* list);
  void decrement_summary_bytes(size_t bytes);

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  virtual bool is_in(const void* p) const;
#ifdef ASSERT
  // Returns whether p is in one of the available areas of the heap. Slow but
  // extensive version.
  bool is_in_exact(const void* p) const;
#endif

  // Return "TRUE" iff the given object address is within the collection
  // set. Slow implementation.
  inline bool obj_in_cs(oop obj);

  inline bool is_in_cset(oop obj);

  inline bool is_in_cset_or_humongous(const oop obj);

private:
  // Instances of this class are used for quick tests on whether a reference points
  // into the collection set (and if so, into which generation) or into a humongous
  // object.
  //
  // Each of the array's elements indicates whether the corresponding region is in
  // the collection set (and if so, in which generation), or is a humongous region.
  //
  // We use this to speed up reference processing during young collection and
  // quickly reclaim humongous objects. For the latter, by making a humongous region
  // succeed this test, we sort-of add it to the collection set. During the reference
  // iteration closures, when we see a humongous region, we then simply mark it as
  // referenced, i.e. live.
  class G1InCSetStateFastTestBiasedMappedArray : public G1BiasedMappedArray<in_cset_state_t> {
   protected:
    in_cset_state_t default_value() const { return InCSetState::NotInCSet; }
   public:
    void set_humongous(uintptr_t index) {
      assert(get_by_index(index) == default_value(), "should be default");
      set_by_index(index, InCSetState::humongous());
    }

    void clear_humongous(uintptr_t index) {
      set_by_index(index, InCSetState::NotInCSet);
    }

    void set_in_young(uintptr_t index) {
      assert(get_by_index(index) == default_value(), "should be default");
      set_by_index(index, InCSetState::Young);
    }

    void set_in_old(uintptr_t index) {
      assert(get_by_index(index) == default_value(), "should be default");
      set_by_index(index, InCSetState::Old);
    }

    bool is_in_cset_or_humongous(HeapWord* addr) const { return InCSetState::is_in_cset_or_humongous(at(addr)); }
    bool is_in_cset(HeapWord* addr) const { return InCSetState::is_in_cset(at(addr)); }
    in_cset_state_t at(HeapWord* addr) const { return (in_cset_state_t) get_by_address(addr); }
    void clear() { G1BiasedMappedArray<in_cset_state_t>::clear(); }
  };

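  // Addressing sketch (conceptual; the real lookup lives in the
  // G1BiasedMappedArray superclass, whose fields are shown here): a query
  // shifts the address by the region-size shift and indexes a base pointer
  // biased so that the heap's first region maps to slot 0, making each test
  // a single load:
  //
  //   in_cset_state_t state = _biased_base[(uintptr_t)addr >> _shift_by];
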
  // This array is used for a quick test on whether a reference points into
  // the collection set or not. Each of the array's elements denotes the
  // in-cset state of the corresponding region (not in the collection set,
  // young, old, or humongous).
  G1InCSetStateFastTestBiasedMappedArray _in_cset_fast_test;

public:

  inline in_cset_state_t in_cset_state(const oop obj);

  // Return "TRUE" iff the given object address is in the reserved
  // region of g1.
  bool is_in_g1_reserved(const void* p) const {
    return _hrm.reserved().contains(p);
  }

  // Returns a MemRegion that corresponds to the space that has been
  // reserved for the heap.
  MemRegion g1_reserved() const {
    return _hrm.reserved();
  }

  virtual bool is_in_closed_subset(const void* p) const;

  G1SATBCardTableLoggingModRefBS* g1_barrier_set() {