
src/share/vm/gc/g1/g1CollectedHeap.cpp

rev 13070 : imported patch 8177544-full-gc-scope
rev 13071 : [mq]: 8177544-full-gc-scope-tschatzl-rev1


  34 #include "gc/g1/concurrentMarkThread.inline.hpp"
  35 #include "gc/g1/g1Allocator.inline.hpp"
  36 #include "gc/g1/g1CollectedHeap.inline.hpp"
  37 #include "gc/g1/g1CollectionSet.hpp"
  38 #include "gc/g1/g1CollectorPolicy.hpp"
  39 #include "gc/g1/g1CollectorState.hpp"
  40 #include "gc/g1/g1EvacStats.inline.hpp"
  41 #include "gc/g1/g1FullGCScope.hpp"
  42 #include "gc/g1/g1GCPhaseTimes.hpp"
  43 #include "gc/g1/g1HeapSizingPolicy.hpp"
  44 #include "gc/g1/g1HeapTransition.hpp"
  45 #include "gc/g1/g1HeapVerifier.hpp"
  46 #include "gc/g1/g1HotCardCache.hpp"
  47 #include "gc/g1/g1OopClosures.inline.hpp"
  48 #include "gc/g1/g1ParScanThreadState.inline.hpp"
  49 #include "gc/g1/g1Policy.hpp"
  50 #include "gc/g1/g1RegionToSpaceMapper.hpp"
  51 #include "gc/g1/g1RemSet.inline.hpp"
  52 #include "gc/g1/g1RootClosures.hpp"
  53 #include "gc/g1/g1RootProcessor.hpp"
  54 #include "gc/g1/g1SerialCollector.hpp"
  55 #include "gc/g1/g1StringDedup.hpp"
  56 #include "gc/g1/g1YCTypes.hpp"
  57 #include "gc/g1/heapRegion.inline.hpp"
  58 #include "gc/g1/heapRegionRemSet.hpp"
  59 #include "gc/g1/heapRegionSet.inline.hpp"
  60 #include "gc/g1/suspendibleThreadSet.hpp"
  61 #include "gc/g1/vm_operations_g1.hpp"
  62 #include "gc/shared/gcHeapSummary.hpp"
  63 #include "gc/shared/gcId.hpp"
  64 #include "gc/shared/gcLocker.inline.hpp"
  65 #include "gc/shared/gcTimer.hpp"
  66 #include "gc/shared/gcTrace.hpp"
  67 #include "gc/shared/gcTraceTime.inline.hpp"
  68 #include "gc/shared/generationSpec.hpp"
  69 #include "gc/shared/isGCActiveMark.hpp"
  70 #include "gc/shared/preservedMarks.inline.hpp"
  71 #include "gc/shared/referenceProcessor.inline.hpp"
  72 #include "gc/shared/taskqueue.inline.hpp"
  73 #include "logging/log.hpp"
  74 #include "memory/allocation.hpp"


1119   // refinement, if any are in progress.
1120   concurrent_mark()->abort();
1121 }
1122 
1123 void G1CollectedHeap::prepare_heap_for_full_collection() {
1124   // Make sure we'll choose a new allocation region afterwards.
1125   _allocator->release_mutator_alloc_region();
1126   _allocator->abandon_gc_alloc_regions();
1127   g1_rem_set()->cleanupHRRS();
1128 
1129   // We may have added regions to the current incremental collection
1130   // set between the last GC or pause and now. We need to clear the
1131   // incremental collection set and then start rebuilding it afresh
1132   // after this full GC.
1133   abandon_collection_set(collection_set());
1134 
1135   tear_down_region_sets(false /* free_list_only */);
1136   collector_state()->set_gcs_are_young(true);
1137 }
1138 
1139 void G1CollectedHeap::reset_card_cache_and_queue() {
1140   if (_hot_card_cache->use_cache()) {
1141     _hot_card_cache->reset_card_counts();
1142     _hot_card_cache->reset_hot_cache();
1143   }
1144 
1145   // Discard all stale remembered set updates.
1146   JavaThread::dirty_card_queue_set().abandon_logs();
1147   assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1148 }
1149 
1150 void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
1151   assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1152   assert(used() == recalculate_used(), "Should be equal");
1153   _verifier->verify_region_sets_optional();
1154   _verifier->verify_before_gc();
1155   _verifier->check_bitmaps("Full GC Start");
1156 }
1157 
1158 void G1CollectedHeap::verify_after_full_collection() {
1159   check_gc_time_stamps();
1160   _hrm.verify_optional();
1161   _verifier->verify_region_sets_optional();
1162   _verifier->verify_after_gc();
1163   // Clear the previous marking bitmap, if needed for bitmap verification.
1164   // Note we cannot do this when we clear the next marking bitmap in
1165   // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
1166   // objects marked during a full GC against the previous bitmap.
1167   // But we need to clear it before calling check_bitmaps below since
1168   // the full GC has compacted objects and updated TAMS but not updated
1169   // the prev bitmap.
1170   if (G1VerifyBitmaps) {
1171     GCTraceTime(Debug, gc)("Clear Bitmap for Verification");
1172     _cm->clear_prev_bitmap(workers());
1173   }
1174   _verifier->check_bitmaps("Full GC End");
1175 
1176   // At this point there should be no regions in the
1177   // entire heap tagged as young.
1178   assert(check_young_list_empty(), "young list should be empty at this point");
1179 
1180   // Note: since we've just done a full GC, concurrent
1181   // marking is no longer active. Therefore we need not
1182   // re-enable reference discovery for the CM ref processor.
1183   // That will be done at the start of the next marking cycle.
1184   // We also know that the STW processor should no longer
1185   // discover any new references.
1186   assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1187   assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1188   ref_processor_stw()->verify_no_references_recorded();
1189   ref_processor_cm()->verify_no_references_recorded();
1190 }
1191 
1192 void G1CollectedHeap::do_full_collection_inner(G1FullGCScope* scope) {
1193   GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1194   G1HeapTransition heap_transition(this);
1195   g1_policy()->record_full_collection_start();
1196 
1197   print_heap_before_gc();
1198   print_heap_regions();
1199 
1200   abort_concurrent_cycle();
1201   verify_before_full_collection(scope->is_explicit_gc());
1202 
1203   gc_prologue(true);
1204   prepare_heap_for_full_collection();
1205 
1206   G1SerialCollector serial(scope, ref_processor_stw());
1207   serial.prepare_collection();
1208   serial.collect();
1209   serial.complete_collection();
1210 
1211   assert(num_free_regions() == 0, "we should not have added any free regions");
1212   MemoryService::track_memory_usage();
1213 
1214   // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1215   ClassLoaderDataGraph::purge();
1216   MetaspaceAux::verify_metrics();
1217 
1218   // Prepare heap for normal collections.
1219   rebuild_region_sets(false /* free_list_only */);
1220   reset_card_cache_and_queue();
1221   resize_if_necessary_after_full_collection();
1222 
1223   // Rebuild the strong code root lists for each region
1224   rebuild_strong_code_roots();
1225 
1226   // Start a new incremental collection set for the next pause
1227   start_new_collection_set();
1228 
1229   _allocator->init_mutator_alloc_region();
1230 
1231   // Post collection state updates.
1232   MetaspaceGC::compute_new_size();
1233   gc_epilogue(true);
1234   g1_policy()->record_full_collection_end();

1235 
1236   // Post collection verification.
1237   verify_after_full_collection();
1238 
1239   // Post collection logging.
1240   // We should do this after we potentially resize the heap so
1241   // that all the COMMIT / UNCOMMIT events are generated before
1242   // the compaction events.
1243   print_hrm_post_compaction();
1244   heap_transition.print();
1245   print_heap_after_gc();
1246   print_heap_regions();
1247 #ifdef TRACESPINNING
1248   ParallelTaskTerminator::print_termination_counts();
1249 #endif
1250 }
1251 
1252 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
1253                                          bool clear_all_soft_refs) {
1254   assert_at_safepoint(true /* should_be_vm_thread */);
1255 
1256   if (GCLocker::check_active_before_gc()) {
1257     // Full GC was not completed.
1258     return false;
1259   }
1260 
1261   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1262       collector_policy()->should_clear_all_soft_refs();
1263 
1264   G1FullGCScope scope(explicit_gc, do_clear_all_soft_refs);
1265   do_full_collection_inner(&scope);
1266 
1267   // Full collection was successfully completed.
1268   return true;
1269 }
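
For readers less familiar with the pattern: the G1FullGCScope constructed in do_full_collection above is a stack object, so its setup runs on entry and its teardown runs on every exit path of the full collection. Below is a minimal, self-contained sketch of that RAII shape; apart from is_explicit_gc(), the member names are assumptions for illustration, not the actual G1FullGCScope interface.

    #include <cstdio>

    // Hypothetical stand-in for a full-GC scope object (not the real G1FullGCScope).
    struct FullGCScopeSketch {
      bool _explicit_gc;
      bool _clear_all_soft_refs;

      FullGCScopeSketch(bool explicit_gc, bool clear_all_soft_refs)
          : _explicit_gc(explicit_gc), _clear_all_soft_refs(clear_all_soft_refs) {
        printf("scope entered: full-GC bookkeeping starts here\n");
      }
      ~FullGCScopeSketch() {
        // Runs automatically when do_full_collection returns, on every path.
        printf("scope left: full-GC bookkeeping ends here\n");
      }
      bool is_explicit_gc() const { return _explicit_gc; }
    };

    static void do_full_collection_inner_sketch(FullGCScopeSketch* scope) {
      printf("collecting, explicit=%d\n", scope->is_explicit_gc() ? 1 : 0);
    }

    int main() {
      FullGCScopeSketch scope(true /* explicit_gc */, false /* clear_all_soft_refs */);
      do_full_collection_inner_sketch(&scope);
      return 0;
    }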


2590     increment_old_marking_cycles_completed(false /* concurrent */);
2591   }
2592 
2593   // We are at the end of the GC. Total collections has already been increased.
2594   g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
2595 
2596   // FIXME: what is this about?
2597   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2598   // is set.
2599 #if defined(COMPILER2) || INCLUDE_JVMCI
2600   assert(DerivedPointerTable::is_empty(), "derived pointer present");
2601 #endif
2602   // always_do_update_barrier = true;
2603 
2604   double start = os::elapsedTime();
2605   resize_all_tlabs();
2606   g1_policy()->phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2607 
2608   allocation_context_stats().update(full);
2609 

2610   // We have just completed a GC. Update the soft reference
2611   // policy with the new heap occupancy
2612   Universe::update_heap_info_at_gc();
2613 }
2614 
2615 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
2616                                                uint gc_count_before,
2617                                                bool* succeeded,
2618                                                GCCause::Cause gc_cause) {
2619   assert_heap_not_locked_and_not_at_safepoint();
2620   VM_G1IncCollectionPause op(gc_count_before,
2621                              word_size,
2622                              false, /* should_initiate_conc_mark */
2623                              g1_policy()->max_pause_time_ms(),
2624                              gc_cause);
2625 
2626   op.set_allocation_context(AllocationContext::current());
2627   VMThread::execute(&op);
2628 
2629   HeapWord* result = op.result();


3139               // We failed to expand the heap. Cannot do anything about it.
3140             }
3141             g1_policy()->phase_times()->record_expand_heap_time(expand_ms);
3142           }
3143         }
3144 
3145         // We redo the verification but now wrt to the new CSet which
3146         // has just got initialized after the previous CSet was freed.
3147         _cm->verify_no_cset_oops();
3148 
3149         // This timing is only used by the ergonomics to handle our pause target.
3150         // It is unclear why this should not include the full pause. We will
3151         // investigate this in CR 7178365.
3152         double sample_end_time_sec = os::elapsedTime();
3153         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3154         size_t total_cards_scanned = per_thread_states.total_cards_scanned();
3155         g1_policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);
3156 
3157         evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before());
3158         evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());
3159 
3160         MemoryService::track_memory_usage();
3161 
3162         // In prepare_for_verify() below we'll need to scan the deferred
3163         // update buffers to bring the RSets up-to-date if
3164         // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
3165         // the update buffers we'll probably need to scan cards on the
3166         // regions we just allocated to (i.e., the GC alloc
3167         // regions). However, during the last GC we called
3168         // set_saved_mark() on all the GC alloc regions, so card
3169         // scanning might skip the [saved_mark_word()...top()] area of
3170         // those regions (i.e., the area we allocated objects into
3171         // during the last GC). But it shouldn't. Given that
3172         // saved_mark_word() is conditional on whether the GC time stamp
3173         // on the region is current or not, by incrementing the GC time
3174         // stamp here we invalidate all the GC time stamps on all the
3175         // regions and saved_mark_word() will simply return top() for
3176         // all the regions. This is a nicer way of ensuring this rather
3177         // than iterating over the regions and fixing them. In fact, the
3178         // GC time stamp increment here also ensures that
3179         // saved_mark_word() will return top() between pauses, i.e.,
3180         // during concurrent refinement. So we don't need the




  34 #include "gc/g1/concurrentMarkThread.inline.hpp"
  35 #include "gc/g1/g1Allocator.inline.hpp"
  36 #include "gc/g1/g1CollectedHeap.inline.hpp"
  37 #include "gc/g1/g1CollectionSet.hpp"
  38 #include "gc/g1/g1CollectorPolicy.hpp"
  39 #include "gc/g1/g1CollectorState.hpp"
  40 #include "gc/g1/g1EvacStats.inline.hpp"
  41 #include "gc/g1/g1FullGCScope.hpp"
  42 #include "gc/g1/g1GCPhaseTimes.hpp"
  43 #include "gc/g1/g1HeapSizingPolicy.hpp"
  44 #include "gc/g1/g1HeapTransition.hpp"
  45 #include "gc/g1/g1HeapVerifier.hpp"
  46 #include "gc/g1/g1HotCardCache.hpp"
  47 #include "gc/g1/g1OopClosures.inline.hpp"
  48 #include "gc/g1/g1ParScanThreadState.inline.hpp"
  49 #include "gc/g1/g1Policy.hpp"
  50 #include "gc/g1/g1RegionToSpaceMapper.hpp"
  51 #include "gc/g1/g1RemSet.inline.hpp"
  52 #include "gc/g1/g1RootClosures.hpp"
  53 #include "gc/g1/g1RootProcessor.hpp"
  54 #include "gc/g1/g1SerialFullCollector.hpp"
  55 #include "gc/g1/g1StringDedup.hpp"
  56 #include "gc/g1/g1YCTypes.hpp"
  57 #include "gc/g1/heapRegion.inline.hpp"
  58 #include "gc/g1/heapRegionRemSet.hpp"
  59 #include "gc/g1/heapRegionSet.inline.hpp"
  60 #include "gc/g1/suspendibleThreadSet.hpp"
  61 #include "gc/g1/vm_operations_g1.hpp"
  62 #include "gc/shared/gcHeapSummary.hpp"
  63 #include "gc/shared/gcId.hpp"
  64 #include "gc/shared/gcLocker.inline.hpp"
  65 #include "gc/shared/gcTimer.hpp"
  66 #include "gc/shared/gcTrace.hpp"
  67 #include "gc/shared/gcTraceTime.inline.hpp"
  68 #include "gc/shared/generationSpec.hpp"
  69 #include "gc/shared/isGCActiveMark.hpp"
  70 #include "gc/shared/preservedMarks.inline.hpp"
  71 #include "gc/shared/referenceProcessor.inline.hpp"
  72 #include "gc/shared/taskqueue.inline.hpp"
  73 #include "logging/log.hpp"
  74 #include "memory/allocation.hpp"


1119   // refinement, if any are in progress.
1120   concurrent_mark()->abort();
1121 }
1122 
1123 void G1CollectedHeap::prepare_heap_for_full_collection() {
1124   // Make sure we'll choose a new allocation region afterwards.
1125   _allocator->release_mutator_alloc_region();
1126   _allocator->abandon_gc_alloc_regions();
1127   g1_rem_set()->cleanupHRRS();
1128 
1129   // We may have added regions to the current incremental collection
1130   // set between the last GC or pause and now. We need to clear the
1131   // incremental collection set and then start rebuilding it afresh
1132   // after this full GC.
1133   abandon_collection_set(collection_set());
1134 
1135   tear_down_region_sets(false /* free_list_only */);
1136   collector_state()->set_gcs_are_young(true);
1137 }
1138 
1139 void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
1140   assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1141   assert(used() == recalculate_used(), "Should be equal");
1142   _verifier->verify_region_sets_optional();
1143   _verifier->verify_before_gc();
1144   _verifier->check_bitmaps("Full GC Start");
1145 }
1146 
1147 void G1CollectedHeap::prepare_heap_for_mutators() {
1148   // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1149   ClassLoaderDataGraph::purge();
1150   MetaspaceAux::verify_metrics();
1151 
1152   // Prepare heap for normal collections.
1153   assert(num_free_regions() == 0, "we should not have added any free regions");
1154   rebuild_region_sets(false /* free_list_only */);
1155   abort_refinement();
1156   resize_if_necessary_after_full_collection();
1157 
1158   // Rebuild the strong code root lists for each region
1159   rebuild_strong_code_roots();
1160 
1161   // Start a new incremental collection set for the next pause
1162   start_new_collection_set();
1163 
1164   _allocator->init_mutator_alloc_region();
1165 
1166   // Post collection state updates.
1167   MetaspaceGC::compute_new_size();
1168 }
1169 
1170 void G1CollectedHeap::abort_refinement() {
1171   if (_hot_card_cache->use_cache()) {
1172     _hot_card_cache->reset_card_counts();
1173     _hot_card_cache->reset_hot_cache();
1174   }
1175 
1176   // Discard all remembered set updates.
1177   JavaThread::dirty_card_queue_set().abandon_logs();
1178   assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1179 }
1180 
1181 void G1CollectedHeap::verify_after_full_collection() {
1182   check_gc_time_stamps();
1183   _hrm.verify_optional();
1184   _verifier->verify_region_sets_optional();
1185   _verifier->verify_after_gc();
1186   // Clear the previous marking bitmap, if needed for bitmap verification.
1187   // Note we cannot do this when we clear the next marking bitmap in
1188   // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
1189   // objects marked during a full GC against the previous bitmap.
1190   // But we need to clear it before calling check_bitmaps below since
1191   // the full GC has compacted objects and updated TAMS but not updated
1192   // the prev bitmap.
1193   if (G1VerifyBitmaps) {
1194     GCTraceTime(Debug, gc)("Clear Bitmap for Verification");
1195     _cm->clear_prev_bitmap(workers());
1196   }
1197   _verifier->check_bitmaps("Full GC End");
1198 
1199   // At this point there should be no regions in the
1200   // entire heap tagged as young.
1201   assert(check_young_list_empty(), "young list should be empty at this point");
1202 
1203   // Note: since we've just done a full GC, concurrent
1204   // marking is no longer active. Therefore we need not
1205   // re-enable reference discovery for the CM ref processor.
1206   // That will be done at the start of the next marking cycle.
1207   // We also know that the STW processor should no longer
1208   // discover any new references.
1209   assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1210   assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1211   ref_processor_stw()->verify_no_references_recorded();
1212   ref_processor_cm()->verify_no_references_recorded();
1213 }
1214 
1215 void G1CollectedHeap::print_heap_after_full_collection(G1HeapTransition* heap_transition) {
1216   print_hrm_post_compaction();
1217   heap_transition->print();
1218   print_heap_after_gc();
1219   print_heap_regions();
1220 #ifdef TRACESPINNING
1221   ParallelTaskTerminator::print_termination_counts();
1222 #endif
1223 }
1224 
1225 void G1CollectedHeap::do_full_collection_inner(G1FullGCScope* scope) {
1226   GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);

1227   g1_policy()->record_full_collection_start();
1228 
1229   print_heap_before_gc();
1230   print_heap_regions();
1231 
1232   abort_concurrent_cycle();
1233   verify_before_full_collection(scope->is_explicit_gc());
1234 
1235   gc_prologue(true);
1236   prepare_heap_for_full_collection();
1237 
1238   G1SerialFullCollector serial(scope, ref_processor_stw());
1239   serial.prepare_collection();
1240   serial.collect();
1241   serial.complete_collection();
1242 
1243   prepare_heap_for_mutators();
1244 
1245   g1_policy()->record_full_collection_end();
1246   gc_epilogue(true);
1247 
1248   // Post collection verification.
1249   verify_after_full_collection();
1250 
1251   // Post collection logging.
1252   // We should do this after we potentially resize the heap so
1253   // that all the COMMIT / UNCOMMIT events are generated before
1254   // the compaction events.
1255   print_heap_after_full_collection(scope->heap_transition());
1256 }
1257 
1258 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
1259                                          bool clear_all_soft_refs) {
1260   assert_at_safepoint(true /* should_be_vm_thread */);
1261 
1262   if (GCLocker::check_active_before_gc()) {
1263     // Full GC was not completed.
1264     return false;
1265   }
1266 
1267   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1268       collector_policy()->should_clear_all_soft_refs();
1269 
1270   G1FullGCScope scope(explicit_gc, do_clear_all_soft_refs);
1271   do_full_collection_inner(&scope);
1272 
1273   // Full collection was successfully completed.
1274   return true;
1275 }
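
do_full_collection returns false when GCLocker::check_active_before_gc() reports JNI critical sections in progress, and the collection is simply skipped. How a caller reacts to that is outside this hunk; the sketch below only illustrates the retry-style contract with hypothetical stand-ins, not the actual G1 call sites.

    #include <cstdio>

    // Hypothetical GC locker that drains after two attempts.
    static int g_critical_sections = 2;
    static bool gc_locker_active_sketch() { return g_critical_sections-- > 0; }

    static bool do_full_collection_sketch(bool explicit_gc, bool clear_all_soft_refs) {
      if (gc_locker_active_sketch()) {
        // Full GC was not completed.
        return false;
      }
      printf("full GC ran (explicit=%d, clear_soft_refs=%d)\n",
             explicit_gc ? 1 : 0, clear_all_soft_refs ? 1 : 0);
      return true;
    }

    int main() {
      // A caller that needs the collection keeps retrying until it succeeded.
      while (!do_full_collection_sketch(true, false)) {
        printf("GC locker active, retry once critical sections drain\n");
      }
      return 0;
    }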


2596     increment_old_marking_cycles_completed(false /* concurrent */);
2597   }
2598 
2599   // We are at the end of the GC. Total collections has already been increased.
2600   g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
2601 
2602   // FIXME: what is this about?
2603   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2604   // is set.
2605 #if defined(COMPILER2) || INCLUDE_JVMCI
2606   assert(DerivedPointerTable::is_empty(), "derived pointer present");
2607 #endif
2608   // always_do_update_barrier = true;
2609 
2610   double start = os::elapsedTime();
2611   resize_all_tlabs();
2612   g1_policy()->phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2613 
2614   allocation_context_stats().update(full);
2615 
2616   MemoryService::track_memory_usage();
2617   // We have just completed a GC. Update the soft reference
2618   // policy with the new heap occupancy
2619   Universe::update_heap_info_at_gc();
2620 }
2621 
2622 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
2623                                                uint gc_count_before,
2624                                                bool* succeeded,
2625                                                GCCause::Cause gc_cause) {
2626   assert_heap_not_locked_and_not_at_safepoint();
2627   VM_G1IncCollectionPause op(gc_count_before,
2628                              word_size,
2629                              false, /* should_initiate_conc_mark */
2630                              g1_policy()->max_pause_time_ms(),
2631                              gc_cause);
2632 
2633   op.set_allocation_context(AllocationContext::current());
2634   VMThread::execute(&op);
2635 
2636   HeapWord* result = op.result();
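
do_collection_pause does not run the pause itself: it fills in a VM_G1IncCollectionPause operation, hands it to the VM thread via VMThread::execute(), and afterwards reads the outcome back out of the operation object (hence the assert that the caller holds no heap lock and is not at a safepoint). The sketch below shows that request/execute/read-back shape with hypothetical types, run synchronously for simplicity.

    #include <cstddef>
    #include <cstdio>

    // Hypothetical, simplified stand-in for a GC VM operation.
    struct CollectionPauseOpSketch {
      size_t word_size;
      void*  result;
      bool   succeeded;

      explicit CollectionPauseOpSketch(size_t ws)
          : word_size(ws), result(nullptr), succeeded(false) {}

      void doit() {
        // Stands in for the evacuation pause; here we just pretend it satisfied
        // the allocation request.
        static char fake_region[1024];
        result = fake_region;
        succeeded = true;
      }
    };

    // Stands in for VMThread::execute(): synchronous here, but in HotSpot the
    // operation is queued and executed by the VM thread at a safepoint.
    static void execute_sketch(CollectionPauseOpSketch* op) { op->doit(); }

    int main() {
      CollectionPauseOpSketch op(64 /* word_size */);
      execute_sketch(&op);
      printf("pause succeeded=%d, result=%p\n", op.succeeded ? 1 : 0, op.result);
      return 0;
    }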


3146               // We failed to expand the heap. Cannot do anything about it.
3147             }
3148             g1_policy()->phase_times()->record_expand_heap_time(expand_ms);
3149           }
3150         }
3151 
3152         // We redo the verification but now wrt to the new CSet which
3153         // has just got initialized after the previous CSet was freed.
3154         _cm->verify_no_cset_oops();
3155 
3156         // This timing is only used by the ergonomics to handle our pause target.
3157         // It is unclear why this should not include the full pause. We will
3158         // investigate this in CR 7178365.
3159         double sample_end_time_sec = os::elapsedTime();
3160         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3161         size_t total_cards_scanned = per_thread_states.total_cards_scanned();
3162         g1_policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);
3163 
3164         evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before());
3165         evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());
3166 
3167         // In prepare_for_verify() below we'll need to scan the deferred
3168         // update buffers to bring the RSets up-to-date if
3169         // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
3170         // the update buffers we'll probably need to scan cards on the
3171         // regions we just allocated to (i.e., the GC alloc
3172         // regions). However, during the last GC we called
3173         // set_saved_mark() on all the GC alloc regions, so card
3174         // scanning might skip the [saved_mark_word()...top()] area of
3175         // those regions (i.e., the area we allocated objects into
3176         // during the last GC). But it shouldn't. Given that
3177         // saved_mark_word() is conditional on whether the GC time stamp
3178         // on the region is current or not, by incrementing the GC time
3179         // stamp here we invalidate all the GC time stamps on all the
3180         // regions and saved_mark_word() will simply return top() for
3181         // all the regions. This is a nicer way of ensuring this rather
3182         // than iterating over the regions and fixing them. In fact, the
3183         // GC time stamp increment here also ensures that
3184         // saved_mark_word() will return top() between pauses, i.e.,
3185         // during concurrent refinement. So we don't need the
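
The (truncated) comment above describes the mechanism the surrounding code relies on: a region's saved mark is only honored while the region's GC time stamp matches the heap-wide one, so bumping the heap-wide stamp invalidates all saved marks at once and saved_mark_word() falls back to top(). A self-contained sketch of that idea, using simplified hypothetical types rather than the real HeapRegion interface:

    #include <cassert>

    // Hypothetical, simplified model of the time-stamp check described above.
    struct RegionSketch {
      unsigned time_stamp;   // stamp recorded when the saved mark was set
      int      saved_mark;   // stands in for saved_mark_word()
      int      top;          // stands in for top()

      int saved_mark_word(unsigned heap_time_stamp) const {
        // The saved mark is only trusted while the region's stamp is current.
        return (time_stamp == heap_time_stamp) ? saved_mark : top;
      }
    };

    int main() {
      unsigned heap_time_stamp = 1;
      RegionSketch r = { 1 /* time_stamp */, 10 /* saved_mark */, 20 /* top */ };

      // Stamp current: card scanning would stop at the saved mark.
      assert(r.saved_mark_word(heap_time_stamp) == 10);

      // Bump the heap-wide stamp: every region's saved mark is invalidated in one
      // step and saved_mark_word() returns top(), with no per-region fix-up.
      heap_time_stamp++;
      assert(r.saved_mark_word(heap_time_stamp) == 20);
      return 0;
    }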

