src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 5685 : 8028128: Add a type safe alternative for working with counter based data
Reviewed-by:
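
The core of this changeset is replacing the raw jlong values returned by os::elapsed_counter() with dedicated tick types at the GC timer boundaries (see the new register_gc_start()/register_gc_end() call sites in the second listing below). A minimal sketch of the idea, with illustrative names only, not the actual utilities/ticks.hpp implementation:

  #include <stdint.h>

  typedef int64_t jlong;

  class Tickspan {                 // a duration, measured in counter ticks
    jlong _span_ticks;
   public:
    explicit Tickspan(jlong span) : _span_ticks(span) {}
    jlong value() const { return _span_ticks; }
  };

  class Ticks {                    // an absolute time stamp, in counter ticks
    jlong _stamp_ticks;
   public:
    explicit Ticks(jlong stamp) : _stamp_ticks(stamp) {}
    // Subtracting two stamps yields a duration, never another stamp.
    Tickspan operator-(const Ticks& other) const {
      return Tickspan(_stamp_ticks - other._stamp_ticks);
    }
    jlong value() const { return _stamp_ticks; }
  };

With distinct types, treating a time stamp as a duration, or passing an unrelated counter where a stamp is expected, becomes a compile-time error instead of a silent unit bug.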


  39 #include "gc_implementation/g1/g1MarkSweep.hpp"
  40 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  41 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  42 #include "gc_implementation/g1/g1YCTypes.hpp"
  43 #include "gc_implementation/g1/heapRegion.inline.hpp"
  44 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  45 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  46 #include "gc_implementation/g1/vm_operations_g1.hpp"
  47 #include "gc_implementation/shared/gcHeapSummary.hpp"
  48 #include "gc_implementation/shared/gcTimer.hpp"
  49 #include "gc_implementation/shared/gcTrace.hpp"
  50 #include "gc_implementation/shared/gcTraceTime.hpp"
  51 #include "gc_implementation/shared/isGCActiveMark.hpp"
  52 #include "memory/gcLocker.inline.hpp"
  53 #include "memory/genOopClosures.inline.hpp"
  54 #include "memory/generationSpec.hpp"
  55 #include "memory/referenceProcessor.hpp"
  56 #include "oops/oop.inline.hpp"
  57 #include "oops/oop.pcgc.inline.hpp"
  58 #include "runtime/vmThread.hpp"

  59 
  60 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  61 
  62 // turn it on so that the contents of the young list (scan-only /
  63 // to-be-collected) are printed at "strategic" points before / during
  64 // / after the collection --- this is useful for debugging
  65 #define YOUNG_LIST_VERBOSE 0
  66 // CURRENT STATUS
  67 // This file is under construction.  Search for "FIXME".
  68 
  69 // INVARIANTS/NOTES
  70 //
  71 // All allocation activity covered by the G1CollectedHeap interface is
  72 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  73 // and allocate_new_tlab, which are the "entry" points to the
  74 // allocation code from the rest of the JVM.  (Note that this does not
  75 // apply to TLAB allocation, which is not part of this interface: it
  76 // is done by clients of this interface.)
  77 
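A self-contained sketch of the serialization invariant described above, with std::mutex standing in for HotSpot's Heap_lock and MutexLocker (illustrative only, not HotSpot code):

  #include <cstddef>
  #include <mutex>

  static std::mutex Heap_lock_sketch;

  void* mem_allocate_sketch(size_t word_size) {
    std::lock_guard<std::mutex> ml(Heap_lock_sketch);  // "entry" point: hold the lock
    // ... the actual allocation work would happen here, fully serialized ...
    (void)word_size;
    return NULL;
  }
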
  78 // Notes on implementation of parallelism in different tasks.


1267 
1268   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1269     : _hr_printer(hr_printer) { }
1270 };
1271 
1272 void G1CollectedHeap::print_hrs_post_compaction() {
1273   PostCompactionPrinterClosure cl(hr_printer());
1274   heap_region_iterate(&cl);
1275 }
1276 
1277 bool G1CollectedHeap::do_collection(bool explicit_gc,
1278                                     bool clear_all_soft_refs,
1279                                     size_t word_size) {
1280   assert_at_safepoint(true /* should_be_vm_thread */);
1281 
1282   if (GC_locker::check_active_before_gc()) {
1283     return false;
1284   }
1285 
1286   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1287   gc_timer->register_gc_start(os::elapsed_counter());
1288 
1289   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1290   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1291 
1292   SvcGCMarker sgcm(SvcGCMarker::FULL);
1293   ResourceMark rm;
1294 
1295   print_heap_before_gc();
1296   trace_heap_before_gc(gc_tracer);
1297 
1298   size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
1299 
1300   HRSPhaseSetter x(HRSPhaseFullGC);
1301   verify_region_sets_optional();
1302 
1303   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1304                            collector_policy()->should_clear_all_soft_refs();
1305 
1306   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1307 


1535       }
1536 
1537       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1538       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1539       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1540       // before any GC notifications are raised.
1541       g1mm()->update_sizes();
1542 
1543       gc_epilogue(true);
1544     }
1545 
1546     if (G1Log::finer()) {
1547       g1_policy()->print_detailed_heap_transition(true /* full */);
1548     }
1549 
1550     print_heap_after_gc();
1551     trace_heap_after_gc(gc_tracer);
1552 
1553     post_full_gc_dump(gc_timer);
1554 
1555     gc_timer->register_gc_end(os::elapsed_counter());
1556     gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1557   }
1558 
1559   return true;
1560 }
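
The update_sizes()/TraceMemoryManagerStats comment inside do_collection() above describes a pure RAII-ordering constraint. A self-contained sketch, with StatsGuard as an illustrative stand-in for an active TraceMemoryManagerStats object:

  #include <cstdio>

  struct StatsGuard {
    ~StatsGuard() { std::printf("GC notification raised\n"); }  // runs at scope exit
  };

  static void update_pool_sizes_sketch() { std::printf("memory pools updated\n"); }

  void gc_end_sketch() {
    StatsGuard stats;             // like an active TraceMemoryManagerStats object
    update_pool_sizes_sketch();   // must run here, before the guard's destructor
  }                               // destructor fires: the notification sees fresh sizes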
1561 
1562 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1563   // do_collection() will return whether it succeeded in performing
1564   // the GC. Currently, there is no facility in the
1565   // do_full_collection() API to notify the caller that the collection
1566   // did not succeed (e.g., because it was locked out by the GC
1567   // locker). So, right now, we'll ignore the return value.
1568   bool dummy = do_collection(true,                /* explicit_gc */
1569                              clear_all_soft_refs,
1570                              0                    /* word_size */);
1571 }
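
Purely hypothetical and not part of this changeset: if the GC-locker lockout mentioned in the comment above ever needed to be visible to callers, the flag could simply be propagated instead of dropped, e.g.:

  bool try_full_collection_sketch(bool gc_locker_active) {
    if (gc_locker_active) {
      return false;               // the GC locker blocked the collection
    }
    // ... perform the collection ...
    return true;                  // caller can now react to the outcome
  }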
1572 
1573 // This code is mostly copied from TenuredGeneration.
1574 void
1575 G1CollectedHeap::


2465                  "is inconsistent with _old_marking_cycles_completed = %u",
2466                  _old_marking_cycles_started, _old_marking_cycles_completed));
2467 
2468   _old_marking_cycles_completed += 1;
2469 
2470   // We need to clear the "in_progress" flag in the CM thread before
2471   // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2472   // is set) so that if a waiter requests another System.gc() it doesn't
2473   // incorrectly see that a marking cycle is still in progress.
2474   if (concurrent) {
2475     _cmThread->clear_in_progress();
2476   }
2477 
2478   // This notify_all() will ensure that a thread that called
2479   // System.gc() (with ExplicitGCInvokesConcurrent set or not)
2480   // and is waiting for a full GC to finish will be woken up. It is
2481   // waiting in VM_G1IncCollectionPause::doit_epilogue().
2482   FullGCCount_lock->notify_all();
2483 }
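
A self-contained sketch of the wake-up protocol described above, with std::mutex and std::condition_variable standing in for the FullGCCount_lock monitor: a waiter blocks until the completed-cycle counter moves past the value it sampled before waiting.

  #include <condition_variable>
  #include <mutex>

  static std::mutex              fullgc_mutex;
  static std::condition_variable fullgc_cv;
  static unsigned                cycles_completed_sketch = 0;

  void wait_for_full_gc(unsigned observed_count) {   // cf. VM_G1IncCollectionPause::doit_epilogue()
    std::unique_lock<std::mutex> ml(fullgc_mutex);
    fullgc_cv.wait(ml, [&] { return cycles_completed_sketch > observed_count; });
  }

  void complete_old_marking_cycle() {                // cf. the code above
    std::lock_guard<std::mutex> ml(fullgc_mutex);
    cycles_completed_sketch += 1;
    fullgc_cv.notify_all();                          // wake every waiter
  }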
2484 
2485 void G1CollectedHeap::register_concurrent_cycle_start(jlong start_time) {
2486   _concurrent_cycle_started = true;
2487   _gc_timer_cm->register_gc_start(start_time);
2488 
2489   _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
2490   trace_heap_before_gc(_gc_tracer_cm);
2491 }
2492 
2493 void G1CollectedHeap::register_concurrent_cycle_end() {
2494   if (_concurrent_cycle_started) {
2495     if (_cm->has_aborted()) {
2496       _gc_tracer_cm->report_concurrent_mode_failure();
2497     }
2498 
2499     _gc_timer_cm->register_gc_end(os::elapsed_counter());
2500     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2501 
2502     _concurrent_cycle_started = false;
2503   }
2504 }
2505 
2506 void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
2507   if (_concurrent_cycle_started) {
2508     trace_heap_after_gc(_gc_tracer_cm);
2509   }
2510 }
2511 
2512 G1YCType G1CollectedHeap::yc_type() {
2513   bool is_young = g1_policy()->gcs_are_young();
2514   bool is_initial_mark = g1_policy()->during_initial_mark_pause();
2515   bool is_during_mark = mark_in_progress();
2516 
2517   if (is_initial_mark) {
2518     return InitialMark;
2519   } else if (is_during_mark) {


3870     g1_policy()->print_detailed_heap_transition();
3871   } else {
3872     if (evacuation_failed()) {
3873       gclog_or_tty->print("--");
3874     }
3875     g1_policy()->print_heap_transition();
3876     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3877   }
3878   gclog_or_tty->flush();
3879 }
3880 
3881 bool
3882 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3883   assert_at_safepoint(true /* should_be_vm_thread */);
3884   guarantee(!is_gc_active(), "collection is not reentrant");
3885 
3886   if (GC_locker::check_active_before_gc()) {
3887     return false;
3888   }
3889 
3890   _gc_timer_stw->register_gc_start(os::elapsed_counter());
3891 
3892   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3893 
3894   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3895   ResourceMark rm;
3896 
3897   print_heap_before_gc();
3898   trace_heap_before_gc(_gc_tracer_stw);
3899 
3900   HRSPhaseSetter x(HRSPhaseEvacuation);
3901   verify_region_sets_optional();
3902   verify_dirty_young_regions();
3903 
3904   // This call will decide whether this pause is an initial-mark
3905   // pause. If it is, during_initial_mark_pause() will return true
3906   // for the duration of this pause.
3907   g1_policy()->decide_on_conc_mark_initiation();
3908 
3909   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3910   assert(!g1_policy()->during_initial_mark_pause() ||


4248     // output from the concurrent mark thread interfering with this
4249     // logging output either.
4250 
4251     _hrs.verify_optional();
4252     verify_region_sets_optional();
4253 
4254     TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
4255     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4256 
4257     print_heap_after_gc();
4258     trace_heap_after_gc(_gc_tracer_stw);
4259 
4260     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4261     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4262     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4263     // before any GC notifications are raised.
4264     g1mm()->update_sizes();
4265 
4266     _gc_tracer_stw->report_evacuation_info(&evacuation_info);
4267     _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
4268     _gc_timer_stw->register_gc_end(os::elapsed_counter());
4269     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
4270   }
4271   // It should now be safe to tell the concurrent mark thread to start
4272   // without its logging output interfering with the logging output
4273   // that came from the pause.
4274 
4275   if (should_start_conc_mark) {
4276     // CAUTION: after the doConcurrentMark() call below,
4277     // the concurrent marking thread(s) could be running
4278     // concurrently with us. Make sure that anything after
4279     // this point does not assume that we are the only GC thread
4280     // running. Note: of course, the actual marking work will
4281     // not start until the safepoint itself is released in
4282     // ConcurrentGCThread::safepoint_desynchronize().
4283     doConcurrentMark();
4284   }
4285 
4286   return true;
4287 }
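
An illustrative, self-contained sketch of the CAUTION above (not HotSpot code): once marking threads may run concurrently with the VM thread, shared flags need safe publication, e.g. std::atomic instead of a plain bool.

  #include <atomic>

  static std::atomic<bool> conc_mark_started_sketch(false);

  void release_concurrent_mark_sketch() {
    conc_mark_started_sketch.store(true, std::memory_order_release);
  }

  bool is_concurrent_mark_started_sketch() {
    return conc_mark_started_sketch.load(std::memory_order_acquire);
  }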
4288 




  39 #include "gc_implementation/g1/g1MarkSweep.hpp"
  40 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  41 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  42 #include "gc_implementation/g1/g1YCTypes.hpp"
  43 #include "gc_implementation/g1/heapRegion.inline.hpp"
  44 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  45 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  46 #include "gc_implementation/g1/vm_operations_g1.hpp"
  47 #include "gc_implementation/shared/gcHeapSummary.hpp"
  48 #include "gc_implementation/shared/gcTimer.hpp"
  49 #include "gc_implementation/shared/gcTrace.hpp"
  50 #include "gc_implementation/shared/gcTraceTime.hpp"
  51 #include "gc_implementation/shared/isGCActiveMark.hpp"
  52 #include "memory/gcLocker.inline.hpp"
  53 #include "memory/genOopClosures.inline.hpp"
  54 #include "memory/generationSpec.hpp"
  55 #include "memory/referenceProcessor.hpp"
  56 #include "oops/oop.inline.hpp"
  57 #include "oops/oop.pcgc.inline.hpp"
  58 #include "runtime/vmThread.hpp"
  59 #include "utilities/ticks.hpp"
  60 
  61 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  62 
  63 // turn it on so that the contents of the young list (scan-only /
  64 // to-be-collected) are printed at "strategic" points before / during
  65 // / after the collection --- this is useful for debugging
  66 #define YOUNG_LIST_VERBOSE 0
  67 // CURRENT STATUS
  68 // This file is under construction.  Search for "FIXME".
  69 
  70 // INVARIANTS/NOTES
  71 //
  72 // All allocation activity covered by the G1CollectedHeap interface is
  73 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  74 // and allocate_new_tlab, which are the "entry" points to the
  75 // allocation code from the rest of the JVM.  (Note that this does not
  76 // apply to TLAB allocation, which is not part of this interface: it
  77 // is done by clients of this interface.)
  78 
  79 // Notes on implementation of parallelism in different tasks.


1268 
1269   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1270     : _hr_printer(hr_printer) { }
1271 };
1272 
1273 void G1CollectedHeap::print_hrs_post_compaction() {
1274   PostCompactionPrinterClosure cl(hr_printer());
1275   heap_region_iterate(&cl);
1276 }
1277 
1278 bool G1CollectedHeap::do_collection(bool explicit_gc,
1279                                     bool clear_all_soft_refs,
1280                                     size_t word_size) {
1281   assert_at_safepoint(true /* should_be_vm_thread */);
1282 
1283   if (GC_locker::check_active_before_gc()) {
1284     return false;
1285   }
1286 
1287   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1288   gc_timer->register_gc_start();
1289 
1290   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1291   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1292 
1293   SvcGCMarker sgcm(SvcGCMarker::FULL);
1294   ResourceMark rm;
1295 
1296   print_heap_before_gc();
1297   trace_heap_before_gc(gc_tracer);
1298 
1299   size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
1300 
1301   HRSPhaseSetter x(HRSPhaseFullGC);
1302   verify_region_sets_optional();
1303 
1304   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1305                            collector_policy()->should_clear_all_soft_refs();
1306 
1307   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1308 


1536       }
1537 
1538       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1539       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1540       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1541       // before any GC notifications are raised.
1542       g1mm()->update_sizes();
1543 
1544       gc_epilogue(true);
1545     }
1546 
1547     if (G1Log::finer()) {
1548       g1_policy()->print_detailed_heap_transition(true /* full */);
1549     }
1550 
1551     print_heap_after_gc();
1552     trace_heap_after_gc(gc_tracer);
1553 
1554     post_full_gc_dump(gc_timer);
1555 
1556     gc_timer->register_gc_end();
1557     gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1558   }
1559 
1560   return true;
1561 }
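
Note the new shape above: register_gc_start() and register_gc_end() now take no argument, so call sites can no longer feed them a mismatched counter; presumably the timer samples the clock itself. A self-contained sketch of that shape, with std::chrono standing in for os::elapsed_counter():

  #include <chrono>

  class GCTimerSketch {
    std::chrono::steady_clock::time_point _start;
    std::chrono::steady_clock::time_point _end;
   public:
    void register_gc_start() { _start = std::chrono::steady_clock::now(); }
    void register_gc_end()   { _end   = std::chrono::steady_clock::now(); }
    std::chrono::steady_clock::duration elapsed() const { return _end - _start; }
  };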
1562 
1563 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1564   // do_collection() will return whether it succeeded in performing
1565   // the GC. Currently, there is no facility in the
1566   // do_full_collection() API to notify the caller that the collection
1567   // did not succeed (e.g., because it was locked out by the GC
1568   // locker). So, right now, we'll ignore the return value.
1569   bool dummy = do_collection(true,                /* explicit_gc */
1570                              clear_all_soft_refs,
1571                              0                    /* word_size */);
1572 }
1573 
1574 // This code is mostly copied from TenuredGeneration.
1575 void
1576 G1CollectedHeap::


2466                  "is inconsistent with _old_marking_cycles_completed = %u",
2467                  _old_marking_cycles_started, _old_marking_cycles_completed));
2468 
2469   _old_marking_cycles_completed += 1;
2470 
2471   // We need to clear the "in_progress" flag in the CM thread before
2472   // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2473   // is set) so that if a waiter requests another System.gc() it doesn't
2474   // incorrectly see that a marking cycle is still in progress.
2475   if (concurrent) {
2476     _cmThread->clear_in_progress();
2477   }
2478 
2479   // This notify_all() will ensure that a thread that called
2480   // System.gc() (with ExplicitGCInvokesConcurrent set or not)
2481   // and is waiting for a full GC to finish will be woken up. It is
2482   // waiting in VM_G1IncCollectionPause::doit_epilogue().
2483   FullGCCount_lock->notify_all();
2484 }
2485 
2486 void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
2487   _concurrent_cycle_started = true;
2488   _gc_timer_cm->register_gc_start(start_time);
2489 
2490   _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
2491   trace_heap_before_gc(_gc_tracer_cm);
2492 }
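
The signature above is where the type safety pays off: the parameter changed from jlong to const Ticks&. Reusing the illustrative jlong typedef and Ticks class from the sketch near the top of this page:

  void register_start_old(jlong start_time) { (void)start_time; }         // old shape
  void register_start_new(const Ticks& start_time) { (void)start_time; }  // new shape

  void demo_type_safety() {
    jlong raw = 12345;
    register_start_old(raw);          // compiles: nothing checks the unit
    // register_start_new(raw);       // error: no implicit jlong -> Ticks conversion
    register_start_new(Ticks(raw));   // an explicit time stamp is required
  }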
2493 
2494 void G1CollectedHeap::register_concurrent_cycle_end() {
2495   if (_concurrent_cycle_started) {
2496     if (_cm->has_aborted()) {
2497       _gc_tracer_cm->report_concurrent_mode_failure();
2498     }
2499 
2500     _gc_timer_cm->register_gc_end();
2501     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2502 
2503     _concurrent_cycle_started = false;
2504   }
2505 }
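
A minimal sketch of the guard pattern above (illustrative names): pairing a started flag with the end registration makes the end call harmless when no cycle is in flight and ensures the end is reported exactly once per cycle.

  struct CycleTrackerSketch {
    bool _started;
    CycleTrackerSketch() : _started(false) {}
    void start() { _started = true; }
    void end() {
      if (_started) {
        // report timer/tracer end exactly once per cycle
        _started = false;
      }
    }
  };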
2506 
2507 void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
2508   if (_concurrent_cycle_started) {
2509     trace_heap_after_gc(_gc_tracer_cm);
2510   }
2511 }
2512 
2513 G1YCType G1CollectedHeap::yc_type() {
2514   bool is_young = g1_policy()->gcs_are_young();
2515   bool is_initial_mark = g1_policy()->during_initial_mark_pause();
2516   bool is_during_mark = mark_in_progress();
2517 
2518   if (is_initial_mark) {
2519     return InitialMark;
2520   } else if (is_during_mark) {


3871     g1_policy()->print_detailed_heap_transition();
3872   } else {
3873     if (evacuation_failed()) {
3874       gclog_or_tty->print("--");
3875     }
3876     g1_policy()->print_heap_transition();
3877     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3878   }
3879   gclog_or_tty->flush();
3880 }
3881 
3882 bool
3883 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3884   assert_at_safepoint(true /* should_be_vm_thread */);
3885   guarantee(!is_gc_active(), "collection is not reentrant");
3886 
3887   if (GC_locker::check_active_before_gc()) {
3888     return false;
3889   }
3890 
3891   _gc_timer_stw->register_gc_start();
3892 
3893   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3894 
3895   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3896   ResourceMark rm;
3897 
3898   print_heap_before_gc();
3899   trace_heap_before_gc(_gc_tracer_stw);
3900 
3901   HRSPhaseSetter x(HRSPhaseEvacuation);
3902   verify_region_sets_optional();
3903   verify_dirty_young_regions();
3904 
3905   // This call will decide whether this pause is an initial-mark
3906   // pause. If it is, during_initial_mark_pause() will return true
3907   // for the duration of this pause.
3908   g1_policy()->decide_on_conc_mark_initiation();
3909 
3910   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3911   assert(!g1_policy()->during_initial_mark_pause() ||


4249     // output from the concurrent mark thread interfering with this
4250     // logging output either.
4251 
4252     _hrs.verify_optional();
4253     verify_region_sets_optional();
4254 
4255     TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
4256     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4257 
4258     print_heap_after_gc();
4259     trace_heap_after_gc(_gc_tracer_stw);
4260 
4261     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4262     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4263     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4264     // before any GC notifications are raised.
4265     g1mm()->update_sizes();
4266 
4267     _gc_tracer_stw->report_evacuation_info(&evacuation_info);
4268     _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
4269     _gc_timer_stw->register_gc_end();
4270     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
4271   }
4272   // It should now be safe to tell the concurrent mark thread to start
4273   // without its logging output interfering with the logging output
4274   // that came from the pause.
4275 
4276   if (should_start_conc_mark) {
4277     // CAUTION: after the doConcurrentMark() call below,
4278     // the concurrent marking thread(s) could be running
4279     // concurrently with us. Make sure that anything after
4280     // this point does not assume that we are the only GC thread
4281     // running. Note: of course, the actual marking work will
4282     // not start until the safepoint itself is released in
4283     // ConcurrentGCThread::safepoint_desynchronize().
4284     doConcurrentMark();
4285   }
4286 
4287   return true;
4288 }
4289