src/share/vm/gc/g1/g1CollectedHeap.cpp

Old version (before the change):

  36 #include "gc/g1/g1CollectorPolicy.hpp"
  37 #include "gc/g1/g1CollectorState.hpp"
  38 #include "gc/g1/g1ErgoVerbose.hpp"
  39 #include "gc/g1/g1EvacFailure.hpp"
  40 #include "gc/g1/g1GCPhaseTimes.hpp"
  41 #include "gc/g1/g1Log.hpp"
  42 #include "gc/g1/g1MarkSweep.hpp"
  43 #include "gc/g1/g1OopClosures.inline.hpp"
  44 #include "gc/g1/g1ParScanThreadState.inline.hpp"
  45 #include "gc/g1/g1RegionToSpaceMapper.hpp"
  46 #include "gc/g1/g1RemSet.inline.hpp"
  47 #include "gc/g1/g1RootProcessor.hpp"
  48 #include "gc/g1/g1StringDedup.hpp"
  49 #include "gc/g1/g1YCTypes.hpp"
  50 #include "gc/g1/heapRegion.inline.hpp"
  51 #include "gc/g1/heapRegionRemSet.hpp"
  52 #include "gc/g1/heapRegionSet.inline.hpp"
  53 #include "gc/g1/suspendibleThreadSet.hpp"
  54 #include "gc/g1/vm_operations_g1.hpp"
  55 #include "gc/shared/gcHeapSummary.hpp"
  56 #include "gc/shared/gcLocker.inline.hpp"
  57 #include "gc/shared/gcTimer.hpp"
  58 #include "gc/shared/gcTrace.hpp"
  59 #include "gc/shared/gcTraceTime.hpp"
  60 #include "gc/shared/generationSpec.hpp"
  61 #include "gc/shared/isGCActiveMark.hpp"
  62 #include "gc/shared/referenceProcessor.hpp"
  63 #include "gc/shared/taskqueue.inline.hpp"
  64 #include "memory/allocation.hpp"
  65 #include "memory/iterator.hpp"
  66 #include "oops/oop.inline.hpp"
  67 #include "runtime/atomic.inline.hpp"
  68 #include "runtime/init.hpp"
  69 #include "runtime/orderAccess.inline.hpp"
  70 #include "runtime/vmThread.hpp"
  71 #include "utilities/globalDefinitions.hpp"
  72 #include "utilities/stack.inline.hpp"
  73 
  74 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  75 


1440 };
1441 
1442 void G1CollectedHeap::print_hrm_post_compaction() {
1443   PostCompactionPrinterClosure cl(hr_printer());
1444   heap_region_iterate(&cl);
1445 }
1446 
1447 bool G1CollectedHeap::do_collection(bool explicit_gc,
1448                                     bool clear_all_soft_refs,
1449                                     size_t word_size) {
1450   assert_at_safepoint(true /* should_be_vm_thread */);
1451 
1452   if (GC_locker::check_active_before_gc()) {
1453     return false;
1454   }
1455 
1456   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1457   gc_timer->register_gc_start();
1458 
1459   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1460   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1461 
1462   SvcGCMarker sgcm(SvcGCMarker::FULL);
1463   ResourceMark rm;
1464 
1465   G1Log::update_level();
1466   print_heap_before_gc();
1467   trace_heap_before_gc(gc_tracer);
1468 
1469   size_t metadata_prev_used = MetaspaceAux::used_bytes();
1470 
1471   verify_region_sets_optional();
1472 
1473   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1474                            collector_policy()->should_clear_all_soft_refs();
1475 
1476   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1477 
1478   {
1479     IsGCActiveMark x;
1480 
1481     // Timing
1482     assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1483     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1484 
1485     {
1486       GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL, gc_tracer->gc_id());
1487       TraceCollectorStats tcs(g1mm()->full_collection_counters());
1488       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1489 
1490       g1_policy()->record_full_collection_start();
1491 
1492       // Note: When we have a more flexible GC logging framework that
1493       // allows us to add optional attributes to a GC log record we
1494       // could consider timing and reporting how long we wait in the
1495       // following two methods.
1496       wait_while_free_regions_coming();
1497       // If we start the compaction before the CM threads finish
1498       // scanning the root regions we might trip them over as we'll
1499       // be moving objects / updating references. So let's wait until
1500       // they are done. By telling them to abort, they should complete
1501       // early.
1502       _cm->root_regions()->abort();
1503       _cm->root_regions()->wait_until_scan_finished();
1504       append_secondary_free_list_if_not_empty_with_lock();
1505 
1506       gc_prologue(true);
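
The abort-then-wait handshake described in the comment above is a small protocol of its own: the VM thread asks the concurrent-mark scanners to cut the root-region scan short, then blocks until the last scanner has actually dropped out. A minimal sketch of that shape, with std::mutex and std::condition_variable standing in for HotSpot's Monitor (all names and internals here are illustrative, not the real CMRootRegions interface):

#include <condition_variable>
#include <mutex>

class RootRegionsSketch {
  std::mutex              _m;
  std::condition_variable _cv;
  bool                    _should_abort;
  int                     _scanners_in_flight;
 public:
  RootRegionsSketch(int scanners)
    : _should_abort(false), _scanners_in_flight(scanners) {}

  // Called by the GC thread: ask scanners to finish early.
  void abort() {
    std::lock_guard<std::mutex> l(_m);
    _should_abort = true;
  }

  // Polled by each scanner thread inside its scan loop.
  bool should_abort() {
    std::lock_guard<std::mutex> l(_m);
    return _should_abort;
  }

  // Called once by each scanner thread when it exits its loop.
  void scanner_done() {
    std::lock_guard<std::mutex> l(_m);
    if (--_scanners_in_flight == 0) {
      _cv.notify_all();
    }
  }

  // Called by the GC thread: block until every scanner has finished,
  // so compaction never moves objects under an in-flight scan.
  void wait_until_scan_finished() {
    std::unique_lock<std::mutex> l(_m);
    _cv.wait(l, [this] { return _scanners_in_flight == 0; });
  }
};
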


3911     totals += task_queue(i)->stats;
3912   }
3913   st->print_raw("tot "); totals.print(st); st->cr();
3914 
3915   DEBUG_ONLY(totals.verify());
3916 }
3917 
3918 void G1CollectedHeap::reset_taskqueue_stats() {
3919   const uint n = num_task_queues();
3920   for (uint i = 0; i < n; ++i) {
3921     task_queue(i)->stats.reset();
3922   }
3923 }
3924 #endif // TASKQUEUE_STATS
3925 
3926 void G1CollectedHeap::log_gc_header() {
3927   if (!G1Log::fine()) {
3928     return;
3929   }
3930 
3931   gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
3932 
3933   GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
3934     .append(collector_state()->gcs_are_young() ? "(young)" : "(mixed)")
3935     .append(collector_state()->during_initial_mark_pause() ? " (initial-mark)" : "");
3936 
3937   gclog_or_tty->print("[%s", (const char*)gc_cause_str);
3938 }
3939 
3940 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
3941   if (!G1Log::fine()) {
3942     return;
3943   }
3944 
3945   if (G1Log::finer()) {
3946     if (evacuation_failed()) {
3947       gclog_or_tty->print(" (to-space exhausted)");
3948     }
3949     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3950     g1_policy()->phase_times()->note_gc_end();
3951     g1_policy()->phase_times()->print(pause_time_sec);


3969   bool waited = _cm->root_regions()->wait_until_scan_finished();
3970   double wait_time_ms = 0.0;
3971   if (waited) {
3972     double scan_wait_end = os::elapsedTime();
3973     wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3974   }
3975   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3976 }
3977 
3978 bool
3979 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3980   assert_at_safepoint(true /* should_be_vm_thread */);
3981   guarantee(!is_gc_active(), "collection is not reentrant");
3982 
3983   if (GC_locker::check_active_before_gc()) {
3984     return false;
3985   }
3986 
3987   _gc_timer_stw->register_gc_start();
3988 
3989   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3990 
3991   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3992   ResourceMark rm;
3993 
3994   wait_for_root_region_scanning();
3995 
3996   G1Log::update_level();
3997   print_heap_before_gc();
3998   trace_heap_before_gc(_gc_tracer_stw);
3999 
4000   verify_region_sets_optional();
4001   verify_dirty_young_regions();
4002 
4003   // This call will decide whether this pause is an initial-mark
4004   // pause. If it is, during_initial_mark_pause() will return true
4005   // for the duration of this pause.
4006   g1_policy()->decide_on_conc_mark_initiation();
4007 
4008   // We do not allow initial-mark to be piggy-backed on a mixed GC.


5543     // We also need to mark copied objects.
5544     copy_non_heap_cl = &copy_mark_non_heap_cl;
5545   }
5546 
5547   // Keep alive closure.
5548   G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, pss);
5549 
5550   // Serial Complete GC closure
5551   G1STWDrainQueueClosure drain_queue(this, pss);
5552 
5553   // Set up the soft refs policy...
5554   rp->setup_policy(false);
5555 
5556   ReferenceProcessorStats stats;
5557   if (!rp->processing_is_mt()) {
5558     // Serial reference processing...
5559     stats = rp->process_discovered_references(&is_alive,
5560                                               &keep_alive,
5561                                               &drain_queue,
5562                                               NULL,
5563                                               _gc_timer_stw,
5564                                               _gc_tracer_stw->gc_id());
5565   } else {
5566     // Parallel reference processing
5567     assert(rp->num_q() == no_of_gc_workers, "sanity");
5568     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5569 
5570     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, no_of_gc_workers);
5571     stats = rp->process_discovered_references(&is_alive,
5572                                               &keep_alive,
5573                                               &drain_queue,
5574                                               &par_task_executor,
5575                                               _gc_timer_stw,
5576                                               _gc_tracer_stw->gc_id());
5577   }
5578 
5579   _gc_tracer_stw->report_gc_reference_stats(stats);
5580 
5581   // We have completed copying any necessary live referent objects.
5582   assert(pss->queue_is_empty(), "both queue and overflow should be empty");
5583 
5584   double ref_proc_time = os::elapsedTime() - ref_proc_start;
5585   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
5586 }
5587 
5588 // Weak Reference processing during an evacuation pause (part 2).
5589 void G1CollectedHeap::enqueue_discovered_references(G1ParScanThreadState** per_thread_states) {
5590   double ref_enq_start = os::elapsedTime();
5591 
5592   ReferenceProcessor* rp = _ref_processor_stw;
5593   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
5594 
5595   // Now enqueue any references remaining on the discovered lists
5596   // onto the pending list.

New version (after the change):

  36 #include "gc/g1/g1CollectorPolicy.hpp"
  37 #include "gc/g1/g1CollectorState.hpp"
  38 #include "gc/g1/g1ErgoVerbose.hpp"
  39 #include "gc/g1/g1EvacFailure.hpp"
  40 #include "gc/g1/g1GCPhaseTimes.hpp"
  41 #include "gc/g1/g1Log.hpp"
  42 #include "gc/g1/g1MarkSweep.hpp"
  43 #include "gc/g1/g1OopClosures.inline.hpp"
  44 #include "gc/g1/g1ParScanThreadState.inline.hpp"
  45 #include "gc/g1/g1RegionToSpaceMapper.hpp"
  46 #include "gc/g1/g1RemSet.inline.hpp"
  47 #include "gc/g1/g1RootProcessor.hpp"
  48 #include "gc/g1/g1StringDedup.hpp"
  49 #include "gc/g1/g1YCTypes.hpp"
  50 #include "gc/g1/heapRegion.inline.hpp"
  51 #include "gc/g1/heapRegionRemSet.hpp"
  52 #include "gc/g1/heapRegionSet.inline.hpp"
  53 #include "gc/g1/suspendibleThreadSet.hpp"
  54 #include "gc/g1/vm_operations_g1.hpp"
  55 #include "gc/shared/gcHeapSummary.hpp"
  56 #include "gc/shared/gcId.hpp"
  57 #include "gc/shared/gcLocker.inline.hpp"
  58 #include "gc/shared/gcTimer.hpp"
  59 #include "gc/shared/gcTrace.hpp"
  60 #include "gc/shared/gcTraceTime.hpp"
  61 #include "gc/shared/generationSpec.hpp"
  62 #include "gc/shared/isGCActiveMark.hpp"
  63 #include "gc/shared/referenceProcessor.hpp"
  64 #include "gc/shared/taskqueue.inline.hpp"
  65 #include "memory/allocation.hpp"
  66 #include "memory/iterator.hpp"
  67 #include "oops/oop.inline.hpp"
  68 #include "runtime/atomic.inline.hpp"
  69 #include "runtime/init.hpp"
  70 #include "runtime/orderAccess.inline.hpp"
  71 #include "runtime/vmThread.hpp"
  72 #include "utilities/globalDefinitions.hpp"
  73 #include "utilities/stack.inline.hpp"
  74 
  75 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  76 


1441 };
1442 
1443 void G1CollectedHeap::print_hrm_post_compaction() {
1444   PostCompactionPrinterClosure cl(hr_printer());
1445   heap_region_iterate(&cl);
1446 }
1447 
1448 bool G1CollectedHeap::do_collection(bool explicit_gc,
1449                                     bool clear_all_soft_refs,
1450                                     size_t word_size) {
1451   assert_at_safepoint(true /* should_be_vm_thread */);
1452 
1453   if (GC_locker::check_active_before_gc()) {
1454     return false;
1455   }
1456 
1457   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1458   gc_timer->register_gc_start();
1459 
1460   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1461   GCIdMark gc_id_mark;
1462   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1463 
1464   SvcGCMarker sgcm(SvcGCMarker::FULL);
1465   ResourceMark rm;
1466 
1467   G1Log::update_level();
1468   print_heap_before_gc();
1469   trace_heap_before_gc(gc_tracer);
1470 
1471   size_t metadata_prev_used = MetaspaceAux::used_bytes();
1472 
1473   verify_region_sets_optional();
1474 
1475   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1476                            collector_policy()->should_clear_all_soft_refs();
1477 
1478   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1479 
1480   {
1481     IsGCActiveMark x;
1482 
1483     // Timing
1484     assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1485     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1486 
1487     {
1488       GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
1489       TraceCollectorStats tcs(g1mm()->full_collection_counters());
1490       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1491 
1492       g1_policy()->record_full_collection_start();
1493 
1494       // Note: When we have a more flexible GC logging framework that
1495       // allows us to add optional attributes to a GC log record we
1496       // could consider timing and reporting how long we wait in the
1497       // following two methods.
1498       wait_while_free_regions_coming();
1499       // If we start the compaction before the CM threads finish
1500       // scanning the root regions we might trip them over as we'll
1501       // be moving objects / updating references. So let's wait until
1502       // they are done. By telling them to abort, they should complete
1503       // early.
1504       _cm->root_regions()->abort();
1505       _cm->root_regions()->wait_until_scan_finished();
1506       append_secondary_free_list_if_not_empty_with_lock();
1507 
1508       gc_prologue(true);
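
The substance of this change is now visible: the explicit gc_id arguments threaded through GCTraceTime, gclog_stamp() and process_discovered_references() in the old version are gone, replaced by a single GCIdMark declared near the top of each collection (lines 1461 and 3991) plus the new gc/shared/gcId.hpp include. A minimal sketch of what such an RAII mark could look like (assumed internals; the real class lives in gcId.hpp and is not shown on this page):

// Sketch only: an RAII mark that makes "the current GC id" ambient state
// instead of a parameter. Internals are assumed, not taken from gcId.hpp.
class GCIdMarkSketch {
  static unsigned _next_id;      // id handed out to the next GC
  static unsigned _current_id;   // id of the GC currently in progress
  unsigned        _previous_id;  // restored when the mark leaves scope
 public:
  static const unsigned UNDEFINED = 0xFFFFFFFFu;

  GCIdMarkSketch() : _previous_id(_current_id) {
    _current_id = _next_id++;    // this pause now owns a fresh id
  }
  ~GCIdMarkSketch() {
    _current_id = _previous_id;  // unwind, e.g. for nested marks
  }

  // Timers, tracers and log stamps read the id here, which is why the
  // gc_id parameters could be deleted from their signatures.
  static unsigned current() { return _current_id; }
};

unsigned GCIdMarkSketch::_next_id    = 0;
unsigned GCIdMarkSketch::_current_id = GCIdMarkSketch::UNDEFINED;
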


3913     totals += task_queue(i)->stats;
3914   }
3915   st->print_raw("tot "); totals.print(st); st->cr();
3916 
3917   DEBUG_ONLY(totals.verify());
3918 }
3919 
3920 void G1CollectedHeap::reset_taskqueue_stats() {
3921   const uint n = num_task_queues();
3922   for (uint i = 0; i < n; ++i) {
3923     task_queue(i)->stats.reset();
3924   }
3925 }
3926 #endif // TASKQUEUE_STATS
3927 
3928 void G1CollectedHeap::log_gc_header() {
3929   if (!G1Log::fine()) {
3930     return;
3931   }
3932 
3933   gclog_or_tty->gclog_stamp();
3934 
3935   GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
3936     .append(collector_state()->gcs_are_young() ? "(young)" : "(mixed)")
3937     .append(collector_state()->during_initial_mark_pause() ? " (initial-mark)" : "");
3938 
3939   gclog_or_tty->print("[%s", (const char*)gc_cause_str);
3940 }
3941 
3942 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
3943   if (!G1Log::fine()) {
3944     return;
3945   }
3946 
3947   if (G1Log::finer()) {
3948     if (evacuation_failed()) {
3949       gclog_or_tty->print(" (to-space exhausted)");
3950     }
3951     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3952     g1_policy()->phase_times()->note_gc_end();
3953     g1_policy()->phase_times()->print(pause_time_sec);
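
log_gc_header() above assembles the pause header from chained append() calls: each call returns the builder so the pieces compose in one expression, and the final conversion to const char* yields the text printed after the '['. A sketch of that fluent shape with an assumed fixed-buffer implementation (GCCauseString's real definition is not on this page):

#include <cstdio>

// Fluent-builder sketch; buffer size and overflow handling are assumed,
// not taken from the real GCCauseString.
class CauseStringSketch {
  char        _buf[128];
  std::size_t _pos;
  void clamp() { if (_pos >= sizeof(_buf)) _pos = sizeof(_buf) - 1; }
 public:
  CauseStringSketch(const char* prefix, const char* cause) : _pos(0) {
    _pos += std::snprintf(_buf, sizeof(_buf), "%s (%s) ", prefix, cause);
    clamp();
  }
  CauseStringSketch& append(const char* s) {   // returns *this to chain
    _pos += std::snprintf(_buf + _pos, sizeof(_buf) - _pos, "%s", s);
    clamp();
    return *this;
  }
  operator const char*() const { return _buf; }
};

int main() {
  CauseStringSketch s = CauseStringSketch("GC pause", "G1 Evacuation Pause")
      .append("(young)")
      .append(" (initial-mark)");
  // Prints: [GC pause (G1 Evacuation Pause) (young) (initial-mark)
  std::printf("[%s", (const char*)s);
  return 0;
}
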


3971   bool waited = _cm->root_regions()->wait_until_scan_finished();
3972   double wait_time_ms = 0.0;
3973   if (waited) {
3974     double scan_wait_end = os::elapsedTime();
3975     wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3976   }
3977   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3978 }
3979 
3980 bool
3981 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3982   assert_at_safepoint(true /* should_be_vm_thread */);
3983   guarantee(!is_gc_active(), "collection is not reentrant");
3984 
3985   if (GC_locker::check_active_before_gc()) {
3986     return false;
3987   }
3988 
3989   _gc_timer_stw->register_gc_start();
3990 
3991   GCIdMark gc_id_mark;
3992   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3993 
3994   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3995   ResourceMark rm;
3996 
3997   wait_for_root_region_scanning();
3998 
3999   G1Log::update_level();
4000   print_heap_before_gc();
4001   trace_heap_before_gc(_gc_tracer_stw);
4002 
4003   verify_region_sets_optional();
4004   verify_dirty_young_regions();
4005 
4006   // This call will decide whether this pause is an initial-mark
4007   // pause. If it is, during_initial_mark_pause() will return true
4008   // for the duration of this pause.
4009   g1_policy()->decide_on_conc_mark_initiation();
4010 
4011   // We do not allow initial-mark to be piggy-backed on a mixed GC.
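
decide_on_conc_mark_initiation() follows a decide-once, latch-for-the-pause pattern: the decision is made a single time at the start of the pause, and every later query reads the latched flag, as the comment above notes. A small sketch of that shape (all names illustrative; the real logic lives in G1CollectorPolicy and the collector state, not on this page):

// Sketch of the "decide once, latch for the whole pause" pattern.
class PauseStateSketch {
  bool _initiate_conc_mark_requested;  // set earlier by ergonomics/allocation
  bool _during_initial_mark_pause;     // the latch queried during the pause
 public:
  PauseStateSketch()
    : _initiate_conc_mark_requested(false),
      _during_initial_mark_pause(false) {}

  void request_concurrent_mark() { _initiate_conc_mark_requested = true; }

  // Called once, at the start of the pause.
  void decide_on_conc_mark_initiation() {
    if (_initiate_conc_mark_requested) {
      _during_initial_mark_pause = true;       // latched for this pause
      _initiate_conc_mark_requested = false;   // request is consumed
    }
  }

  // Read anywhere during the pause; stable until the pause ends.
  bool during_initial_mark_pause() const { return _during_initial_mark_pause; }

  void record_pause_end() { _during_initial_mark_pause = false; }
};
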


5546     // We also need to mark copied objects.
5547     copy_non_heap_cl = &copy_mark_non_heap_cl;
5548   }
5549 
5550   // Keep alive closure.
5551   G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, pss);
5552 
5553   // Serial Complete GC closure
5554   G1STWDrainQueueClosure drain_queue(this, pss);
5555 
5556   // Set up the soft refs policy...
5557   rp->setup_policy(false);
5558 
5559   ReferenceProcessorStats stats;
5560   if (!rp->processing_is_mt()) {
5561     // Serial reference processing...
5562     stats = rp->process_discovered_references(&is_alive,
5563                                               &keep_alive,
5564                                               &drain_queue,
5565                                               NULL,
5566                                               _gc_timer_stw);
5567   } else {
5568     // Parallel reference processing
5569     assert(rp->num_q() == no_of_gc_workers, "sanity");
5570     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5571 
5572     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, no_of_gc_workers);
5573     stats = rp->process_discovered_references(&is_alive,
5574                                               &keep_alive,
5575                                               &drain_queue,
5576                                               &par_task_executor,
5577                                               _gc_timer_stw);
5578   }
5579 
5580   _gc_tracer_stw->report_gc_reference_stats(stats);
5581 
5582   // We have completed copying any necessary live referent objects.
5583   assert(pss->queue_is_empty(), "both queue and overflow should be empty");
5584 
5585   double ref_proc_time = os::elapsedTime() - ref_proc_start;
5586   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
5587 }
5588 
5589 // Weak Reference processing during an evacuation pause (part 2).
5590 void G1CollectedHeap::enqueue_discovered_references(G1ParScanThreadState** per_thread_states) {
5591   double ref_enq_start = os::elapsedTime();
5592 
5593   ReferenceProcessor* rp = _ref_processor_stw;
5594   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
5595 
5596   // Now enqueue any references remaining on the discovered lists
5597   // onto the pending list.
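
The serial/parallel split in process_discovered_references() above hinges on one argument: the reference processor receives either NULL (the calling thread drains the discovered lists itself) or a task executor, here G1STWRefProcTaskExecutor, that fans the work out across the GC workers. The shape of that pattern, with assumed interface names (the real referenceProcessor.hpp contract is not shown on this page):

#include <cstddef>

// Sketch of the executor-selection pattern; names are illustrative and do
// not match the real HotSpot interfaces exactly.
struct RefProcTaskSketch {
  virtual void work(unsigned worker_id) = 0;   // process one worker's share
  virtual ~RefProcTaskSketch() {}
};

struct RefProcTaskExecutorSketch {
  virtual void execute(RefProcTaskSketch& task) = 0;  // run on all workers
  virtual ~RefProcTaskExecutorSketch() {}
};

// Mirrors the call sites above: a NULL executor selects the serial path,
// a non-NULL one hands the task to the GC worker threads.
void process_references_sketch(RefProcTaskSketch& task,
                               RefProcTaskExecutorSketch* executor) {
  if (executor == NULL) {
    task.work(0);             // serial: the caller is "worker 0"
  } else {
    executor->execute(task);  // parallel: one work(i) per GC worker
  }
}
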

