36 #include "gc/g1/g1CollectorPolicy.hpp"
37 #include "gc/g1/g1CollectorState.hpp"
38 #include "gc/g1/g1ErgoVerbose.hpp"
39 #include "gc/g1/g1EvacFailure.hpp"
40 #include "gc/g1/g1GCPhaseTimes.hpp"
41 #include "gc/g1/g1Log.hpp"
42 #include "gc/g1/g1MarkSweep.hpp"
43 #include "gc/g1/g1OopClosures.inline.hpp"
44 #include "gc/g1/g1ParScanThreadState.inline.hpp"
45 #include "gc/g1/g1RegionToSpaceMapper.hpp"
46 #include "gc/g1/g1RemSet.inline.hpp"
47 #include "gc/g1/g1RootProcessor.hpp"
48 #include "gc/g1/g1StringDedup.hpp"
49 #include "gc/g1/g1YCTypes.hpp"
50 #include "gc/g1/heapRegion.inline.hpp"
51 #include "gc/g1/heapRegionRemSet.hpp"
52 #include "gc/g1/heapRegionSet.inline.hpp"
53 #include "gc/g1/suspendibleThreadSet.hpp"
54 #include "gc/g1/vm_operations_g1.hpp"
55 #include "gc/shared/gcHeapSummary.hpp"
56 #include "gc/shared/gcLocker.inline.hpp"
57 #include "gc/shared/gcTimer.hpp"
58 #include "gc/shared/gcTrace.hpp"
59 #include "gc/shared/gcTraceTime.hpp"
60 #include "gc/shared/generationSpec.hpp"
61 #include "gc/shared/isGCActiveMark.hpp"
62 #include "gc/shared/referenceProcessor.hpp"
63 #include "gc/shared/taskqueue.inline.hpp"
64 #include "memory/allocation.hpp"
65 #include "memory/iterator.hpp"
66 #include "oops/oop.inline.hpp"
67 #include "runtime/atomic.inline.hpp"
68 #include "runtime/init.hpp"
69 #include "runtime/orderAccess.inline.hpp"
70 #include "runtime/vmThread.hpp"
71 #include "utilities/globalDefinitions.hpp"
72 #include "utilities/stack.inline.hpp"
73
74 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
75
1440 };
1441
1442 void G1CollectedHeap::print_hrm_post_compaction() {
1443 PostCompactionPrinterClosure cl(hr_printer());
1444 heap_region_iterate(&cl);
1445 }
1446
1447 bool G1CollectedHeap::do_collection(bool explicit_gc,
1448 bool clear_all_soft_refs,
1449 size_t word_size) {
1450 assert_at_safepoint(true /* should_be_vm_thread */);
1451
1452 if (GC_locker::check_active_before_gc()) {
1453 return false;
1454 }
1455
1456 STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1457 gc_timer->register_gc_start();
1458
1459 SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1460 gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1461
1462 SvcGCMarker sgcm(SvcGCMarker::FULL);
1463 ResourceMark rm;
1464
1465 G1Log::update_level();
1466 print_heap_before_gc();
1467 trace_heap_before_gc(gc_tracer);
1468
1469 size_t metadata_prev_used = MetaspaceAux::used_bytes();
1470
1471 verify_region_sets_optional();
1472
1473 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1474 collector_policy()->should_clear_all_soft_refs();
1475
1476 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1477
1478 {
1479 IsGCActiveMark x;
1480
1481 // Timing
1482 assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1483 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1484
1485 {
1486 GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL, gc_tracer->gc_id());
1487 TraceCollectorStats tcs(g1mm()->full_collection_counters());
1488 TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1489
1490 g1_policy()->record_full_collection_start();
1491
1492 // Note: When we have a more flexible GC logging framework that
1493 // allows us to add optional attributes to a GC log record we
1494 // could consider timing and reporting how long we wait in the
1495 // following two methods.
1496 wait_while_free_regions_coming();
1497 // If we start the compaction before the CM threads finish
1498 // scanning the root regions we might trip them over as we'll
1499 // be moving objects / updating references. So let's wait until
1500 // they are done. If we tell them to abort, they should complete
1501 // early.
1502 _cm->root_regions()->abort();
1503 _cm->root_regions()->wait_until_scan_finished();
1504 append_secondary_free_list_if_not_empty_with_lock();
1505
1506 gc_prologue(true);
3915 totals += task_queue(i)->stats;
3916 }
3917 st->print_raw("tot "); totals.print(st); st->cr();
3918
3919 DEBUG_ONLY(totals.verify());
3920 }
3921
3922 void G1CollectedHeap::reset_taskqueue_stats() {
3923 const uint n = num_task_queues();
3924 for (uint i = 0; i < n; ++i) {
3925 task_queue(i)->stats.reset();
3926 }
3927 }
3928 #endif // TASKQUEUE_STATS
3929
3930 void G1CollectedHeap::log_gc_header() {
3931 if (!G1Log::fine()) {
3932 return;
3933 }
3934
3935 gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
3936
3937 GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
3938 .append(collector_state()->gcs_are_young() ? "(young)" : "(mixed)")
3939 .append(collector_state()->during_initial_mark_pause() ? " (initial-mark)" : "");
3940
3941 gclog_or_tty->print("[%s", (const char*)gc_cause_str);
3942 }
3943
3944 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
3945 if (!G1Log::fine()) {
3946 return;
3947 }
3948
3949 if (G1Log::finer()) {
3950 if (evacuation_failed()) {
3951 gclog_or_tty->print(" (to-space exhausted)");
3952 }
3953 gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3954 g1_policy()->phase_times()->note_gc_end();
3955 g1_policy()->phase_times()->print(pause_time_sec);
3973 bool waited = _cm->root_regions()->wait_until_scan_finished();
3974 double wait_time_ms = 0.0;
3975 if (waited) {
3976 double scan_wait_end = os::elapsedTime();
3977 wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3978 }
3979 g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3980 }
3981
3982 bool
3983 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3984 assert_at_safepoint(true /* should_be_vm_thread */);
3985 guarantee(!is_gc_active(), "collection is not reentrant");
3986
3987 if (GC_locker::check_active_before_gc()) {
3988 return false;
3989 }
3990
3991 _gc_timer_stw->register_gc_start();
3992
3993 _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3994
3995 SvcGCMarker sgcm(SvcGCMarker::MINOR);
3996 ResourceMark rm;
3997
3998 wait_for_root_region_scanning();
3999
4000 G1Log::update_level();
4001 print_heap_before_gc();
4002 trace_heap_before_gc(_gc_tracer_stw);
4003
4004 verify_region_sets_optional();
4005 verify_dirty_young_regions();
4006
4007 // This call will decide whether this pause is an initial-mark
4008 // pause. If it is, during_initial_mark_pause() will return true
4009 // for the duration of this pause.
4010 g1_policy()->decide_on_conc_mark_initiation();
4011
4012 // We do not allow initial-mark to be piggy-backed on a mixed GC.
4013 assert(!collector_state()->during_initial_mark_pause() ||
4014 collector_state()->gcs_are_young(), "sanity");
4015
4016 // We also do not allow mixed GCs during marking.
4017 assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
4018
4019 // Record whether this pause is an initial mark. By the time the current
4020 // thread has completed its logging output and it is safe to signal
4021 // the CM thread, the flag's value in the policy will have been reset.
4022 bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
4023
4024 // Inner scope for scope-based logging, timers, and stats collection
4025 {
4026 EvacuationInfo evacuation_info;
4027
4028 if (collector_state()->during_initial_mark_pause()) {
4029 // We are about to start a marking cycle, so we increment the
4030 // full collection counter.
4031 increment_old_marking_cycles_started();
4032 register_concurrent_cycle_start(_gc_timer_stw->gc_start());
4033 }
4034
4035 _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
4036
4037 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
4038
4039 uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
4040 workers()->active_workers(),
4041 Threads::number_of_non_daemon_threads());
4042 workers()->set_active_workers(active_workers);
4326 TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) print_taskqueue_stats());
4327 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4328
4329 print_heap_after_gc();
4330 trace_heap_after_gc(_gc_tracer_stw);
4331
4332 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4333 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4334 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4335 // before any GC notifications are raised.
4336 g1mm()->update_sizes();
4337
4338 _gc_tracer_stw->report_evacuation_info(&evacuation_info);
4339 _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
4340 _gc_timer_stw->register_gc_end();
4341 _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
4342 }
4343 // It should now be safe to tell the concurrent mark thread to start
4344 // without its logging output interfering with the logging output
4345 // that came from the pause.
4346
4347 if (should_start_conc_mark) {
4348 // CAUTION: after the doConcurrentMark() call below,
4349 // the concurrent marking thread(s) could be running
4350 // concurrently with us. Make sure that anything after
4351 // this point does not assume that we are the only GC thread
4352 // running. Note: of course, the actual marking work will
4353 // not start until the safepoint itself is released in
4354 // SuspendibleThreadSet::desynchronize().
4355 doConcurrentMark();
4356 }
4357
4358 return true;
4359 }
4360
4361 void G1CollectedHeap::remove_self_forwarding_pointers() {
4362 double remove_self_forwards_start = os::elapsedTime();
4363
4364 G1ParRemoveSelfForwardPtrsTask rsfp_task;
4365 workers()->run_task(&rsfp_task);
5548 // We also need to mark copied objects.
5549 copy_non_heap_cl = &copy_mark_non_heap_cl;
5550 }
5551
5552 // Keep alive closure.
5553 G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, pss);
5554
5555 // Serial Complete GC closure
5556 G1STWDrainQueueClosure drain_queue(this, pss);
5557
5558 // Setup the soft refs policy...
5559 rp->setup_policy(false);
5560
5561 ReferenceProcessorStats stats;
5562 if (!rp->processing_is_mt()) {
5563 // Serial reference processing...
5564 stats = rp->process_discovered_references(&is_alive,
5565 &keep_alive,
5566 &drain_queue,
5567 NULL,
5568 _gc_timer_stw,
5569 _gc_tracer_stw->gc_id());
5570 } else {
5571 // Parallel reference processing
5572 assert(rp->num_q() == no_of_gc_workers, "sanity");
5573 assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5574
5575 G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, no_of_gc_workers);
5576 stats = rp->process_discovered_references(&is_alive,
5577 &keep_alive,
5578 &drain_queue,
5579 &par_task_executor,
5580 _gc_timer_stw,
5581 _gc_tracer_stw->gc_id());
5582 }
5583
5584 _gc_tracer_stw->report_gc_reference_stats(stats);
5585
5586 // We have completed copying any necessary live referent objects.
5587 assert(pss->queue_is_empty(), "both queue and overflow should be empty");
5588
5589 double ref_proc_time = os::elapsedTime() - ref_proc_start;
5590 g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
5591 }
5592
5593 // Weak Reference processing during an evacuation pause (part 2).
5594 void G1CollectedHeap::enqueue_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
5595 double ref_enq_start = os::elapsedTime();
5596
5597 ReferenceProcessor* rp = _ref_processor_stw;
5598 assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
5599
5600 // Now enqueue any remaining references on the discovered lists onto
5601 // the pending list.
36 #include "gc/g1/g1CollectorPolicy.hpp"
37 #include "gc/g1/g1CollectorState.hpp"
38 #include "gc/g1/g1ErgoVerbose.hpp"
39 #include "gc/g1/g1EvacFailure.hpp"
40 #include "gc/g1/g1GCPhaseTimes.hpp"
41 #include "gc/g1/g1Log.hpp"
42 #include "gc/g1/g1MarkSweep.hpp"
43 #include "gc/g1/g1OopClosures.inline.hpp"
44 #include "gc/g1/g1ParScanThreadState.inline.hpp"
45 #include "gc/g1/g1RegionToSpaceMapper.hpp"
46 #include "gc/g1/g1RemSet.inline.hpp"
47 #include "gc/g1/g1RootProcessor.hpp"
48 #include "gc/g1/g1StringDedup.hpp"
49 #include "gc/g1/g1YCTypes.hpp"
50 #include "gc/g1/heapRegion.inline.hpp"
51 #include "gc/g1/heapRegionRemSet.hpp"
52 #include "gc/g1/heapRegionSet.inline.hpp"
53 #include "gc/g1/suspendibleThreadSet.hpp"
54 #include "gc/g1/vm_operations_g1.hpp"
55 #include "gc/shared/gcHeapSummary.hpp"
56 #include "gc/shared/gcId.hpp"
57 #include "gc/shared/gcLocker.inline.hpp"
58 #include "gc/shared/gcTimer.hpp"
59 #include "gc/shared/gcTrace.hpp"
60 #include "gc/shared/gcTraceTime.hpp"
61 #include "gc/shared/generationSpec.hpp"
62 #include "gc/shared/isGCActiveMark.hpp"
63 #include "gc/shared/referenceProcessor.hpp"
64 #include "gc/shared/taskqueue.inline.hpp"
65 #include "memory/allocation.hpp"
66 #include "memory/iterator.hpp"
67 #include "oops/oop.inline.hpp"
68 #include "runtime/atomic.inline.hpp"
69 #include "runtime/init.hpp"
70 #include "runtime/orderAccess.inline.hpp"
71 #include "runtime/vmThread.hpp"
72 #include "utilities/globalDefinitions.hpp"
73 #include "utilities/stack.inline.hpp"
74
75 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
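// For context: this threshold is assigned during heap initialization, once
// the region size is known. A hedged sketch of the convention (see
// g1CollectedHeap.{hpp,cpp} for the authoritative code; the exact
// comparison may differ by release):
//
//   // in G1CollectedHeap::initialize():
//   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
//
//   // an allocation is "humongous" when it will not fit in half a region:
//   static bool is_humongous(size_t word_size) {
//     return word_size > _humongous_object_threshold_in_words;
//   }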
76
1441 };
1442
1443 void G1CollectedHeap::print_hrm_post_compaction() {
1444 PostCompactionPrinterClosure cl(hr_printer());
1445 heap_region_iterate(&cl);
1446 }
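// heap_region_iterate() applies a HeapRegionClosure to every region in
// sequence. A minimal sketch of the closure contract it relies on, assuming
// the declaration in gc/g1/heapRegion.hpp:
//
//   class HeapRegionClosure : public StackObj {
//    public:
//     // Return true to terminate the iteration early, false to continue.
//     virtual bool doHeapRegion(HeapRegion* r) = 0;
//   };
//
// PostCompactionPrinterClosure above is one such closure; it simply forwards
// each region to the HR printer.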
1447
1448 bool G1CollectedHeap::do_collection(bool explicit_gc,
1449 bool clear_all_soft_refs,
1450 size_t word_size) {
1451 assert_at_safepoint(true /* should_be_vm_thread */);
1452
1453 if (GC_locker::check_active_before_gc()) {
1454 return false;
1455 }
1456
1457 STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1458 gc_timer->register_gc_start();
1459
1460 SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1461 GCIdMark gc_id_mark;
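  // GCIdMark is why the explicit gc_id arguments disappear on this side of
  // the change: it is a scoped (RAII) mark that installs a fresh GC id for
  // the duration of the collection, which consumers such as gclog_stamp()
  // and GCTraceTime read back via GCId::current(). A hedged sketch of the
  // pattern, with hypothetical member names (see gc/shared/gcId.hpp):
  //
  //   class GCIdMark : public StackObj {
  //     uint _previous;                    // id to restore on scope exit
  //    public:
  //     GCIdMark()  { _previous = GCId::current_raw();
  //                   /* install a newly created id as current */ }
  //     ~GCIdMark() { /* restore _previous as the current id */ }
  //   };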
1462 gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1463
1464 SvcGCMarker sgcm(SvcGCMarker::FULL);
1465 ResourceMark rm;
1466
1467 G1Log::update_level();
1468 print_heap_before_gc();
1469 trace_heap_before_gc(gc_tracer);
1470
1471 size_t metadata_prev_used = MetaspaceAux::used_bytes();
1472
1473 verify_region_sets_optional();
1474
1475 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1476 collector_policy()->should_clear_all_soft_refs();
1477
1478 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
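  // ClearedAllSoftRefs is an RAII helper: if this collection is going to
  // clear all soft references, its destructor reports that back to the
  // collector policy when the collection is done. A sketch of its shape,
  // assuming the declaration in gc/shared/collectorPolicy.hpp:
  //
  //   class ClearedAllSoftRefs : public StackObj {
  //     bool _clear_all_soft_refs;
  //     CollectorPolicy* _collector_policy;
  //    public:
  //     ClearedAllSoftRefs(bool clear, CollectorPolicy* policy)
  //       : _clear_all_soft_refs(clear), _collector_policy(policy) {}
  //     ~ClearedAllSoftRefs() {
  //       if (_clear_all_soft_refs) {
  //         _collector_policy->cleared_all_soft_refs();
  //       }
  //     }
  //   };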
1479
1480 {
1481 IsGCActiveMark x;
1482
1483 // Timing
1484 assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1485 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1486
1487 {
1488 GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
1489 TraceCollectorStats tcs(g1mm()->full_collection_counters());
1490 TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1491
1492 g1_policy()->record_full_collection_start();
1493
1494 // Note: When we have a more flexible GC logging framework that
1495 // allows us to add optional attributes to a GC log record we
1496 // could consider timing and reporting how long we wait in the
1497 // following two methods.
1498 wait_while_free_regions_coming();
1499 // If we start the compaction before the CM threads finish
1500 // scanning the root regions we might trip them over as we'll
1501 // be moving objects / updating references. So let's wait until
1502 // they are done. If we tell them to abort, they should complete
1503 // early.
1504 _cm->root_regions()->abort();
1505 _cm->root_regions()->wait_until_scan_finished();
1506 append_secondary_free_list_if_not_empty_with_lock();
1507
1508 gc_prologue(true);
3917 totals += task_queue(i)->stats;
3918 }
3919 st->print_raw("tot "); totals.print(st); st->cr();
3920
3921 DEBUG_ONLY(totals.verify());
3922 }
3923
3924 void G1CollectedHeap::reset_taskqueue_stats() {
3925 const uint n = num_task_queues();
3926 for (uint i = 0; i < n; ++i) {
3927 task_queue(i)->stats.reset();
3928 }
3929 }
3930 #endif // TASKQUEUE_STATS
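// TASKQUEUE_STATS_ONLY(code), used below, compiles its argument in only when
// task queue statistics are enabled; roughly (see gc/shared/taskqueue.hpp):
//
//   #if TASKQUEUE_STATS
//   #define TASKQUEUE_STATS_ONLY(code) code
//   #else
//   #define TASKQUEUE_STATS_ONLY(code)
//   #endif // TASKQUEUE_STATS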
3931
3932 void G1CollectedHeap::log_gc_header() {
3933 if (!G1Log::fine()) {
3934 return;
3935 }
3936
3937 gclog_or_tty->gclog_stamp();
3938
3939 GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
3940 .append(collector_state()->gcs_are_young() ? "(young)" : "(mixed)")
3941 .append(collector_state()->during_initial_mark_pause() ? " (initial-mark)" : "");
3942
3943 gclog_or_tty->print("[%s", (const char*)gc_cause_str);
3944 }
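// Together with log_gc_footer() below, this emits (pre-unified-logging)
// pause lines of roughly the following shape; illustrative only, not
// captured from a real run:
//
//   [GC pause (G1 Evacuation Pause) (young) (initial-mark), 0.0321675 secs]
//   [GC pause (G1 Humongous Allocation) (mixed) (to-space exhausted), 0.2154350 secs]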
3945
3946 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
3947 if (!G1Log::fine()) {
3948 return;
3949 }
3950
3951 if (G1Log::finer()) {
3952 if (evacuation_failed()) {
3953 gclog_or_tty->print(" (to-space exhausted)");
3954 }
3955 gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3956 g1_policy()->phase_times()->note_gc_end();
3957 g1_policy()->phase_times()->print(pause_time_sec);
3975 bool waited = _cm->root_regions()->wait_until_scan_finished();
3976 double wait_time_ms = 0.0;
3977 if (waited) {
3978 double scan_wait_end = os::elapsedTime();
3979 wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3980 }
3981 g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3982 }
3983
3984 bool
3985 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3986 assert_at_safepoint(true /* should_be_vm_thread */);
3987 guarantee(!is_gc_active(), "collection is not reentrant");
3988
3989 if (GC_locker::check_active_before_gc()) {
3990 return false;
3991 }
3992
3993 _gc_timer_stw->register_gc_start();
3994
3995
3996 SvcGCMarker sgcm(SvcGCMarker::MINOR);
3997 ResourceMark rm;
3998
3999 wait_for_root_region_scanning();
4000
4001 bool should_start_conc_mark = false;
4002 {
4003 GCIdMark gc_id_mark;
4004 _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
4005
4006 G1Log::update_level();
4007 print_heap_before_gc();
4008 trace_heap_before_gc(_gc_tracer_stw);
4009
4010 verify_region_sets_optional();
4011 verify_dirty_young_regions();
4012
4013 // This call will decide whether this pause is an initial-mark
4014 // pause. If it is, during_initial_mark_pause() will return true
4015 // for the duration of this pause.
4016 g1_policy()->decide_on_conc_mark_initiation();
4017
4018 // We do not allow initial-mark to be piggy-backed on a mixed GC.
4019 assert(!collector_state()->during_initial_mark_pause() ||
4020 collector_state()->gcs_are_young(), "sanity");
4021
4022 // We also do not allow mixed GCs during marking.
4023 assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
4024
4025 // Record whether this pause is an initial mark. By the time the current
4026 // thread has completed its logging output and it is safe to signal
4027 // the CM thread, the flag's value in the policy will have been reset.
4028 should_start_conc_mark = collector_state()->during_initial_mark_pause();
4029
4030 // Inner scope for scope-based logging, timers, and stats collection
4031 {
4032 EvacuationInfo evacuation_info;
4033
4034 if (collector_state()->during_initial_mark_pause()) {
4035 // We are about to start a marking cycle, so we increment the
4036 // full collection counter.
4037 increment_old_marking_cycles_started();
4038 register_concurrent_cycle_start(_gc_timer_stw->gc_start());
4039 }
4040
4041 _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
4042
4043 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
4044
4045 uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
4046 workers()->active_workers(),
4047 Threads::number_of_non_daemon_threads());
4048 workers()->set_active_workers(active_workers);
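    // calc_active_workers() sizes the worker gang for this pause. A hedged
    // sketch of the heuristic (see gc/shared/adaptiveSizePolicy.cpp; the
    // details vary by release, and choose_workers() below is a hypothetical
    // placeholder):
    //
    //   if (!UseDynamicNumberOfGCThreads) {
    //     return total_workers;               // always use the full gang
    //   }
    //   // otherwise scale with application activity, bounded by the gang:
    //   uint wanted = choose_workers(active_workers, application_workers);
    //   return MIN2(wanted, total_workers);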
4332 TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) print_taskqueue_stats());
4333 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4334
4335 print_heap_after_gc();
4336 trace_heap_after_gc(_gc_tracer_stw);
4337
4338 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4339 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4340 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4341 // before any GC notifications are raised.
4342 g1mm()->update_sizes();
4343
4344 _gc_tracer_stw->report_evacuation_info(&evacuation_info);
4345 _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
4346 _gc_timer_stw->register_gc_end();
4347 _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
4348 }
4349 // It should now be safe to tell the concurrent mark thread to start
4350 // without its logging output interfering with the logging output
4351 // that came from the pause.
4352 }
4353
4354 if (should_start_conc_mark) {
4355 // CAUTION: after the doConcurrentMark() call below,
4356 // the concurrent marking thread(s) could be running
4357 // concurrently with us. Make sure that anything after
4358 // this point does not assume that we are the only GC thread
4359 // running. Note: of course, the actual marking work will
4360 // not start until the safepoint itself is released in
4361 // SuspendibleThreadSet::desynchronize().
4362 doConcurrentMark();
4363 }
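  // Why the marking work itself still waits: the concurrent mark workers run
  // inside the suspendible thread set, so they cannot make progress while a
  // safepoint is synchronized. A sketch of the worker-side protocol, assuming
  // the API in gc/g1/suspendibleThreadSet.hpp:
  //
  //   SuspendibleThreadSet::join();   // blocks while the set is synchronized
  //   ... do a chunk of marking work, calling yield() at safe points ...
  //   SuspendibleThreadSet::leave();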
4364
4365 return true;
4366 }
4367
4368 void G1CollectedHeap::remove_self_forwarding_pointers() {
4369 double remove_self_forwards_start = os::elapsedTime();
4370
4371 G1ParRemoveSelfForwardPtrsTask rsfp_task;
4372 workers()->run_task(&rsfp_task);
5555 // We also need to mark copied objects.
5556 copy_non_heap_cl = &copy_mark_non_heap_cl;
5557 }
5558
5559 // Keep alive closure.
5560 G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, pss);
5561
5562 // Serial Complete GC closure
5563 G1STWDrainQueueClosure drain_queue(this, pss);
5564
5565 // Setup the soft refs policy...
5566 rp->setup_policy(false);
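  // setup_policy(false) selects the default SoftReference clearing policy
  // (clear only sufficiently stale referents) rather than the always-clear
  // policy that a full GC with do_clear_all_soft_refs would request. A sketch
  // of the idea, assuming the ReferenceProcessor internals:
  //
  //   _current_soft_ref_policy = always_clear ? _always_clear_soft_ref_policy
  //                                           : _default_soft_ref_policy;
  //   _current_soft_ref_policy->setup();   // e.g. snapshot the clock state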
5567
5568 ReferenceProcessorStats stats;
5569 if (!rp->processing_is_mt()) {
5570 // Serial reference processing...
5571 stats = rp->process_discovered_references(&is_alive,
5572 &keep_alive,
5573 &drain_queue,
5574 NULL,
5575 _gc_timer_stw);
5576 } else {
5577 // Parallel reference processing
5578 assert(rp->num_q() == no_of_gc_workers, "sanity");
5579 assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5580
5581 G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, no_of_gc_workers);
5582 stats = rp->process_discovered_references(&is_alive,
5583 &keep_alive,
5584 &drain_queue,
5585 &par_task_executor,
5586 _gc_timer_stw);
5587 }
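  // The executor is the bridge between the collector-agnostic reference
  // processor and G1's work gang: process_discovered_references() calls back
  // into it with tasks to run in parallel. A hedged sketch of the contract,
  // assuming AbstractRefProcTaskExecutor in gc/shared/referenceProcessor.hpp:
  //
  //   class G1STWRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
  //     virtual void execute(ProcessTask& task);   // run on all GC workers
  //     virtual void execute(EnqueueTask& task);   // ditto, for enqueueing
  //   };
  //
  // Each worker drains its own discovered-reference sublist, which is why
  // num_q() must match the number of GC workers (asserted above).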
5588
5589 _gc_tracer_stw->report_gc_reference_stats(stats);
5590
5591 // We have completed copying any necessary live referent objects.
5592 assert(pss->queue_is_empty(), "both queue and overflow should be empty");
5593
5594 double ref_proc_time = os::elapsedTime() - ref_proc_start;
5595 g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
5596 }
5597
5598 // Weak Reference processing during an evacuation pause (part 2).
5599 void G1CollectedHeap::enqueue_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
5600 double ref_enq_start = os::elapsedTime();
5601
5602 ReferenceProcessor* rp = _ref_processor_stw;
5603 assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
5604
5605 // Now enqueue any remaining references on the discovered lists onto
5606 // the pending list.