1 /*
2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
344 curr->prev_top_at_mark_start(),
345 curr->next_top_at_mark_start(),
346 curr->age_in_surv_rate_group_cond());
347 curr = curr->get_next_young_region();
348 }
349 }
350
351 gclog_or_tty->cr();
352 }
353
354 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
355 HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
356 }
357
358 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
359 // The from card cache is not the memory that is actually committed. So we cannot
360 // take advantage of the zero_filled parameter.
361 reset_from_card_cache(start_idx, num_regions);
362 }
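// Note on the from-card cache (our understanding, inferred from the comment
// above): it memoizes, per thread and per region, the last card added to a
// remembered set so duplicate refinements can be filtered cheaply. A mapping
// change can leave stale card indices behind for the affected regions, so
// on_commit always invalidates the cache for the range
// [start_idx, start_idx + num_regions), regardless of zero_filled.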
363
364 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
365 {
366 // Claim the right to put the region on the dirty cards region list
367 // by installing a self pointer.
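// The claim-then-push below is a lock-free, two-step protocol. Step 1:
// CAS a self pointer into hr's next field (NULL means "not claimed, not
// on the list"), so exactly one thread wins the right to enqueue hr.
// Step 2: the winner publishes hr as the new list head with a CAS retry
// loop. A minimal sketch of step 2 (illustrative only; the real calls use
// Atomic::cmpxchg_ptr(new_value, dest, expected)):
//
//   do {
//     head = _dirty_cards_region_list;                  // snapshot head
//     next = CAS(&_dirty_cards_region_list, head, hr);  // try to publish
//   } while (next != head);                             // lost the race, retry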
368 HeapRegion* next = hr->get_next_dirty_cards_region();
369 if (next == NULL) {
370 HeapRegion* res = (HeapRegion*)
371 Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
372 NULL);
373 if (res == NULL) {
374 HeapRegion* head;
375 do {
376 // Put the region to the dirty cards region list.
377 head = _dirty_cards_region_list;
378 next = (HeapRegion*)
379 Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
380 if (next == head) {
381 assert(hr->get_next_dirty_cards_region() == hr,
382 "hr->get_next_dirty_cards_region() != hr");
383 if (next == NULL) {
1059 }
1060
1061 ShouldNotReachHere();
1062 return NULL;
1063 }
1064
1065 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
1066 AllocationContext_t context,
1067 bool expect_null_mutator_alloc_region) {
1068 assert_at_safepoint(true /* should_be_vm_thread */);
1069 assert(_allocator->mutator_alloc_region(context)->get() == NULL ||
1070 !expect_null_mutator_alloc_region,
1071 "the current alloc region was unexpectedly found to be non-NULL");
1072
1073 if (!is_humongous(word_size)) {
1074 return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
1075 false /* bot_updates */);
1076 } else {
1077 HeapWord* result = humongous_obj_allocate(word_size, context);
1078 if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1079 g1_policy()->set_initiate_conc_mark_if_possible();
1080 }
1081 return result;
1082 }
1083
1084 ShouldNotReachHere();
1085 }
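// Background note: is_humongous(word_size) is true when the request is too
// large for a regular region (in G1, at least half a heap region in size).
// Such allocations consume old-generation space abruptly, which is why a
// successful one asks the policy whether a concurrent marking cycle needs
// to be initiated.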
1086
1087 class PostMCRemSetClearClosure: public HeapRegionClosure {
1088 G1CollectedHeap* _g1h;
1089 ModRefBarrierSet* _mr_bs;
1090 public:
1091 PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
1092 _g1h(g1h), _mr_bs(mr_bs) {}
1093
1094 bool doHeapRegion(HeapRegion* r) {
1095 HeapRegionRemSet* hrrs = r->rem_set();
1096
1097 if (r->is_continues_humongous()) {
1098 // We'll assert that the strong code root list and RSet are empty
1099 assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
1271
1272 // Make sure we'll choose a new allocation region afterwards.
1273 _allocator->release_mutator_alloc_region();
1274 _allocator->abandon_gc_alloc_regions();
1275 g1_rem_set()->cleanupHRRS();
1276
1277 // We should call this after we retire any currently active alloc
1278 // regions so that all the ALLOC / RETIRE events are generated
1279 // before the start GC event.
1280 _hr_printer.start_gc(true /* full */, (size_t) total_collections());
1281
1282 // We may have added regions to the current incremental collection
1283 // set between the last GC or pause and now. We need to clear the
1284 // incremental collection set and then start rebuilding it afresh
1285 // after this full GC.
1286 abandon_collection_set(g1_policy()->inc_cset_head());
1287 g1_policy()->clear_incremental_cset();
1288 g1_policy()->stop_incremental_cset_building();
1289
1290 tear_down_region_sets(false /* free_list_only */);
1291 g1_policy()->set_gcs_are_young(true);
1292
1293 // See the comments in g1CollectedHeap.hpp and
1294 // G1CollectedHeap::ref_processing_init() about
1295 // how reference processing currently works in G1.
1296
1297 // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1298 ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1299
1300 // Temporarily clear the STW ref processor's _is_alive_non_header field.
1301 ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
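// Both *Mutator helpers above are RAII guards: their constructors overwrite
// one setting of the given ReferenceProcessor (MT discovery, the
// _is_alive_non_header closure) and their destructors restore the previous
// value on scope exit, so the STW ref processor reverts to its normal
// configuration once the full collection is done.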
1302
1303 ref_processor_stw()->enable_discovery();
1304 ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1305
1306 // Do collection work
1307 {
1308 HandleMark hm; // Discard invalid handles created during gc
1309 G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1310 }
1311
1753
1754 // Public methods.
1755
1756 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
1757 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1758 #endif // _MSC_VER
1759
1760
1761 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1762 SharedHeap(policy_),
1763 _g1_policy(policy_),
1764 _dirty_card_queue_set(false),
1765 _into_cset_dirty_card_queue_set(false),
1766 _is_alive_closure_cm(this),
1767 _is_alive_closure_stw(this),
1768 _ref_processor_cm(NULL),
1769 _ref_processor_stw(NULL),
1770 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
1771 _bot_shared(NULL),
1772 _evac_failure_scan_stack(NULL),
1773 _mark_in_progress(false),
1774 _cg1r(NULL),
1775 _g1mm(NULL),
1776 _refine_cte_cl(NULL),
1777 _full_collection(false),
1778 _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1779 _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1780 _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1781 _humongous_is_live(),
1782 _has_humongous_reclaim_candidates(false),
1783 _free_regions_coming(false),
1784 _young_list(new YoungList(this)),
1785 _gc_time_stamp(0),
1786 _survivor_plab_stats(YoungPLABSize, PLABWeight),
1787 _old_plab_stats(OldPLABSize, PLABWeight),
1788 _expand_heap_after_alloc_failure(true),
1789 _surviving_young_words(NULL),
1790 _old_marking_cycles_started(0),
1791 _old_marking_cycles_completed(0),
1792 _concurrent_cycle_started(false),
1793 _heap_summary_sent(false),
2361 _heap_summary_sent = false;
2362 }
2363 }
2364
2365 void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
2366 if (_concurrent_cycle_started) {
2367 // This function can be called when:
2368 // the cleanup pause is run
2369 // the concurrent cycle is aborted before the cleanup pause.
2370 // the concurrent cycle is aborted after the cleanup pause,
2371 // but before the concurrent cycle end has been registered.
2372 // Make sure that we only send the heap information once.
2373 if (!_heap_summary_sent) {
2374 trace_heap_after_gc(_gc_tracer_cm);
2375 _heap_summary_sent = true;
2376 }
2377 }
2378 }
2379
2380 G1YCType G1CollectedHeap::yc_type() {
2381 bool is_young = g1_policy()->gcs_are_young();
2382 bool is_initial_mark = g1_policy()->during_initial_mark_pause();
2383 bool is_during_mark = mark_in_progress();
2384
2385 if (is_initial_mark) {
2386 return InitialMark;
2387 } else if (is_during_mark) {
2388 return DuringMark;
2389 } else if (is_young) {
2390 return Normal;
2391 } else {
2392 return Mixed;
2393 }
2394 }
2395
2396 void G1CollectedHeap::collect(GCCause::Cause cause) {
2397 assert_heap_not_locked();
2398
2399 unsigned int gc_count_before;
2400 unsigned int old_marking_count_before;
2401 unsigned int full_gc_count_before;
2402 bool retry_gc;
3646
3647 DEBUG_ONLY(totals.verify());
3648 }
3649
3650 void G1CollectedHeap::reset_taskqueue_stats() {
3651 const int n = workers()->total_workers();
3652 for (int i = 0; i < n; ++i) {
3653 task_queue(i)->stats.reset();
3654 }
3655 }
3656 #endif // TASKQUEUE_STATS
3657
3658 void G1CollectedHeap::log_gc_header() {
3659 if (!G1Log::fine()) {
3660 return;
3661 }
3662
3663 gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
3664
3665 GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
3666 .append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)")
3667 .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
3668
3669 gclog_or_tty->print("[%s", (const char*)gc_cause_str);
3670 }
3671
3672 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
3673 if (!G1Log::fine()) {
3674 return;
3675 }
3676
3677 if (G1Log::finer()) {
3678 if (evacuation_failed()) {
3679 gclog_or_tty->print(" (to-space exhausted)");
3680 }
3681 gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3682 g1_policy()->phase_times()->note_gc_end();
3683 g1_policy()->phase_times()->print(pause_time_sec);
3684 g1_policy()->print_detailed_heap_transition();
3685 } else {
3686 if (evacuation_failed()) {
3687 gclog_or_tty->print("--");
3703
3704 _gc_timer_stw->register_gc_start();
3705
3706 _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3707
3708 SvcGCMarker sgcm(SvcGCMarker::MINOR);
3709 ResourceMark rm;
3710
3711 print_heap_before_gc();
3712 trace_heap_before_gc(_gc_tracer_stw);
3713
3714 verify_region_sets_optional();
3715 verify_dirty_young_regions();
3716
3717 // This call will decide whether this pause is an initial-mark
3718 // pause. If it is, during_initial_mark_pause() will return true
3719 // for the duration of this pause.
3720 g1_policy()->decide_on_conc_mark_initiation();
3721
3722 // We do not allow initial-mark to be piggy-backed on a mixed GC.
3723 assert(!g1_policy()->during_initial_mark_pause() ||
3724 g1_policy()->gcs_are_young(), "sanity");
3725
3726 // We also do not allow mixed GCs during marking.
3727 assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
3728
3729 // Record whether this pause is an initial mark. By the time the current
3730 // thread has completed its logging output and it is safe to signal
3731 // the CM thread, the flag's value in the policy will have been reset.
3732 bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
3733
3734 // Inner scope for scope based logging, timers, and stats collection
3735 {
3736 EvacuationInfo evacuation_info;
3737
3738 if (g1_policy()->during_initial_mark_pause()) {
3739 // We are about to start a marking cycle, so we increment the
3740 // full collection counter.
3741 increment_old_marking_cycles_started();
3742 register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3743 }
3744
3745 _gc_tracer_stw->report_yc_type(yc_type());
3746
3747 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3748
3749 int active_workers = workers()->active_workers();
3750 double pause_start_sec = os::elapsedTime();
3751 g1_policy()->phase_times()->note_gc_start(active_workers);
3752 log_gc_header();
3753
3754 TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3755 TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3756
3757 // If the secondary_free_list is not empty, append it to the
3758 // free_list. No need to wait for the cleanup operation to finish;
3825 g1_policy()->record_collection_pause_start(sample_start_time_sec);
3826
3827 double scan_wait_start = os::elapsedTime();
3828 // We have to wait until the CM threads finish scanning the
3829 // root regions as it's the only way to ensure that all the
3830 // objects on them have been correctly scanned before we start
3831 // moving them during the GC.
3832 bool waited = _cm->root_regions()->wait_until_scan_finished();
3833 double wait_time_ms = 0.0;
3834 if (waited) {
3835 double scan_wait_end = os::elapsedTime();
3836 wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3837 }
3838 g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3839
3840 #if YOUNG_LIST_VERBOSE
3841 gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
3842 _young_list->print();
3843 #endif // YOUNG_LIST_VERBOSE
3844
3845 if (g1_policy()->during_initial_mark_pause()) {
3846 concurrent_mark()->checkpointRootsInitialPre();
3847 }
3848
3849 #if YOUNG_LIST_VERBOSE
3850 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
3851 _young_list->print();
3852 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3853 #endif // YOUNG_LIST_VERBOSE
3854
3855 g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
3856
3857 register_humongous_regions_with_in_cset_fast_test();
3858
3859 assert(check_cset_fast_test(), "Inconsistency in the InCSetState table.");
3860
3861 _cm->note_start_of_gc();
3862 // We should not verify the per-thread SATB buffers given that
3863 // we have not filtered them yet (we'll do so during the
3864 // GC). We also call this after finalize_cset() to
3865 // ensure that the CSet has been finalized.
3929 g1_policy()->record_survivor_regions(_young_list->survivor_length(),
3930 _young_list->first_survivor_region(),
3931 _young_list->last_survivor_region());
3932
3933 _young_list->reset_auxilary_lists();
3934
3935 if (evacuation_failed()) {
3936 _allocator->set_used(recalculate_used());
3937 uint n_queues = MAX2((int)ParallelGCThreads, 1);
3938 for (uint i = 0; i < n_queues; i++) {
3939 if (_evacuation_failed_info_array[i].has_failed()) {
3940 _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
3941 }
3942 }
3943 } else {
3944 // The "used" of the collection set regions has already been subtracted
3945 // when they were freed. Add in the bytes evacuated.
3946 _allocator->increase_used(g1_policy()->bytes_copied_during_gc());
3947 }
3948
3949 if (g1_policy()->during_initial_mark_pause()) {
3950 // We have to do this before we notify the CM threads that
3951 // they can start working to make sure that all the
3952 // appropriate initialization is done on the CM object.
3953 concurrent_mark()->checkpointRootsInitialPost();
3954 set_marking_started();
3955 // Note that we don't actually trigger the CM thread at
3956 // this point. We do that later when we're sure that
3957 // the current thread has completed its logging output.
3958 }
3959
3960 allocate_dummy_regions();
3961
3962 #if YOUNG_LIST_VERBOSE
3963 gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
3964 _young_list->print();
3965 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3966 #endif // YOUNG_LIST_VERBOSE
3967
3968 _allocator->init_mutator_alloc_region();
3969
4478 }
4479 };
4480
4481 void work(uint worker_id) {
4482 if (worker_id >= _n_workers) return; // no work needed this round
4483
4484 double start_time_ms = os::elapsedTime() * 1000.0;
4485 _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
4486
4487 {
4488 ResourceMark rm;
4489 HandleMark hm;
4490
4491 ReferenceProcessor* rp = _g1h->ref_processor_stw();
4492
4493 G1ParScanThreadState pss(_g1h, worker_id, rp);
4494 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
4495
4496 pss.set_evac_failure_closure(&evac_failure_cl);
4497
4498 bool only_young = _g1h->g1_policy()->gcs_are_young();
4499
4500 // Non-IM young GC.
4501 G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, &pss, rp);
4502 G1CLDClosure<G1MarkNone> scan_only_cld_cl(&scan_only_root_cl,
4503 only_young, // Only process dirty klasses.
4504 false); // No need to claim CLDs.
4505 // IM young GC.
4506 // Strong roots closures.
4507 G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> scan_mark_root_cl(_g1h, &pss, rp);
4508 G1CLDClosure<G1MarkFromRoot> scan_mark_cld_cl(&scan_mark_root_cl,
4509 false, // Process all klasses.
4510 true); // Need to claim CLDs.
4511 // Weak roots closures.
4512 G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
4513 G1CLDClosure<G1MarkPromotedFromRoot> scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
4514 false, // Process all klasses.
4515 true); // Need to claim CLDs.
4516
4517 G1CodeBlobClosure scan_only_code_cl(&scan_only_root_cl);
4518 G1CodeBlobClosure scan_mark_code_cl(&scan_mark_root_cl);
4519 // IM Weak code roots are handled later.
4520
4521 OopClosure* strong_root_cl;
4522 OopClosure* weak_root_cl;
4523 CLDClosure* strong_cld_cl;
4524 CLDClosure* weak_cld_cl;
4525 CodeBlobClosure* strong_code_cl;
4526
4527 if (_g1h->g1_policy()->during_initial_mark_pause()) {
4528 // We also need to mark copied objects.
4529 strong_root_cl = &scan_mark_root_cl;
4530 strong_cld_cl = &scan_mark_cld_cl;
4531 strong_code_cl = &scan_mark_code_cl;
4532 if (ClassUnloadingWithConcurrentMark) {
4533 weak_root_cl = &scan_mark_weak_root_cl;
4534 weak_cld_cl = &scan_mark_weak_cld_cl;
4535 } else {
4536 weak_root_cl = &scan_mark_root_cl;
4537 weak_cld_cl = &scan_mark_cld_cl;
4538 }
4539 } else {
4540 strong_root_cl = &scan_only_root_cl;
4541 weak_root_cl = &scan_only_root_cl;
4542 strong_cld_cl = &scan_only_cld_cl;
4543 weak_cld_cl = &scan_only_cld_cl;
4544 strong_code_cl = &scan_only_code_cl;
4545 }
4546
4547
4588 };
4589
4590 // *** Common G1 Evacuation Stuff
4591
4592 // This method is run in a GC worker.
4593
4594 void
4595 G1CollectedHeap::
4596 g1_process_roots(OopClosure* scan_non_heap_roots,
4597 OopClosure* scan_non_heap_weak_roots,
4598 G1ParPushHeapRSClosure* scan_rs,
4599 CLDClosure* scan_strong_clds,
4600 CLDClosure* scan_weak_clds,
4601 CodeBlobClosure* scan_strong_code,
4602 uint worker_i) {
4603
4604 // First scan the shared roots.
4605 double ext_roots_start = os::elapsedTime();
4606 double closure_app_time_sec = 0.0;
4607
4608 bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
4609 bool trace_metadata = during_im && ClassUnloadingWithConcurrentMark;
4610
4611 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
4612 BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
4613
4614 process_roots(false, // no scoping; this is parallel code
4615 SharedHeap::SO_None,
4616 &buf_scan_non_heap_roots,
4617 &buf_scan_non_heap_weak_roots,
4618 scan_strong_clds,
4619 // Unloading Initial Marks handle the weak CLDs separately.
4620 (trace_metadata ? NULL : scan_weak_clds),
4621 scan_strong_code);
4622
4623 // Now the CM ref_processor roots.
4624 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
4625 // We need to treat the discovered reference lists of the
4626 // concurrent mark ref processor as roots and keep entries
4627 // (which are added by the marking threads) on them live
4628 // until they can be processed at the end of marking.
5278 {}
5279
5280 virtual void work(uint worker_id) {
5281 // The reference processing task executed by a single worker.
5282 ResourceMark rm;
5283 HandleMark hm;
5284
5285 G1STWIsAliveClosure is_alive(_g1h);
5286
5287 G1ParScanThreadState pss(_g1h, worker_id, NULL);
5288 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5289
5290 pss.set_evac_failure_closure(&evac_failure_cl);
5291
5292 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
5293
5294 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5295
5296 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5297
5298 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5299 // We also need to mark copied objects.
5300 copy_non_heap_cl = &copy_mark_non_heap_cl;
5301 }
5302
5303 // Keep alive closure.
5304 G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);
5305
5306 // Complete GC closure
5307 G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
5308
5309 // Call the reference processing task's work routine.
5310 _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
5311
5312 // Note we cannot assert that the refs array is empty here as not all
5313 // of the processing tasks (specifically phase2 - pp2_work) execute
5314 // the complete_gc closure (which ordinarily would drain the queue) so
5315 // the queue may not be empty.
5316 }
5317 };
5318
5383 _n_workers(workers)
5384 { }
5385
5386 void work(uint worker_id) {
5387 ResourceMark rm;
5388 HandleMark hm;
5389
5390 G1ParScanThreadState pss(_g1h, worker_id, NULL);
5391 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5392
5393 pss.set_evac_failure_closure(&evac_failure_cl);
5394
5395 assert(pss.queue_is_empty(), "both queue and overflow should be empty");
5396
5397 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
5398
5399 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5400
5401 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5402
5403 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5404 // We also need to mark copied objects.
5405 copy_non_heap_cl = &copy_mark_non_heap_cl;
5406 }
5407
5408 // Is alive closure
5409 G1AlwaysAliveClosure always_alive(_g1h);
5410
5411 // Copying keep alive closure. Applied to referent objects that need
5412 // to be copied.
5413 G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);
5414
5415 ReferenceProcessor* rp = _g1h->ref_processor_cm();
5416
5417 uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
5418 uint stride = MIN2(MAX2(_n_workers, 1U), limit);
5419
5420 // limit is set using max_num_q() - which was set using ParallelGCThreads.
5421 // So this must be true - but assert just in case someone decides to
5422 // change the worker ids.
5423 assert(0 <= worker_id && worker_id < limit, "sanity");
5498 // JNI refs.
5499
5500 // Use only a single queue for this PSS.
5501 G1ParScanThreadState pss(this, 0, NULL);
5502
5503 // We do not embed a reference processor in the copying/scanning
5504 // closures while we're actually processing the discovered
5505 // reference objects.
5506 G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5507
5508 pss.set_evac_failure_closure(&evac_failure_cl);
5509
5510 assert(pss.queue_is_empty(), "pre-condition");
5511
5512 G1ParScanExtRootClosure only_copy_non_heap_cl(this, &pss, NULL);
5513
5514 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
5515
5516 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5517
5518 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5519 // We also need to mark copied objects.
5520 copy_non_heap_cl = &copy_mark_non_heap_cl;
5521 }
5522
5523 // Keep alive closure.
5524 G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, &pss);
5525
5526 // Serial Complete GC closure
5527 G1STWDrainQueueClosure drain_queue(this, &pss);
5528
5529 // Set up the soft refs policy...
5530 rp->setup_policy(false);
5531
5532 ReferenceProcessorStats stats;
5533 if (!rp->processing_is_mt()) {
5534 // Serial reference processing...
5535 stats = rp->process_discovered_references(&is_alive,
5536 &keep_alive,
5537 &drain_queue,
5538 NULL,
5616 AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
5617 workers()->active_workers(),
5618 Threads::number_of_non_daemon_threads());
5619 assert(UseDynamicNumberOfGCThreads ||
5620 n_workers == workers()->total_workers(),
5621 "If not dynamic should be using all the workers");
5622 workers()->set_active_workers(n_workers);
5623 set_par_threads(n_workers);
5624
5625 G1ParTask g1_par_task(this, _task_queues);
5626
5627 init_for_evac_failure(NULL);
5628
5629 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5630 double start_par_time_sec = os::elapsedTime();
5631 double end_par_time_sec;
5632
5633 {
5634 StrongRootsScope srs(this);
5635 // InitialMark needs claim bits to keep track of the marked-through CLDs.
5636 if (g1_policy()->during_initial_mark_pause()) {
5637 ClassLoaderDataGraph::clear_claimed_marks();
5638 }
5639
5640 // The individual threads will set their evac-failure closures.
5641 if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();
5642 // These tasks use SharedHeap::_process_strong_tasks
5643 assert(UseDynamicNumberOfGCThreads ||
5644 workers()->active_workers() == workers()->total_workers(),
5645 "If not dynamic should be using all the workers");
5646 workers()->run_task(&g1_par_task);
5647 end_par_time_sec = os::elapsedTime();
5648
5649 // Closing the inner scope will execute the destructor
5650 // for the StrongRootsScope object. We record the current
5651 // elapsed time before closing the scope so that time
5652 // taken for the SRS destructor is NOT included in the
5653 // reported parallel time.
5654 }
5655
5656 double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
6569
6570 if (count < g1_policy()->max_regions(dest)) {
6571 const bool is_survivor = (dest.is_young());
6572 HeapRegion* new_alloc_region = new_region(word_size,
6573 !is_survivor,
6574 true /* do_expand */);
6575 if (new_alloc_region != NULL) {
6576 // We really only need to do this for old regions given that we
6577 // should never scan survivors. But it doesn't hurt to do it
6578 // for survivors too.
6579 new_alloc_region->record_timestamp();
6580 if (is_survivor) {
6581 new_alloc_region->set_survivor();
6582 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
6583 check_bitmaps("Survivor Region Allocation", new_alloc_region);
6584 } else {
6585 new_alloc_region->set_old();
6586 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
6587 check_bitmaps("Old Region Allocation", new_alloc_region);
6588 }
6589 bool during_im = g1_policy()->during_initial_mark_pause();
6590 new_alloc_region->note_start_of_copying(during_im);
6591 return new_alloc_region;
6592 }
6593 }
6594 return NULL;
6595 }
6596
6597 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6598 size_t allocated_bytes,
6599 InCSetState dest) {
6600 bool during_im = g1_policy()->during_initial_mark_pause();
6601 alloc_region->note_end_of_copying(during_im);
6602 g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6603 if (dest.is_young()) {
6604 young_list()->add_survivor_region(alloc_region);
6605 } else {
6606 _old_set.add(alloc_region);
6607 }
6608 _hr_printer.retire(alloc_region);
6609 }
6610
6611 // Heap region set verification
6612
6613 class VerifyRegionListsClosure : public HeapRegionClosure {
6614 private:
6615 HeapRegionSet* _old_set;
6616 HeapRegionSet* _humongous_set;
6617 HeapRegionManager* _hrm;
6618
6619 public:
6620 HeapRegionSetCount _old_count;
1 /*
2 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
344 curr->prev_top_at_mark_start(),
345 curr->next_top_at_mark_start(),
346 curr->age_in_surv_rate_group_cond());
347 curr = curr->get_next_young_region();
348 }
349 }
350
351 gclog_or_tty->cr();
352 }
353
354 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
355 HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
356 }
357
358 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
359 // The from card cache is not the memory that is actually committed. So we cannot
360 // take advantage of the zero_filled parameter.
361 reset_from_card_cache(start_idx, num_regions);
362 }
363
364 ////////////////////// G1CollectedHeap methods ////////////////////////////////
365
366 // Records the fact that a marking phase is no longer in progress.
367 void G1CollectedHeap::set_marking_complete() {
368 g1_policy()->collector_state()->set_marking_complete();
369 }
370
371 // Records the fact that a marking phase has commenced.
372 void G1CollectedHeap::set_marking_started() {
373 g1_policy()->collector_state()->set_marking_started();
374 }
375
376 // Returns whether a marking phase is currently in progress.
377 bool G1CollectedHeap::mark_in_progress() {
378 return g1_policy()->collector_state()->mark_in_progress();
379 }
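// These three accessors exist so callers do not have to reach into the
// policy themselves: the marking flags live in the G1CollectorState object
// owned by g1_policy(), and G1CollectedHeap merely delegates to it.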
380
381 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
382 {
383 // Claim the right to put the region on the dirty cards region list
384 // by installing a self pointer.
385 HeapRegion* next = hr->get_next_dirty_cards_region();
386 if (next == NULL) {
387 HeapRegion* res = (HeapRegion*)
388 Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
389 NULL);
390 if (res == NULL) {
391 HeapRegion* head;
392 do {
393 // Put the region to the dirty cards region list.
394 head = _dirty_cards_region_list;
395 next = (HeapRegion*)
396 Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
397 if (next == head) {
398 assert(hr->get_next_dirty_cards_region() == hr,
399 "hr->get_next_dirty_cards_region() != hr");
400 if (next == NULL) {
1076 }
1077
1078 ShouldNotReachHere();
1079 return NULL;
1080 }
1081
1082 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
1083 AllocationContext_t context,
1084 bool expect_null_mutator_alloc_region) {
1085 assert_at_safepoint(true /* should_be_vm_thread */);
1086 assert(_allocator->mutator_alloc_region(context)->get() == NULL ||
1087 !expect_null_mutator_alloc_region,
1088 "the current alloc region was unexpectedly found to be non-NULL");
1089
1090 if (!is_humongous(word_size)) {
1091 return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
1092 false /* bot_updates */);
1093 } else {
1094 HeapWord* result = humongous_obj_allocate(word_size, context);
1095 if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1096 g1_policy()->collector_state()->set_initiate_conc_mark_if_possible(true);
1097 }
1098 return result;
1099 }
1100
1101 ShouldNotReachHere();
1102 }
1103
1104 class PostMCRemSetClearClosure: public HeapRegionClosure {
1105 G1CollectedHeap* _g1h;
1106 ModRefBarrierSet* _mr_bs;
1107 public:
1108 PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
1109 _g1h(g1h), _mr_bs(mr_bs) {}
1110
1111 bool doHeapRegion(HeapRegion* r) {
1112 HeapRegionRemSet* hrrs = r->rem_set();
1113
1114 if (r->is_continues_humongous()) {
1115 // We'll assert that the strong code root list and RSet are empty
1116 assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
1288
1289 // Make sure we'll choose a new allocation region afterwards.
1290 _allocator->release_mutator_alloc_region();
1291 _allocator->abandon_gc_alloc_regions();
1292 g1_rem_set()->cleanupHRRS();
1293
1294 // We should call this after we retire any currently active alloc
1295 // regions so that all the ALLOC / RETIRE events are generated
1296 // before the start GC event.
1297 _hr_printer.start_gc(true /* full */, (size_t) total_collections());
1298
1299 // We may have added regions to the current incremental collection
1300 // set between the last GC or pause and now. We need to clear the
1301 // incremental collection set and then start rebuilding it afresh
1302 // after this full GC.
1303 abandon_collection_set(g1_policy()->inc_cset_head());
1304 g1_policy()->clear_incremental_cset();
1305 g1_policy()->stop_incremental_cset_building();
1306
1307 tear_down_region_sets(false /* free_list_only */);
1308 g1_policy()->collector_state()->set_gcs_are_young(true);
1309
1310 // See the comments in g1CollectedHeap.hpp and
1311 // G1CollectedHeap::ref_processing_init() about
1312 // how reference processing currently works in G1.
1313
1314 // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1315 ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1316
1317 // Temporarily clear the STW ref processor's _is_alive_non_header field.
1318 ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1319
1320 ref_processor_stw()->enable_discovery();
1321 ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1322
1323 // Do collection work
1324 {
1325 HandleMark hm; // Discard invalid handles created during gc
1326 G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1327 }
1328
1770
1771 // Public methods.
1772
1773 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
1774 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1775 #endif // _MSC_VER
1776
1777
1778 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1779 SharedHeap(policy_),
1780 _g1_policy(policy_),
1781 _dirty_card_queue_set(false),
1782 _into_cset_dirty_card_queue_set(false),
1783 _is_alive_closure_cm(this),
1784 _is_alive_closure_stw(this),
1785 _ref_processor_cm(NULL),
1786 _ref_processor_stw(NULL),
1787 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
1788 _bot_shared(NULL),
1789 _evac_failure_scan_stack(NULL),
1790 _cg1r(NULL),
1791 _g1mm(NULL),
1792 _refine_cte_cl(NULL),
1793 _full_collection(false),
1794 _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1795 _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1796 _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1797 _humongous_is_live(),
1798 _has_humongous_reclaim_candidates(false),
1799 _free_regions_coming(false),
1800 _young_list(new YoungList(this)),
1801 _gc_time_stamp(0),
1802 _survivor_plab_stats(YoungPLABSize, PLABWeight),
1803 _old_plab_stats(OldPLABSize, PLABWeight),
1804 _expand_heap_after_alloc_failure(true),
1805 _surviving_young_words(NULL),
1806 _old_marking_cycles_started(0),
1807 _old_marking_cycles_completed(0),
1808 _concurrent_cycle_started(false),
1809 _heap_summary_sent(false),
2377 _heap_summary_sent = false;
2378 }
2379 }
2380
2381 void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
2382 if (_concurrent_cycle_started) {
2383 // This function can be called when:
2384 // the cleanup pause is run
2385 // the concurrent cycle is aborted before the cleanup pause.
2386 // the concurrent cycle is aborted after the cleanup pause,
2387 // but before the concurrent cycle end has been registered.
2388 // Make sure that we only send the heap information once.
2389 if (!_heap_summary_sent) {
2390 trace_heap_after_gc(_gc_tracer_cm);
2391 _heap_summary_sent = true;
2392 }
2393 }
2394 }
2395
2396 G1YCType G1CollectedHeap::yc_type() {
2397 bool is_young = g1_policy()->collector_state()->gcs_are_young();
2398 bool is_initial_mark = g1_policy()->collector_state()->during_initial_mark_pause();
2399 bool is_during_mark = mark_in_progress();
2400
2401 if (is_initial_mark) {
2402 return InitialMark;
2403 } else if (is_during_mark) {
2404 return DuringMark;
2405 } else if (is_young) {
2406 return Normal;
2407 } else {
2408 return Mixed;
2409 }
2410 }
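// Summary of the classification above, in precedence order:
//   initial-mark pause   -> InitialMark (young pause that starts marking)
//   marking in progress  -> DuringMark  (young pause while marking runs)
//   gcs_are_young()      -> Normal      (plain young-only pause)
//   otherwise            -> Mixed       (young regions plus some old ones)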
2411
2412 void G1CollectedHeap::collect(GCCause::Cause cause) {
2413 assert_heap_not_locked();
2414
2415 unsigned int gc_count_before;
2416 unsigned int old_marking_count_before;
2417 unsigned int full_gc_count_before;
2418 bool retry_gc;
3662
3663 DEBUG_ONLY(totals.verify());
3664 }
3665
3666 void G1CollectedHeap::reset_taskqueue_stats() {
3667 const int n = workers()->total_workers();
3668 for (int i = 0; i < n; ++i) {
3669 task_queue(i)->stats.reset();
3670 }
3671 }
3672 #endif // TASKQUEUE_STATS
3673
3674 void G1CollectedHeap::log_gc_header() {
3675 if (!G1Log::fine()) {
3676 return;
3677 }
3678
3679 gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
3680
3681 GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
3682 .append(g1_policy()->collector_state()->gcs_are_young() ? "(young)" : "(mixed)")
3683 .append(g1_policy()->collector_state()->during_initial_mark_pause() ? " (initial-mark)" : "");
3684
3685 gclog_or_tty->print("[%s", (const char*)gc_cause_str);
3686 }
3687
3688 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
3689 if (!G1Log::fine()) {
3690 return;
3691 }
3692
3693 if (G1Log::finer()) {
3694 if (evacuation_failed()) {
3695 gclog_or_tty->print(" (to-space exhausted)");
3696 }
3697 gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3698 g1_policy()->phase_times()->note_gc_end();
3699 g1_policy()->phase_times()->print(pause_time_sec);
3700 g1_policy()->print_detailed_heap_transition();
3701 } else {
3702 if (evacuation_failed()) {
3703 gclog_or_tty->print("--");
3719
3720 _gc_timer_stw->register_gc_start();
3721
3722 _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3723
3724 SvcGCMarker sgcm(SvcGCMarker::MINOR);
3725 ResourceMark rm;
3726
3727 print_heap_before_gc();
3728 trace_heap_before_gc(_gc_tracer_stw);
3729
3730 verify_region_sets_optional();
3731 verify_dirty_young_regions();
3732
3733 // This call will decide whether this pause is an initial-mark
3734 // pause. If it is, during_initial_mark_pause() will return true
3735 // for the duration of this pause.
3736 g1_policy()->decide_on_conc_mark_initiation();
3737
3738 // We do not allow initial-mark to be piggy-backed on a mixed GC.
3739 assert(!g1_policy()->collector_state()->during_initial_mark_pause() ||
3740 g1_policy()->collector_state()->gcs_are_young(), "sanity");
3741
3742 // We also do not allow mixed GCs during marking.
3743 assert(!mark_in_progress() || g1_policy()->collector_state()->gcs_are_young(), "sanity");
3744
3745 // Record whether this pause is an initial mark. By the time the current
3746 // thread has completed its logging output and it is safe to signal
3747 // the CM thread, the flag's value in the policy will have been reset.
3748 bool should_start_conc_mark = g1_policy()->collector_state()->during_initial_mark_pause();
3749
3750 // Inner scope for scope based logging, timers, and stats collection
3751 {
3752 EvacuationInfo evacuation_info;
3753
3754 if (g1_policy()->collector_state()->during_initial_mark_pause()) {
3755 // We are about to start a marking cycle, so we increment the
3756 // full collection counter.
3757 increment_old_marking_cycles_started();
3758 register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3759 }
3760
3761 _gc_tracer_stw->report_yc_type(yc_type());
3762
3763 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3764
3765 int active_workers = workers()->active_workers();
3766 double pause_start_sec = os::elapsedTime();
3767 g1_policy()->phase_times()->note_gc_start(active_workers);
3768 log_gc_header();
3769
3770 TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3771 TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3772
3773 // If the secondary_free_list is not empty, append it to the
3774 // free_list. No need to wait for the cleanup operation to finish;
3841 g1_policy()->record_collection_pause_start(sample_start_time_sec);
3842
3843 double scan_wait_start = os::elapsedTime();
3844 // We have to wait until the CM threads finish scanning the
3845 // root regions as it's the only way to ensure that all the
3846 // objects on them have been correctly scanned before we start
3847 // moving them during the GC.
3848 bool waited = _cm->root_regions()->wait_until_scan_finished();
3849 double wait_time_ms = 0.0;
3850 if (waited) {
3851 double scan_wait_end = os::elapsedTime();
3852 wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3853 }
3854 g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
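// Why the wait is mandatory: the root regions (as far as we can tell, the
// survivor regions of the initial-mark pause) are scanned by the concurrent
// marking threads, and evacuation would move the very objects that scan is
// reading. Recording the wait keeps the time visible in the phase times
// instead of silently inflating the pause.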
3855
3856 #if YOUNG_LIST_VERBOSE
3857 gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
3858 _young_list->print();
3859 #endif // YOUNG_LIST_VERBOSE
3860
3861 if (g1_policy()->collector_state()->during_initial_mark_pause()) {
3862 concurrent_mark()->checkpointRootsInitialPre();
3863 }
3864
3865 #if YOUNG_LIST_VERBOSE
3866 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
3867 _young_list->print();
3868 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3869 #endif // YOUNG_LIST_VERBOSE
3870
3871 g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
3872
3873 register_humongous_regions_with_in_cset_fast_test();
3874
3875 assert(check_cset_fast_test(), "Inconsistency in the InCSetState table.");
3876
3877 _cm->note_start_of_gc();
3878 // We should not verify the per-thread SATB buffers given that
3879 // we have not filtered them yet (we'll do so during the
3880 // GC). We also call this after finalize_cset() to
3881 // ensure that the CSet has been finalized.
3945 g1_policy()->record_survivor_regions(_young_list->survivor_length(),
3946 _young_list->first_survivor_region(),
3947 _young_list->last_survivor_region());
3948
3949 _young_list->reset_auxilary_lists();
3950
3951 if (evacuation_failed()) {
3952 _allocator->set_used(recalculate_used());
3953 uint n_queues = MAX2((int)ParallelGCThreads, 1);
3954 for (uint i = 0; i < n_queues; i++) {
3955 if (_evacuation_failed_info_array[i].has_failed()) {
3956 _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
3957 }
3958 }
3959 } else {
3960 // The "used" of the collection set regions has already been subtracted
3961 // when they were freed. Add in the bytes evacuated.
3962 _allocator->increase_used(g1_policy()->bytes_copied_during_gc());
3963 }
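// Accounting rationale: after an evacuation failure some objects stay
// behind (self-forwarded) in collection set regions, so the usual
// incremental bookkeeping is no longer trustworthy and "used" is recomputed
// from scratch. On the success path the regions' usage was subtracted when
// they were freed, so only the copied bytes need to be added back.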
3964
3965 if (g1_policy()->collector_state()->during_initial_mark_pause()) {
3966 // We have to do this before we notify the CM threads that
3967 // they can start working to make sure that all the
3968 // appropriate initialization is done on the CM object.
3969 concurrent_mark()->checkpointRootsInitialPost();
3970 set_marking_started();
3971 // Note that we don't actually trigger the CM thread at
3972 // this point. We do that later when we're sure that
3973 // the current thread has completed its logging output.
3974 }
3975
3976 allocate_dummy_regions();
3977
3978 #if YOUNG_LIST_VERBOSE
3979 gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
3980 _young_list->print();
3981 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3982 #endif // YOUNG_LIST_VERBOSE
3983
3984 _allocator->init_mutator_alloc_region();
3985
4494 }
4495 };
4496
4497 void work(uint worker_id) {
4498 if (worker_id >= _n_workers) return; // no work needed this round
4499
4500 double start_time_ms = os::elapsedTime() * 1000.0;
4501 _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
4502
4503 {
4504 ResourceMark rm;
4505 HandleMark hm;
4506
4507 ReferenceProcessor* rp = _g1h->ref_processor_stw();
4508
4509 G1ParScanThreadState pss(_g1h, worker_id, rp);
4510 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
4511
4512 pss.set_evac_failure_closure(&evac_failure_cl);
4513
4514 bool only_young = _g1h->g1_policy()->collector_state()->gcs_are_young();
4515
4516 // Non-IM young GC.
4517 G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, &pss, rp);
4518 G1CLDClosure<G1MarkNone> scan_only_cld_cl(&scan_only_root_cl,
4519 only_young, // Only process dirty klasses.
4520 false); // No need to claim CLDs.
4521 // IM young GC.
4522 // Strong roots closures.
4523 G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> scan_mark_root_cl(_g1h, &pss, rp);
4524 G1CLDClosure<G1MarkFromRoot> scan_mark_cld_cl(&scan_mark_root_cl,
4525 false, // Process all klasses.
4526 true); // Need to claim CLDs.
4527 // Weak roots closures.
4528 G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
4529 G1CLDClosure<G1MarkPromotedFromRoot> scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
4530 false, // Process all klasses.
4531 true); // Need to claim CLDs.
4532
4533 G1CodeBlobClosure scan_only_code_cl(&scan_only_root_cl);
4534 G1CodeBlobClosure scan_mark_code_cl(&scan_mark_root_cl);
4535 // IM Weak code roots are handled later.
4536
4537 OopClosure* strong_root_cl;
4538 OopClosure* weak_root_cl;
4539 CLDClosure* strong_cld_cl;
4540 CLDClosure* weak_cld_cl;
4541 CodeBlobClosure* strong_code_cl;
4542
4543 if (_g1h->g1_policy()->collector_state()->during_initial_mark_pause()) {
4544 // We also need to mark copied objects.
4545 strong_root_cl = &scan_mark_root_cl;
4546 strong_cld_cl = &scan_mark_cld_cl;
4547 strong_code_cl = &scan_mark_code_cl;
4548 if (ClassUnloadingWithConcurrentMark) {
4549 weak_root_cl = &scan_mark_weak_root_cl;
4550 weak_cld_cl = &scan_mark_weak_cld_cl;
4551 } else {
4552 weak_root_cl = &scan_mark_root_cl;
4553 weak_cld_cl = &scan_mark_cld_cl;
4554 }
4555 } else {
4556 strong_root_cl = &scan_only_root_cl;
4557 weak_root_cl = &scan_only_root_cl;
4558 strong_cld_cl = &scan_only_cld_cl;
4559 weak_cld_cl = &scan_only_cld_cl;
4560 strong_code_cl = &scan_only_code_cl;
4561 }
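// Net effect of the selection above:
//  - non-IM pause: strong == weak == the scan_only_* closures (no marking
//    work at all).
//  - IM pause: strong roots/CLDs/code switch to the scan_mark_*
//    (G1MarkFromRoot) variants; weak roots differ only when
//    ClassUnloadingWithConcurrentMark is set, in which case they use the
//    scan_mark_weak_* (G1MarkPromotedFromRoot) variants.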
4562
4563
4604 };
4605
4606 // *** Common G1 Evacuation Stuff
4607
4608 // This method is run in a GC worker.
4609
4610 void
4611 G1CollectedHeap::
4612 g1_process_roots(OopClosure* scan_non_heap_roots,
4613 OopClosure* scan_non_heap_weak_roots,
4614 G1ParPushHeapRSClosure* scan_rs,
4615 CLDClosure* scan_strong_clds,
4616 CLDClosure* scan_weak_clds,
4617 CodeBlobClosure* scan_strong_code,
4618 uint worker_i) {
4619
4620 // First scan the shared roots.
4621 double ext_roots_start = os::elapsedTime();
4622 double closure_app_time_sec = 0.0;
4623
4624 bool during_im = _g1h->g1_policy()->collector_state()->during_initial_mark_pause();
4625 bool trace_metadata = during_im && ClassUnloadingWithConcurrentMark;
4626
4627 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
4628 BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
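// Our reading of the buffering wrappers: they do not apply the wrapped
// closure immediately but collect the discovered oops first, so that root
// discovery (timed from ext_roots_start) and closure application can be
// measured separately later in this method.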
4629
4630 process_roots(false, // no scoping; this is parallel code
4631 SharedHeap::SO_None,
4632 &buf_scan_non_heap_roots,
4633 &buf_scan_non_heap_weak_roots,
4634 scan_strong_clds,
4635 // Unloading Initial Marks handle the weak CLDs separately.
4636 (trace_metadata ? NULL : scan_weak_clds),
4637 scan_strong_code);
4638
4639 // Now the CM ref_processor roots.
4640 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
4641 // We need to treat the discovered reference lists of the
4642 // concurrent mark ref processor as roots and keep entries
4643 // (which are added by the marking threads) on them live
4644 // until they can be processed at the end of marking.
5294 {}
5295
5296 virtual void work(uint worker_id) {
5297 // The reference processing task executed by a single worker.
5298 ResourceMark rm;
5299 HandleMark hm;
5300
5301 G1STWIsAliveClosure is_alive(_g1h);
5302
5303 G1ParScanThreadState pss(_g1h, worker_id, NULL);
5304 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5305
5306 pss.set_evac_failure_closure(&evac_failure_cl);
5307
5308 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
5309
5310 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5311
5312 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5313
5314 if (_g1h->g1_policy()->collector_state()->during_initial_mark_pause()) {
5315 // We also need to mark copied objects.
5316 copy_non_heap_cl = &copy_mark_non_heap_cl;
5317 }
5318
5319 // Keep alive closure.
5320 G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);
5321
5322 // Complete GC closure
5323 G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
5324
5325 // Call the reference processing task's work routine.
5326 _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
5327
5328 // Note we cannot assert that the refs array is empty here as not all
5329 // of the processing tasks (specifically phase2 - pp2_work) execute
5330 // the complete_gc closure (which ordinarily would drain the queue) so
5331 // the queue may not be empty.
5332 }
5333 };
5334
5399 _n_workers(workers)
5400 { }
5401
5402 void work(uint worker_id) {
5403 ResourceMark rm;
5404 HandleMark hm;
5405
5406 G1ParScanThreadState pss(_g1h, worker_id, NULL);
5407 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5408
5409 pss.set_evac_failure_closure(&evac_failure_cl);
5410
5411 assert(pss.queue_is_empty(), "both queue and overflow should be empty");
5412
5413 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
5414
5415 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5416
5417 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5418
5419 if (_g1h->g1_policy()->collector_state()->during_initial_mark_pause()) {
5420 // We also need to mark copied objects.
5421 copy_non_heap_cl = &copy_mark_non_heap_cl;
5422 }
5423
5424 // Is alive closure
5425 G1AlwaysAliveClosure always_alive(_g1h);
5426
5427 // Copying keep alive closure. Applied to referent objects that need
5428 // to be copied.
5429 G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);
5430
5431 ReferenceProcessor* rp = _g1h->ref_processor_cm();
5432
5433 uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
5434 uint stride = MIN2(MAX2(_n_workers, 1U), limit);
5435
5436 // limit is set using max_num_q() - which was set using ParallelGCThreads.
5437 // So this must be true - but assert just in case someone decides to
5438 // change the worker ids.
5439 assert(0 <= worker_id && worker_id < limit, "sanity");
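// Illustrative sketch (the loop itself is outside this excerpt): with the
// stride computed above, worker threads would typically partition the
// discovered-reference queues like
//
//   for (uint idx = worker_id; idx < limit; idx += stride) {
//     // preserve/process the idx-th discovered list
//   }
//
// so each queue in [0, limit) is handled by exactly one worker even when
// there are fewer workers than queues.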
5514 // JNI refs.
5515
5516 // Use only a single queue for this PSS.
5517 G1ParScanThreadState pss(this, 0, NULL);
5518
5519 // We do not embed a reference processor in the copying/scanning
5520 // closures while we're actually processing the discovered
5521 // reference objects.
5522 G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5523
5524 pss.set_evac_failure_closure(&evac_failure_cl);
5525
5526 assert(pss.queue_is_empty(), "pre-condition");
5527
5528 G1ParScanExtRootClosure only_copy_non_heap_cl(this, &pss, NULL);
5529
5530 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
5531
5532 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5533
5534 if (_g1h->g1_policy()->collector_state()->during_initial_mark_pause()) {
5535 // We also need to mark copied objects.
5536 copy_non_heap_cl = &copy_mark_non_heap_cl;
5537 }
5538
5539 // Keep alive closure.
5540 G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, &pss);
5541
5542 // Serial Complete GC closure
5543 G1STWDrainQueueClosure drain_queue(this, &pss);
5544
5545 // Set up the soft refs policy...
5546 rp->setup_policy(false);
5547
5548 ReferenceProcessorStats stats;
5549 if (!rp->processing_is_mt()) {
5550 // Serial reference processing...
5551 stats = rp->process_discovered_references(&is_alive,
5552 &keep_alive,
5553 &drain_queue,
5554 NULL,
5632 AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
5633 workers()->active_workers(),
5634 Threads::number_of_non_daemon_threads());
5635 assert(UseDynamicNumberOfGCThreads ||
5636 n_workers == workers()->total_workers(),
5637 "If not dynamic should be using all the workers");
5638 workers()->set_active_workers(n_workers);
5639 set_par_threads(n_workers);
5640
5641 G1ParTask g1_par_task(this, _task_queues);
5642
5643 init_for_evac_failure(NULL);
5644
5645 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5646 double start_par_time_sec = os::elapsedTime();
5647 double end_par_time_sec;
5648
5649 {
5650 StrongRootsScope srs(this);
5651 // InitialMark needs claim bits to keep track of the marked-through CLDs.
5652 if (g1_policy()->collector_state()->during_initial_mark_pause()) {
5653 ClassLoaderDataGraph::clear_claimed_marks();
5654 }
5655
5656 // The individual threads will set their evac-failure closures.
5657 if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();
5658 // These tasks use SharedHeap::_process_strong_tasks
5659 assert(UseDynamicNumberOfGCThreads ||
5660 workers()->active_workers() == workers()->total_workers(),
5661 "If not dynamic should be using all the workers");
5662 workers()->run_task(&g1_par_task);
5663 end_par_time_sec = os::elapsedTime();
5664
5665 // Closing the inner scope will execute the destructor
5666 // for the StrongRootsScope object. We record the current
5667 // elapsed time before closing the scope so that time
5668 // taken for the SRS destructor is NOT included in the
5669 // reported parallel time.
5670 }
5671
5672 double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
6585
6586 if (count < g1_policy()->max_regions(dest)) {
6587 const bool is_survivor = (dest.is_young());
6588 HeapRegion* new_alloc_region = new_region(word_size,
6589 !is_survivor,
6590 true /* do_expand */);
6591 if (new_alloc_region != NULL) {
6592 // We really only need to do this for old regions given that we
6593 // should never scan survivors. But it doesn't hurt to do it
6594 // for survivors too.
6595 new_alloc_region->record_timestamp();
6596 if (is_survivor) {
6597 new_alloc_region->set_survivor();
6598 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
6599 check_bitmaps("Survivor Region Allocation", new_alloc_region);
6600 } else {
6601 new_alloc_region->set_old();
6602 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
6603 check_bitmaps("Old Region Allocation", new_alloc_region);
6604 }
6605 bool during_im = g1_policy()->collector_state()->during_initial_mark_pause();
6606 new_alloc_region->note_start_of_copying(during_im);
6607 return new_alloc_region;
6608 }
6609 }
6610 return NULL;
6611 }
6612
6613 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6614 size_t allocated_bytes,
6615 InCSetState dest) {
6616 bool during_im = g1_policy()->collector_state()->during_initial_mark_pause();
6617 alloc_region->note_end_of_copying(during_im);
6618 g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6619 if (dest.is_young()) {
6620 young_list()->add_survivor_region(alloc_region);
6621 } else {
6622 _old_set.add(alloc_region);
6623 }
6624 _hr_printer.retire(alloc_region);
6625 }
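// Pairing note: note_start_of_copying()/note_end_of_copying() bracket a GC
// alloc region's active lifetime (allocation above, retirement here). Our
// understanding is that during an initial-mark pause they keep the region's
// next-top-at-mark-start consistent with the newly copied objects, so that
// concurrent marking treats those objects as live.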
6626
6627 // Heap region set verification
6628
6629 class VerifyRegionListsClosure : public HeapRegionClosure {
6630 private:
6631 HeapRegionSet* _old_set;
6632 HeapRegionSet* _humongous_set;
6633 HeapRegionManager* _hrm;
6634
6635 public:
6636 HeapRegionSetCount _old_count;