1 /*
2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
767 for (uint i = first + 1; i < last; ++i) {
768 hr = region_at(i);
769 if ((i + 1) == last) {
770 // last continues humongous region
771 assert(hr->bottom() < new_top && new_top <= hr->end(),
772 "new_top should fall on this region");
773 hr->set_top(new_top);
774 _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top);
775 } else {
776 // not last one
777 assert(new_top > hr->end(), "new_top should be above this region");
778 hr->set_top(hr->end());
779 _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
780 }
781 }
782 // If we have continues humongous regions (hr != NULL), then the
783 // end of the last one should match new_end and its top should
784 // match new_top.
785 assert(hr == NULL ||
786 (hr->end() == new_end && hr->top() == new_top), "sanity");
787
788 assert(first_hr->used() == word_size * HeapWordSize, "invariant");
789 _summary_bytes_used += first_hr->used();
790 _humongous_set.add(first_hr);
791
792 return new_obj;
793 }
794
795 // If the object can fit into free regions without expansion, try that.
796 // Otherwise, if we can expand the heap, do so.
797 // Otherwise, if using the expansion regions might help, try again with them given back.
798 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
799 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
800
801 verify_region_sets_optional();
802
803 size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords);
804 uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords);
805 uint x_num = expansion_regions();
806 uint fs = _hrs.free_suffix();
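     // Worked example (illustrative only, assuming 1 MB regions, i.e.
     // GrainWords == 131072 with 8-byte HeapWords): a 384000-word
     // (3000 KB) request rounds up to word_size_rounded == 393216, so
     // num_regions == 3: one starts-humongous region followed by two
     // continues-humongous regions.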
1317 // following two methods.
1318 wait_while_free_regions_coming();
1319 // If we start the compaction before the CM threads finish
1320 // scanning the root regions we might trip them over as we'll
1321 // be moving objects / updating references. So let's wait until
1322 // they are done. Telling them to abort should make them complete
1323 // early.
1324 _cm->root_regions()->abort();
1325 _cm->root_regions()->wait_until_scan_finished();
1326 append_secondary_free_list_if_not_empty_with_lock();
1327
1328 gc_prologue(true);
1329 increment_total_collections(true /* full gc */);
1330 increment_old_marking_cycles_started();
1331
1332 size_t g1h_prev_used = used();
1333 assert(used() == recalculate_used(), "Should be equal");
1334
1335 verify_before_gc();
1336
1337 pre_full_gc_dump();
1338
1339 COMPILER2_PRESENT(DerivedPointerTable::clear());
1340
1341 // Disable discovery and empty the discovered lists
1342 // for the CM ref processor.
1343 ref_processor_cm()->disable_discovery();
1344 ref_processor_cm()->abandon_partial_discovery();
1345 ref_processor_cm()->verify_no_references_recorded();
1346
1347 // Abandon current iterations of concurrent marking and concurrent
1348 // refinement, if any are in progress. We have to do this before
1349 // wait_until_scan_finished() below.
1350 concurrent_mark()->abort();
1351
1352 // Make sure we'll choose a new allocation region afterwards.
1353 release_mutator_alloc_region();
1354 abandon_gc_alloc_regions();
1355 g1_rem_set()->cleanupHRRS();
1356
1357 // We should call this after we retire any currently active alloc
1358 // regions so that all the ALLOC / RETIRE events are generated
1359 // before the start GC event.
1360 _hr_printer.start_gc(true /* full */, (size_t) total_collections());
1361
1362 // We may have added regions to the current incremental collection
1363 // set between the last GC or pause and now. We need to clear the
1364 // incremental collection set and then start rebuilding it afresh
1365 // after this full GC.
1366 abandon_collection_set(g1_policy()->inc_cset_head());
1367 g1_policy()->clear_incremental_cset();
1368 g1_policy()->stop_incremental_cset_building();
1369
1372
1373 // See the comments in g1CollectedHeap.hpp and
1374 // G1CollectedHeap::ref_processing_init() about
1375 // how reference processing currently works in G1.
1376
1377 // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1378 ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1379
1380 // Temporarily clear the STW ref processor's _is_alive_non_header field.
1381 ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
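     // Both mutators above are stack-allocated RAII helpers: their
     // destructors restore the previous MT-discovery and is-alive
     // settings for the STW ref processor when this scope exits.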
1382
1383 ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
1384 ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1385
1386 // Do collection work
1387 {
1388 HandleMark hm; // Discard invalid handles created during gc
1389 G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1390 }
1391
1392 assert(free_regions() == 0, "we should not have added any free regions");
1393 rebuild_region_sets(false /* free_list_only */);
1394
1395 // Enqueue any discovered reference objects that have
1396 // not been removed from the discovered lists.
1397 ref_processor_stw()->enqueue_discovered_references();
1398
1399 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
1400
1401 MemoryService::track_memory_usage();
1402
1403 verify_after_gc();
1404
1405 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1406 ref_processor_stw()->verify_no_references_recorded();
1407
1408 // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1409 ClassLoaderDataGraph::purge();
1410
1411 // Note: since we've just done a full GC, concurrent
1412 // marking is no longer active. Therefore we need not
1413 // re-enable reference discovery for the CM ref processor.
1414 // That will be done at the start of the next marking cycle.
1415 assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1416 ref_processor_cm()->verify_no_references_recorded();
1417
1418 reset_gc_time_stamp();
1419 // Since everything potentially moved, we will clear all remembered
1420 // sets, and clear all cards. Later we will rebuild remembered
1421 // sets. We will also reset the GC time stamps of the regions.
1422 clear_rsets_post_compaction();
1423 check_gc_time_stamps();
1424
3758 // get entries from the secondary_free_list.
3759 if (!G1StressConcRegionFreeing) {
3760 append_secondary_free_list_if_not_empty_with_lock();
3761 }
3762
3763 assert(check_young_list_well_formed(),
3764 "young list should be well formed");
3765
3766 // Don't dynamically change the number of GC threads this early. A value of
3767 // 0 is used to indicate serial work. When parallel work is done,
3768 // it will be set.
3769
3770 { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3771 IsGCActiveMark x;
3772
3773 gc_prologue(false);
3774 increment_total_collections(false /* full gc */);
3775 increment_gc_time_stamp();
3776
3777 verify_before_gc();
3778
3779 COMPILER2_PRESENT(DerivedPointerTable::clear());
3780
3781 // Please see comment in g1CollectedHeap.hpp and
3782 // G1CollectedHeap::ref_processing_init() to see how
3783 // reference processing currently works in G1.
3784
3785 // Enable discovery in the STW reference processor
3786 ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
3787 true /*verify_no_refs*/);
3788
3789 {
3790 // We want to temporarily turn off discovery by the
3791 // CM ref processor, if necessary, and turn it back on
3792 // again later if we do. Using a scoped
3793 // NoRefDiscovery object will do this.
3794 NoRefDiscovery no_cm_discovery(ref_processor_cm());
3795
3796 // Forget the current alloc region (we might even choose it to be part
3797 // of the collection set!).
4002 // regions we just allocated to (i.e., the GC alloc
4003 // regions). However, during the last GC we called
4004 // set_saved_mark() on all the GC alloc regions, so card
4005 // scanning might skip the [saved_mark_word()...top()] area of
4006 // those regions (i.e., the area we allocated objects into
4007 // during the last GC). But it shouldn't. Given that
4008 // saved_mark_word() is conditional on whether the GC time stamp
4009 // on the region is current or not, by incrementing the GC time
4010 // stamp here we invalidate all the GC time stamps on all the
4011 // regions and saved_mark_word() will simply return top() for
4012 // all the regions. This is a nicer way of ensuring this than
4013 // iterating over the regions and fixing them one by one. In fact, the
4014 // GC time stamp increment here also ensures that
4015 // saved_mark_word() will return top() between pauses, i.e.,
4016 // during concurrent refinement. So we don't need the
4017 // is_gc_active() check to decide which top to use when
4018 // scanning cards (see CR 7039627).
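       // A minimal sketch of that mechanism (illustrative only; the
       // actual implementation lives in the HeapRegion / contiguous
       // space hierarchy, and the names here are approximate):
       //
       //   HeapWord* saved_mark_word() const {
       //     if (_gc_time_stamp < g1h->get_gc_time_stamp()) {
       //       return top();   // stamp is stale: whole region is parsable
       //     } else {
       //       return ContiguousSpace::saved_mark_word();
       //     }
       //   }
       //
       // Bumping the heap-wide stamp below therefore invalidates every
       // region's stamp at once, making saved_mark_word() == top()
       // everywhere without touching the regions individually.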
4019 increment_gc_time_stamp();
4020
4021 verify_after_gc();
4022
4023 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
4024 ref_processor_stw()->verify_no_references_recorded();
4025
4026 // CM reference discovery will be re-enabled if necessary.
4027 }
4028
4029 // We should do this after we potentially expand the heap so
4030 // that all the COMMIT events are generated before the end GC
4031 // event, and after we retire the GC alloc regions so that all
4032 // RETIRE events are generated before the end GC event.
4033 _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4034
4035 if (mark_in_progress()) {
4036 concurrent_mark()->update_g1_committed();
4037 }
4038
4039 #ifdef TRACESPINNING
4040 ParallelTaskTerminator::print_termination_counts();
4041 #endif
5724 if (hr->isHumongous()) {
5725 assert(hr->startsHumongous(), "we should only see starts humongous");
5726 free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par);
5727 } else {
5728 _old_set.remove_with_proxy(hr, old_proxy_set);
5729 free_region(hr, pre_used, free_list, par);
5730 }
5731 } else {
5732 hr->rem_set()->do_cleanup_work(hrrs_cleanup_task);
5733 }
5734 }
5735
5736 void G1CollectedHeap::free_region(HeapRegion* hr,
5737 size_t* pre_used,
5738 FreeRegionList* free_list,
5739 bool par) {
5740 assert(!hr->isHumongous(), "this is only for non-humongous regions");
5741 assert(!hr->is_empty(), "the region should not be empty");
5742 assert(free_list != NULL, "pre-condition");
5743
5744 *pre_used += hr->used();
5745 hr->hr_clear(par, true /* clear_space */);
5746 free_list->add_as_head(hr);
5747 }
5748
5749 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
5750 size_t* pre_used,
5751 FreeRegionList* free_list,
5752 HumongousRegionSet* humongous_proxy_set,
5753 bool par) {
5754 assert(hr->startsHumongous(), "this is only for starts humongous regions");
5755 assert(free_list != NULL, "pre-condition");
5756 assert(humongous_proxy_set != NULL, "pre-condition");
5757
5758 size_t hr_used = hr->used();
5759 size_t hr_capacity = hr->capacity();
5760 size_t hr_pre_used = 0;
5761 _humongous_set.remove_with_proxy(hr, humongous_proxy_set);
5762 // We need to read this before we make the region non-humongous,
5763 // otherwise the information will be gone.
5862 // retires each region and replaces it with a new one will do a
5863 // maximal allocation to fill in [pre_dummy_top(),end()] but will
5864 // not dirty that area (one less thing to have to do while holding
5865 // a lock). So we can only verify that [bottom(),pre_dummy_top()]
5866 // is dirty.
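  // Illustrative example (hypothetical addresses): with bottom() ==
  // 0x1000, pre_dummy_top() == 0x1800 and end() == 0x2000, only the
  // cards covering [0x1000, 0x1800) must be dirty; the dummy-filled
  // tail [0x1800, 0x2000) may legitimately remain clean.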
5867 CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
5868 MemRegion mr(hr->bottom(), hr->pre_dummy_top());
5869 ct_bs->verify_dirty_region(mr);
5870 }
5871
5872 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
5873 CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
5874 for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
5875 verify_dirty_region(hr);
5876 }
5877 }
5878
5879 void G1CollectedHeap::verify_dirty_young_regions() {
5880 verify_dirty_young_list(_young_list->first_region());
5881 }
5882 #endif
5883
5884 void G1CollectedHeap::cleanUpCardTable() {
5885 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
5886 double start = os::elapsedTime();
5887
5888 {
5889 // Iterate over the dirty cards region list.
5890 G1ParCleanupCTTask cleanup_task(ct_bs, this);
5891
5892 if (G1CollectedHeap::use_parallel_gc_threads()) {
5893 set_par_threads();
5894 workers()->run_task(&cleanup_task);
5895 set_par_threads(0);
5896 } else {
5897 while (_dirty_cards_region_list) {
5898 HeapRegion* r = _dirty_cards_region_list;
5899 cleanup_task.clear_cards(r);
5900 _dirty_cards_region_list = r->get_next_dirty_cards_region();
5901 if (_dirty_cards_region_list == r) {
5902 // The last region.
6244 return false;
6245 } else {
6246 return hr->is_in(p);
6247 }
6248 }
6249
6250 // Methods for the mutator alloc region
6251
6252 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
6253 bool force) {
6254 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6255 assert(!force || g1_policy()->can_expand_young_list(),
6256 "if force is true we should be able to expand the young list");
6257 bool young_list_full = g1_policy()->is_young_list_full();
6258 if (force || !young_list_full) {
6259 HeapRegion* new_alloc_region = new_region(word_size,
6260 false /* do_expand */);
6261 if (new_alloc_region != NULL) {
6262 set_region_short_lived_locked(new_alloc_region);
6263 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
6264 return new_alloc_region;
6265 }
6266 }
6267 return NULL;
6268 }
6269
6270 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
6271 size_t allocated_bytes) {
6272 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6273 assert(alloc_region->is_young(), "all mutator alloc regions should be young");
6274
6275 g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
6276 _summary_bytes_used += allocated_bytes;
6277 _hr_printer.retire(alloc_region);
6278 // We update the eden sizes here, when the region is retired,
6279 // instead of when it's allocated, since this is the point at which
6280 // its used space has been recorded in _summary_bytes_used.
6281 g1mm()->update_eden_size();
6282 }
6283
6308 }
6309
6310 // Methods for the GC alloc regions
6311
6312 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
6313 uint count,
6314 GCAllocPurpose ap) {
6315 assert(FreeList_lock->owned_by_self(), "pre-condition");
6316
6317 if (count < g1_policy()->max_regions(ap)) {
6318 HeapRegion* new_alloc_region = new_region(word_size,
6319 true /* do_expand */);
6320 if (new_alloc_region != NULL) {
6321 // We really only need to do this for old regions given that we
6322 // should never scan survivors. But it doesn't hurt to do it
6323 // for survivors too.
6324 new_alloc_region->set_saved_mark();
6325 if (ap == GCAllocForSurvived) {
6326 new_alloc_region->set_survivor();
6327 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
6328 } else {
6329 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
6330 }
6331 bool during_im = g1_policy()->during_initial_mark_pause();
6332 new_alloc_region->note_start_of_copying(during_im);
6333 return new_alloc_region;
6334 } else {
6335 g1_policy()->note_alloc_region_limit_reached(ap);
6336 }
6337 }
6338 return NULL;
6339 }
6340
6341 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6342 size_t allocated_bytes,
6343 GCAllocPurpose ap) {
6344 bool during_im = g1_policy()->during_initial_mark_pause();
6345 alloc_region->note_end_of_copying(during_im);
6346 g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6347 if (ap == GCAllocForSurvived) {
6348 young_list()->add_survivor_region(alloc_region);
6349 } else {
|
1 /*
2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
767 for (uint i = first + 1; i < last; ++i) {
768 hr = region_at(i);
769 if ((i + 1) == last) {
770 // last continues humongous region
771 assert(hr->bottom() < new_top && new_top <= hr->end(),
772 "new_top should fall on this region");
773 hr->set_top(new_top);
774 _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top);
775 } else {
776 // not last one
777 assert(new_top > hr->end(), "new_top should be above this region");
778 hr->set_top(hr->end());
779 _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
780 }
781 }
782 // If we have continues humongous regions (hr != NULL), then the
783 // end of the last one should match new_end and its top should
784 // match new_top.
785 assert(hr == NULL ||
786 (hr->end() == new_end && hr->top() == new_top), "sanity");
787 check_bitmaps("Humongous Region Allocation", first_hr);
788
789 assert(first_hr->used() == word_size * HeapWordSize, "invariant");
790 _summary_bytes_used += first_hr->used();
791 _humongous_set.add(first_hr);
792
793 return new_obj;
794 }
795
796 // If the object can fit into free regions without expansion, try that.
797 // Otherwise, if we can expand the heap, do so.
798 // Otherwise, if using the expansion regions might help, try again with them given back.
799 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
800 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
801
802 verify_region_sets_optional();
803
804 size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords);
805 uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords);
806 uint x_num = expansion_regions();
807 uint fs = _hrs.free_suffix();
1318 // following two methods.
1319 wait_while_free_regions_coming();
1320 // If we start the compaction before the CM threads finish
1321 // scanning the root regions we might trip them over as we'll
1322 // be moving objects / updating references. So let's wait until
1323 // they are done. Telling them to abort should make them complete
1324 // early.
1325 _cm->root_regions()->abort();
1326 _cm->root_regions()->wait_until_scan_finished();
1327 append_secondary_free_list_if_not_empty_with_lock();
1328
1329 gc_prologue(true);
1330 increment_total_collections(true /* full gc */);
1331 increment_old_marking_cycles_started();
1332
1333 size_t g1h_prev_used = used();
1334 assert(used() == recalculate_used(), "Should be equal");
1335
1336 verify_before_gc();
1337
1338 check_bitmaps("Full GC Start");
1339 pre_full_gc_dump();
1340
1341 COMPILER2_PRESENT(DerivedPointerTable::clear());
1342
1343 // Disable discovery and empty the discovered lists
1344 // for the CM ref processor.
1345 ref_processor_cm()->disable_discovery();
1346 ref_processor_cm()->abandon_partial_discovery();
1347 ref_processor_cm()->verify_no_references_recorded();
1348
1349 // Abandon current iterations of concurrent marking and concurrent
1350 // refinement, if any are in progress.
1351 concurrent_mark()->abort();
1352
1353 // Make sure we'll choose a new allocation region afterwards.
1354 release_mutator_alloc_region();
1355 abandon_gc_alloc_regions();
1356 g1_rem_set()->cleanupHRRS();
1357
1358 // We should call this after we retire any currently active alloc
1359 // regions so that all the ALLOC / RETIRE events are generated
1360 // before the start GC event.
1361 _hr_printer.start_gc(true /* full */, (size_t) total_collections());
1362
1363 // We may have added regions to the current incremental collection
1364 // set between the last GC or pause and now. We need to clear the
1365 // incremental collection set and then start rebuilding it afresh
1366 // after this full GC.
1367 abandon_collection_set(g1_policy()->inc_cset_head());
1368 g1_policy()->clear_incremental_cset();
1369 g1_policy()->stop_incremental_cset_building();
1370
1373
1374 // See the comments in g1CollectedHeap.hpp and
1375 // G1CollectedHeap::ref_processing_init() about
1376 // how reference processing currently works in G1.
1377
1378 // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1379 ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1380
1381 // Temporarily clear the STW ref processor's _is_alive_non_header field.
1382 ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1383
1384 ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
1385 ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1386
1387 // Do collection work
1388 {
1389 HandleMark hm; // Discard invalid handles created during gc
1390 G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1391 }
1392
1393 // Clear the previous marking bitmap, if needed for bitmap verification.
1394 // Note we cannot do this when we clear the next marking bitmap in
1395 // ConcurrentMark::abort() above since VerifyDuringGC verifies the
1396 // objects marked during a full GC against the previous bitmap.
1397 if (G1VerifyPrevBitmap) {
1398 ((CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
1399 }
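     // Together with the clearRangePrevBitmap() call in free_region(),
     // this keeps the prev bitmap consistent with prev TAMS, so that
     // the check_bitmaps("Full GC End") call below can meaningfully
     // verify both bitmaps.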
1400
1401 assert(free_regions() == 0, "we should not have added any free regions");
1402 rebuild_region_sets(false /* free_list_only */);
1403
1404 // Enqueue any discovered reference objects that have
1405 // not been removed from the discovered lists.
1406 ref_processor_stw()->enqueue_discovered_references();
1407
1408 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
1409
1410 MemoryService::track_memory_usage();
1411
1412 verify_after_gc();
1413
1414 check_bitmaps("Full GC End");
1415
1416 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1417 ref_processor_stw()->verify_no_references_recorded();
1418
1419 // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1420 ClassLoaderDataGraph::purge();
1421
1422 // Note: since we've just done a full GC, concurrent
1423 // marking is no longer active. Therefore we need not
1424 // re-enable reference discovery for the CM ref processor.
1425 // That will be done at the start of the next marking cycle.
1426 assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1427 ref_processor_cm()->verify_no_references_recorded();
1428
1429 reset_gc_time_stamp();
1430 // Since everything potentially moved, we will clear all remembered
1431 // sets, and clear all cards. Later we will rebuild remembered
1432 // sets. We will also reset the GC time stamps of the regions.
1433 clear_rsets_post_compaction();
1434 check_gc_time_stamps();
1435
3769 // get entries from the secondary_free_list.
3770 if (!G1StressConcRegionFreeing) {
3771 append_secondary_free_list_if_not_empty_with_lock();
3772 }
3773
3774 assert(check_young_list_well_formed(),
3775 "young list should be well formed");
3776
3777 // Don't dynamically change the number of GC threads this early. A value of
3778 // 0 is used to indicate serial work. When parallel work is done,
3779 // it will be set.
3780
3781 { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3782 IsGCActiveMark x;
3783
3784 gc_prologue(false);
3785 increment_total_collections(false /* full gc */);
3786 increment_gc_time_stamp();
3787
3788 verify_before_gc();
3789 check_bitmaps("GC Start");
3790
3791 COMPILER2_PRESENT(DerivedPointerTable::clear());
3792
3793 // Please see comment in g1CollectedHeap.hpp and
3794 // G1CollectedHeap::ref_processing_init() to see how
3795 // reference processing currently works in G1.
3796
3797 // Enable discovery in the STW reference processor
3798 ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
3799 true /*verify_no_refs*/);
3800
3801 {
3802 // We want to temporarily turn off discovery by the
3803 // CM ref processor, if necessary, and turn it back on
3804 // again later if we do. Using a scoped
3805 // NoRefDiscovery object will do this.
3806 NoRefDiscovery no_cm_discovery(ref_processor_cm());
3807
3808 // Forget the current alloc region (we might even choose it to be part
3809 // of the collection set!).
4014 // regions we just allocated to (i.e., the GC alloc
4015 // regions). However, during the last GC we called
4016 // set_saved_mark() on all the GC alloc regions, so card
4017 // scanning might skip the [saved_mark_word()...top()] area of
4018 // those regions (i.e., the area we allocated objects into
4019 // during the last GC). But it shouldn't. Given that
4020 // saved_mark_word() is conditional on whether the GC time stamp
4021 // on the region is current or not, by incrementing the GC time
4022 // stamp here we invalidate all the GC time stamps on all the
4023 // regions and saved_mark_word() will simply return top() for
4024 // all the regions. This is a nicer way of ensuring this than
4025 // iterating over the regions and fixing them one by one. In fact, the
4026 // GC time stamp increment here also ensures that
4027 // saved_mark_word() will return top() between pauses, i.e.,
4028 // during concurrent refinement. So we don't need the
4029 // is_gc_active() check to decide which top to use when
4030 // scanning cards (see CR 7039627).
4031 increment_gc_time_stamp();
4032
4033 verify_after_gc();
4034 check_bitmaps("GC End");
4035
4036 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
4037 ref_processor_stw()->verify_no_references_recorded();
4038
4039 // CM reference discovery will be re-enabled if necessary.
4040 }
4041
4042 // We should do this after we potentially expand the heap so
4043 // that all the COMMIT events are generated before the end GC
4044 // event, and after we retire the GC alloc regions so that all
4045 // RETIRE events are generated before the end GC event.
4046 _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4047
4048 if (mark_in_progress()) {
4049 concurrent_mark()->update_g1_committed();
4050 }
4051
4052 #ifdef TRACESPINNING
4053 ParallelTaskTerminator::print_termination_counts();
4054 #endif
5737 if (hr->isHumongous()) {
5738 assert(hr->startsHumongous(), "we should only see starts humongous");
5739 free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par);
5740 } else {
5741 _old_set.remove_with_proxy(hr, old_proxy_set);
5742 free_region(hr, pre_used, free_list, par);
5743 }
5744 } else {
5745 hr->rem_set()->do_cleanup_work(hrrs_cleanup_task);
5746 }
5747 }
5748
5749 void G1CollectedHeap::free_region(HeapRegion* hr,
5750 size_t* pre_used,
5751 FreeRegionList* free_list,
5752 bool par) {
5753 assert(!hr->isHumongous(), "this is only for non-humongous regions");
5754 assert(!hr->is_empty(), "the region should not be empty");
5755 assert(free_list != NULL, "pre-condition");
5756
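  // Scrub any stale prev-bitmap marks over the region being freed:
  // hr_clear() below resets the region's TAMS to bottom(), so marks
  // left behind would sit above prev TAMS and make a later
  // check_bitmaps() call report a false failure on a recycled region.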
5757 if (G1VerifyPrevBitmap) {
5758 MemRegion mr(hr->bottom(), hr->end());
5759 concurrent_mark()->clearRangePrevBitmap(mr);
5760 }
5761 *pre_used += hr->used();
5762 hr->hr_clear(par, true /* clear_space */);
5763 free_list->add_as_head(hr);
5764 }
5765
5766 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
5767 size_t* pre_used,
5768 FreeRegionList* free_list,
5769 HumongousRegionSet* humongous_proxy_set,
5770 bool par) {
5771 assert(hr->startsHumongous(), "this is only for starts humongous regions");
5772 assert(free_list != NULL, "pre-condition");
5773 assert(humongous_proxy_set != NULL, "pre-condition");
5774
5775 size_t hr_used = hr->used();
5776 size_t hr_capacity = hr->capacity();
5777 size_t hr_pre_used = 0;
5778 _humongous_set.remove_with_proxy(hr, humongous_proxy_set);
5779 // We need to read this before we make the region non-humongous,
5780 // otherwise the information will be gone.
5879 // retires each region and replaces it with a new one will do a
5880 // maximal allocation to fill in [pre_dummy_top(),end()] but will
5881 // not dirty that area (one less thing to have to do while holding
5882 // a lock). So we can only verify that [bottom(),pre_dummy_top()]
5883 // is dirty.
5884 CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
5885 MemRegion mr(hr->bottom(), hr->pre_dummy_top());
5886 ct_bs->verify_dirty_region(mr);
5887 }
5888
5889 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
5890 CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
5891 for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
5892 verify_dirty_region(hr);
5893 }
5894 }
5895
5896 void G1CollectedHeap::verify_dirty_young_regions() {
5897 verify_dirty_young_list(_young_list->first_region());
5898 }
5899
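// Returns true if the given bitmap has no bit set in [tams, end).
// A mark at or above the region's top-at-mark-start (TAMS) means the
// bitmap and the TAMS value disagree; the offending address is printed
// and false is returned.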
5900 bool G1CollectedHeap::verify_bitmap(const char* bitmap_name, CMBitMapRO* bitmap,
5901 HeapWord* tams, HeapWord* end) {
5902 guarantee(tams <= end,
5903 err_msg("tams: "PTR_FORMAT" end: "PTR_FORMAT, tams, end));
5904 HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
5905 guarantee(result <= end,
5906 err_msg("result: "PTR_FORMAT" end: "PTR_FORMAT, result, end));
5907 if (result < end) {
5908 gclog_or_tty->cr();
5909 gclog_or_tty->print_cr("## wrong marked address on %s bitmap: "PTR_FORMAT,
5910 bitmap_name, result);
5911 gclog_or_tty->print_cr("## %s tams: "PTR_FORMAT" end: "PTR_FORMAT,
5912 bitmap_name, tams, end);
5913 return false;
5914 }
5915 return true;
5916 }
5917
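// Per-region check: the prev bitmap is verified against prev TAMS only
// when G1VerifyPrevBitmap is set (stale prev marks are tolerated
// otherwise), and the next bitmap is verified against next TAMS except
// while the concurrent mark thread is busy clearing that bitmap.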
5918 bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
5919 CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();
5920 CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();
5921
5922 HeapWord* bottom = hr->bottom();
5923 HeapWord* ptams = hr->prev_top_at_mark_start();
5924 HeapWord* ntams = hr->next_top_at_mark_start();
5925 HeapWord* end = hr->end();
5926
5927 bool res_p = true;
5928 if (G1VerifyPrevBitmap) {
5929 res_p = verify_bitmap("prev", prev_bitmap, ptams, end);
5930 }
5931
5932 bool res_n = true;
5933 if (mark_in_progress() || !_cmThread->in_progress()) {
5934 res_n = verify_bitmap("next", next_bitmap, ntams, end);
5935 }
5936 if (!res_p || !res_n) {
5937 gclog_or_tty->print_cr("#### Bitmap verification failed for "HR_FORMAT,
5938 HR_FORMAT_PARAMS(hr));
5939 gclog_or_tty->print_cr("#### Caller: %s", caller);
5940 return false;
5941 }
5942 return true;
5943 }
5944
5945 void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
5946 if (!G1VerifyBitmaps) return;
5947
5948 guarantee(verify_bitmaps(caller, hr), "bitmap verification");
5949 }
5950
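// Applies verify_bitmaps() to every region, accumulating failures
// rather than bailing out at the first bad region, so that all
// offenders are logged before the guarantee in check_bitmaps() fires.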
5951 class G1VerifyBitmapClosure : public HeapRegionClosure {
5952 private:
5953 const char* _caller;
5954 G1CollectedHeap* _g1h;
5955 bool _failures;
5956
5957 public:
5958 G1VerifyBitmapClosure(const char* caller, G1CollectedHeap* g1h) :
5959 _caller(caller), _g1h(g1h), _failures(false) { }
5960
5961 bool failures() { return _failures; }
5962
5963 virtual bool doHeapRegion(HeapRegion* hr) {
5964 if (hr->continuesHumongous()) return false;
5965
5966 bool result = _g1h->verify_bitmaps(_caller, hr);
5967 if (!result) {
5968 _failures = true;
5969 }
5970 return false;
5971 }
5972 };
5973
5974 void G1CollectedHeap::check_bitmaps(const char* caller) {
5975 if (!G1VerifyBitmaps) return;
5976
5977 G1VerifyBitmapClosure cl(caller, this);
5978 heap_region_iterate(&cl);
5979 guarantee(!cl.failures(), "bitmap verification");
5980 }
5981 #endif // PRODUCT
5982
5983 void G1CollectedHeap::cleanUpCardTable() {
5984 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
5985 double start = os::elapsedTime();
5986
5987 {
5988 // Iterate over the dirty cards region list.
5989 G1ParCleanupCTTask cleanup_task(ct_bs, this);
5990
5991 if (G1CollectedHeap::use_parallel_gc_threads()) {
5992 set_par_threads();
5993 workers()->run_task(&cleanup_task);
5994 set_par_threads(0);
5995 } else {
5996 while (_dirty_cards_region_list) {
5997 HeapRegion* r = _dirty_cards_region_list;
5998 cleanup_task.clear_cards(r);
5999 _dirty_cards_region_list = r->get_next_dirty_cards_region();
6000 if (_dirty_cards_region_list == r) {
6001 // The last region.
6343 return false;
6344 } else {
6345 return hr->is_in(p);
6346 }
6347 }
6348
6349 // Methods for the mutator alloc region
6350
6351 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
6352 bool force) {
6353 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6354 assert(!force || g1_policy()->can_expand_young_list(),
6355 "if force is true we should be able to expand the young list");
6356 bool young_list_full = g1_policy()->is_young_list_full();
6357 if (force || !young_list_full) {
6358 HeapRegion* new_alloc_region = new_region(word_size,
6359 false /* do_expand */);
6360 if (new_alloc_region != NULL) {
6361 set_region_short_lived_locked(new_alloc_region);
6362 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
6363 check_bitmaps("Mutator Region Allocation", new_alloc_region);
6364 return new_alloc_region;
6365 }
6366 }
6367 return NULL;
6368 }
6369
6370 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
6371 size_t allocated_bytes) {
6372 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6373 assert(alloc_region->is_young(), "all mutator alloc regions should be young");
6374
6375 g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
6376 _summary_bytes_used += allocated_bytes;
6377 _hr_printer.retire(alloc_region);
6378 // We update the eden sizes here, when the region is retired,
6379 // instead of when it's allocated, since this is the point at which
6380 // its used space has been recorded in _summary_bytes_used.
6381 g1mm()->update_eden_size();
6382 }
6383
6408 }
6409
6410 // Methods for the GC alloc regions
6411
6412 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
6413 uint count,
6414 GCAllocPurpose ap) {
6415 assert(FreeList_lock->owned_by_self(), "pre-condition");
6416
6417 if (count < g1_policy()->max_regions(ap)) {
6418 HeapRegion* new_alloc_region = new_region(word_size,
6419 true /* do_expand */);
6420 if (new_alloc_region != NULL) {
6421 // We really only need to do this for old regions given that we
6422 // should never scan survivors. But it doesn't hurt to do it
6423 // for survivors too.
6424 new_alloc_region->set_saved_mark();
6425 if (ap == GCAllocForSurvived) {
6426 new_alloc_region->set_survivor();
6427 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
6428 check_bitmaps("Survivor Region Allocation", new_alloc_region);
6429 } else {
6430 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
6431 check_bitmaps("Old Region Allocation", new_alloc_region);
6432 }
6433 bool during_im = g1_policy()->during_initial_mark_pause();
6434 new_alloc_region->note_start_of_copying(during_im);
6435 return new_alloc_region;
6436 } else {
6437 g1_policy()->note_alloc_region_limit_reached(ap);
6438 }
6439 }
6440 return NULL;
6441 }
6442
6443 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6444 size_t allocated_bytes,
6445 GCAllocPurpose ap) {
6446 bool during_im = g1_policy()->during_initial_mark_pause();
6447 alloc_region->note_end_of_copying(during_im);
6448 g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6449 if (ap == GCAllocForSurvived) {
6450 young_list()->add_survivor_region(alloc_region);
6451 } else {
|