
src/share/vm/gc/g1/g1CollectedHeap.cpp

rev 10311 : 8151178 Move the collection set out of the G1 collector policy
Reviewed-by:
rev 10312 : imported patch remove-cset-from-name
rev 10313 : imported patch fix-override-policy

*** 32,41 ****
--- 32,42 ----
  #include "gc/g1/concurrentG1Refine.hpp"
  #include "gc/g1/concurrentG1RefineThread.hpp"
  #include "gc/g1/concurrentMarkThread.inline.hpp"
  #include "gc/g1/g1Allocator.inline.hpp"
  #include "gc/g1/g1CollectedHeap.inline.hpp"
+ #include "gc/g1/g1CollectionSet.hpp"
  #include "gc/g1/g1CollectorPolicy.hpp"
  #include "gc/g1/g1CollectorState.hpp"
  #include "gc/g1/g1EvacStats.inline.hpp"
  #include "gc/g1/g1GCPhaseTimes.hpp"
  #include "gc/g1/g1HeapTransition.hpp"
*** 1300,1312 ****
    // We may have added regions to the current incremental collection
    // set between the last GC or pause and now. We need to clear the
    // incremental collection set and then start rebuilding it afresh
    // after this full GC.
!   abandon_collection_set(g1_policy()->inc_cset_head());
!   g1_policy()->clear_incremental_cset();
!   g1_policy()->stop_incremental_cset_building();

    tear_down_region_sets(false /* free_list_only */);
    collector_state()->set_gcs_are_young(true);

    // See the comments in g1CollectedHeap.hpp and
--- 1301,1313 ----
    // We may have added regions to the current incremental collection
    // set between the last GC or pause and now. We need to clear the
    // incremental collection set and then start rebuilding it afresh
    // after this full GC.
!   abandon_collection_set(collection_set()->inc_head());
!   collection_set()->clear_incremental();
!   collection_set()->stop_incremental_building();

    tear_down_region_sets(false /* free_list_only */);
    collector_state()->set_gcs_are_young(true);

    // See the comments in g1CollectedHeap.hpp and
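The three replaced calls reset the incremental collection set before a full GC rebuilds the region sets. A minimal sketch of that reset order, assuming only the G1CollectionSet entry points visible in this hunk (the free-standing helper itself is hypothetical, for illustration):

  // Hypothetical helper showing the full-GC reset order used above.
  static void reset_incremental_cset_for_full_gc(G1CollectedHeap* g1h) {
    G1CollectionSet* cset = g1h->collection_set();
    // Unlink regions already queued for the next young pause.
    g1h->abandon_collection_set(cset->inc_head());
    // Drop the incremental set's bookkeeping.
    cset->clear_incremental();
    // Refuse further additions until building restarts after the GC.
    cset->stop_incremental_building();
  }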
*** 1424,1435 ****
      ((G1CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
    }
    _verifier->check_bitmaps("Full GC End");

    // Start a new incremental collection set for the next pause
!   assert(g1_policy()->collection_set() == NULL, "must be");
!   g1_policy()->start_incremental_cset_building();

    clear_cset_fast_test();

    _allocator->init_mutator_alloc_region();
--- 1425,1436 ----
      ((G1CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
    }
    _verifier->check_bitmaps("Full GC End");

    // Start a new incremental collection set for the next pause
!   assert(collection_set()->head() == NULL, "must be");
!   collection_set()->start_incremental_building();

    clear_cset_fast_test();

    _allocator->init_mutator_alloc_region();
*** 1739,1748 ****
--- 1740,1750 ----
  // Public methods.

  G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
    CollectedHeap(),
    _g1_policy(policy_),
+   _collection_set(new G1CollectionSet(this, policy_)),
    _dirty_card_queue_set(false),
    _is_alive_closure_cm(this),
    _is_alive_closure_stw(this),
    _ref_processor_cm(NULL),
    _ref_processor_stw(NULL),
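Taken together, the call sites in this review outline the public surface of the new class. The declaration below is inferred from those call sites alone, not copied from g1CollectionSet.hpp, so anything beyond the visible names (base class, parameter names, exact signatures) is an assumption:

  // Sketch of G1CollectionSet as implied by the calls in this diff.
  class G1CollectionSet {
  public:
    G1CollectionSet(G1CollectedHeap* g1h, G1CollectorPolicy* policy);

    // The list of regions chosen for the current pause.
    HeapRegion* head();
    void clear_head();

    // The incrementally built set for the next young pause.
    HeapRegion* inc_head();
    void start_incremental_building();
    void stop_incremental_building();
    void clear_incremental();
    void add_eden_region(HeapRegion* hr);

    // Sizing and accounting queried during a pause.
    uint region_length();
    uint young_region_length();
    size_t bytes_used_before();
  };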
*** 2539,2550 ****
    // If we have:
    //   n collection set regions
    //   p threads
    // Then thread t will start at region floor ((t * n) / p)

!   result = g1_policy()->collection_set();
!   uint cs_size = g1_policy()->cset_region_length();
    uint active_workers = workers()->active_workers();

    uint end_ind   = (cs_size * worker_i) / active_workers;
    uint start_ind = 0;
--- 2541,2552 ----
    // If we have:
    //   n collection set regions
    //   p threads
    // Then thread t will start at region floor ((t * n) / p)

!   result = collection_set()->head();
!   uint cs_size = collection_set()->region_length();
    uint active_workers = workers()->active_workers();

    uint end_ind   = (cs_size * worker_i) / active_workers;
    uint start_ind = 0;
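The floor((t * n) / p) rule hands each of the p workers an almost equal, contiguous slice of the n-region list; no two slices differ in size by more than one region. A small self-contained illustration (plain C++, independent of the VM types; n and p are example values):

  #include <cstdio>

  int main() {
    const unsigned n = 10; // collection set regions (example)
    const unsigned p = 4;  // active workers (example)
    // Worker t starts at floor((t * n) / p); integer division floors.
    for (unsigned t = 0; t < p; t++) {
      printf("worker %u starts at region %u\n", t, (t * n) / p);
    }
    // Prints 0, 2, 5, 7: contiguous slices of size 2, 3, 2, 3.
    return 0;
  }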
*** 2571,2581 ****
    _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
    return result;
  }

  void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
!   HeapRegion* r = g1_policy()->collection_set();
    while (r != NULL) {
      HeapRegion* next = r->next_in_collection_set();
      if (cl->doHeapRegion(r)) {
        cl->incomplete();
        return;
--- 2573,2583 ----
    _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
    return result;
  }

  void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
!   HeapRegion* r = collection_set()->head();
    while (r != NULL) {
      HeapRegion* next = r->next_in_collection_set();
      if (cl->doHeapRegion(r)) {
        cl->incomplete();
        return;
*** 2600,2610 ****
        cl->incomplete();
        return;
      }
      cur = next;
    }
!   cur = g1_policy()->collection_set();
    while (cur != r) {
      HeapRegion* next = cur->next_in_collection_set();
      if (cl->doHeapRegion(cur) && false) {
        cl->incomplete();
        return;
--- 2602,2612 ----
        cl->incomplete();
        return;
      }
      cur = next;
    }
!   cur = collection_set()->head();
    while (cur != r) {
      HeapRegion* next = cur->next_in_collection_set();
      if (cl->doHeapRegion(cur) && false) {
        cl->incomplete();
        return;
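This is the wrap-around half of collection_set_iterate_from(): the loop that ends just above walks from the claimed start region r to the tail, and the loop at the changed line continues from the head up to, but not including, r, so each region is visited exactly once. The same pattern on a generic singly linked list (names are illustrative, not VM API):

  struct Node { Node* next; };

  // Visit every node exactly once, starting at r and wrapping
  // through head, mirroring collection_set_iterate_from().
  template <typename Visit>
  void iterate_from(Node* head, Node* r, Visit visit) {
    for (Node* cur = r; cur != NULL; cur = cur->next) {
      visit(cur);              // claimed start to tail
    }
    for (Node* cur = head; cur != r; cur = cur->next) {
      visit(cur);              // head back around to the start
    }
  }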
*** 3330,3343 ****
          if (collector_state()->during_initial_mark_pause()) {
            concurrent_mark()->checkpointRootsInitialPre();
          }

!         double time_remaining_ms = g1_policy()->finalize_young_cset_part(target_pause_time_ms);
!         g1_policy()->finalize_old_cset_part(time_remaining_ms);
!         evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length());

          // Make sure the remembered sets are up to date. This needs to be
          // done before register_humongous_regions_with_cset(), because the
          // remembered sets are used there to choose eager reclaim candidates.
          // If the remembered sets are not up to date we might miss some
--- 3332,3344 ----
          if (collector_state()->during_initial_mark_pause()) {
            concurrent_mark()->checkpointRootsInitialPre();
          }

!         g1_policy()->finalize_collection_set(target_pause_time_ms);
!         evacuation_info.set_collectionset_regions(collection_set()->region_length());

          // Make sure the remembered sets are up to date. This needs to be
          // done before register_humongous_regions_with_cset(), because the
          // remembered sets are used there to choose eager reclaim candidates.
          // If the remembered sets are not up to date we might miss some
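The old two-step finalization returned the unused pause-time budget from the young part and spent it on the old part; the new single call presumably folds those steps together. A hedged sketch of that wrapper, assuming the two former phases became G1CollectionSet methods (the names finalize_young_part and finalize_old_part are assumptions, not taken from this diff):

  // Assumed composition of the former two-phase finalization.
  void G1CollectorPolicy::finalize_collection_set(double target_pause_time_ms) {
    // Choose the young regions first; returns the remaining budget.
    double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms);
    // Spend what is left of the pause goal on optional old regions.
    _collection_set->finalize_old_part(time_remaining_ms);
  }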
*** 3352,3362 ****
          // We call this after finalize_cset() to
          // ensure that the CSet has been finalized.
          _cm->verify_no_cset_oops();

          if (_hr_printer.is_active()) {
!           HeapRegion* hr = g1_policy()->collection_set();
            while (hr != NULL) {
              _hr_printer.cset(hr);
              hr = hr->next_in_collection_set();
            }
          }
--- 3353,3363 ----
          // We call this after finalize_cset() to
          // ensure that the CSet has been finalized.
          _cm->verify_no_cset_oops();

          if (_hr_printer.is_active()) {
!           HeapRegion* hr = collection_set()->head();
            while (hr != NULL) {
              _hr_printer.cset(hr);
              hr = hr->next_in_collection_set();
            }
          }
*** 3367,3393 ****
  #endif // ASSERT

          // Initialize the GC alloc regions.
          _allocator->init_gc_alloc_regions(evacuation_info);

!         G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), g1_policy()->young_cset_region_length());
          pre_evacuate_collection_set();

          // Actually do the work...
          evacuate_collection_set(evacuation_info, &per_thread_states);

          post_evacuate_collection_set(evacuation_info, &per_thread_states);

          const size_t* surviving_young_words = per_thread_states.surviving_young_words();
!         free_collection_set(g1_policy()->collection_set(), evacuation_info, surviving_young_words);

          eagerly_reclaim_humongous_regions();

!         g1_policy()->clear_collection_set();

          // Start a new incremental collection set for the next pause.
!         g1_policy()->start_incremental_cset_building();
          clear_cset_fast_test();

          // Don't check the whole heap at this point as the
          // GC alloc regions from this pause have been tagged
--- 3368,3394 ----
  #endif // ASSERT

          // Initialize the GC alloc regions.
          _allocator->init_gc_alloc_regions(evacuation_info);

!         G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), collection_set()->young_region_length());
          pre_evacuate_collection_set();

          // Actually do the work...
          evacuate_collection_set(evacuation_info, &per_thread_states);

          post_evacuate_collection_set(evacuation_info, &per_thread_states);

          const size_t* surviving_young_words = per_thread_states.surviving_young_words();
!         free_collection_set(collection_set()->head(), evacuation_info, surviving_young_words);

          eagerly_reclaim_humongous_regions();

!         collection_set()->clear_head();

          // Start a new incremental collection set for the next pause.
!         collection_set()->start_incremental_building();
          clear_cset_fast_test();

          // Don't check the whole heap at this point as the
          // GC alloc regions from this pause have been tagged
*** 3458,3468 ****
          double sample_end_time_sec = os::elapsedTime();
          double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
          size_t total_cards_scanned = per_thread_states.total_cards_scanned();
          g1_policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);

!         evacuation_info.set_collectionset_used_before(g1_policy()->collection_set_bytes_used_before());
          evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());

          MemoryService::track_memory_usage();

          // In prepare_for_verify() below we'll need to scan the deferred
--- 3459,3469 ----
          double sample_end_time_sec = os::elapsedTime();
          double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
          size_t total_cards_scanned = per_thread_states.total_cards_scanned();
          g1_policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);

!         evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before());
          evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());

          MemoryService::track_memory_usage();

          // In prepare_for_verify() below we'll need to scan the deferred
*** 4889,4899 ****
      clear_in_cset(cur);

      if (cur->is_young()) {
        int index = cur->young_index_in_cset();
        assert(index != -1, "invariant");
!       assert((uint) index < policy->young_cset_region_length(), "invariant");
        size_t words_survived = surviving_young_words[index];
        cur->record_surv_words_in_group(words_survived);

        // At this point the we have 'popped' cur from the collection set
        // (linked via next_in_collection_set()) but it is still in the
--- 4890,4900 ----
      clear_in_cset(cur);

      if (cur->is_young()) {
        int index = cur->young_index_in_cset();
        assert(index != -1, "invariant");
!       assert((uint) index < collection_set()->young_region_length(), "invariant");
        size_t words_survived = surviving_young_words[index];
        cur->record_surv_words_in_group(words_survived);

        // At this point the we have 'popped' cur from the collection set
        // (linked via next_in_collection_set()) but it is still in the
*** 5362,5372 ****
  void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
                                                    size_t allocated_bytes) {
    assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
    assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");

!   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
    increase_used(allocated_bytes);
    _hr_printer.retire(alloc_region);

    // We update the eden sizes here, when the region is retired,
    // instead of when it's allocated, since this is the point that its
    // used space has been recored in _summary_bytes_used.
--- 5363,5373 ----
  void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
                                                    size_t allocated_bytes) {
    assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
    assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");

!   collection_set()->add_eden_region(alloc_region);
    increase_used(allocated_bytes);
    _hr_printer.retire(alloc_region);

    // We update the eden sizes here, when the region is retired,
    // instead of when it's allocated, since this is the point that its
    // used space has been recored in _summary_bytes_used.
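The old name described list mechanics (the "lhs", i.e. head end, of the incremental set); the new one states the invariant asserted just above it: every retired mutator region is an eden region. A sketch of what the renamed call plausibly does, inferred from the old name rather than from g1CollectionSet.cpp (_inc_head and the omitted bookkeeping are assumptions):

  // Inferred: retired eden regions are prepended to the incremental set.
  void G1CollectionSet::add_eden_region(HeapRegion* hr) {
    assert(hr->is_eden(), "precondition");
    hr->set_next_in_collection_set(_inc_head); // assumed member name
    _inc_head = hr;
    // (length and pause-prediction bookkeeping omitted)
  }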