src/share/vm/gc/g1/g1CollectedHeap.cpp
rev 11545 : [mq]: 8159978-collection-set-as-array
rev 11546 : [mq]: 8159978-erikh-review
*** 1254,1266 ****
// We may have added regions to the current incremental collection
// set between the last GC or pause and now. We need to clear the
// incremental collection set and then start rebuilding it afresh
// after this full GC.
! abandon_collection_set(collection_set()->inc_head());
! collection_set()->clear_incremental();
! collection_set()->stop_incremental_building();
tear_down_region_sets(false /* free_list_only */);
collector_state()->set_gcs_are_young(true);
// See the comments in g1CollectedHeap.hpp and
--- 1254,1264 ----
// We may have added regions to the current incremental collection
// set between the last GC or pause and now. We need to clear the
// incremental collection set and then start rebuilding it afresh
// after this full GC.
! abandon_collection_set(collection_set());
tear_down_region_sets(false /* free_list_only */);
collector_state()->set_gcs_are_young(true);
// See the comments in g1CollectedHeap.hpp and
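The three calls on the old side collapse into the single abandon_collection_set(collection_set()) call. As the final hunk of this page shows, the new implementation performs the per-region cleanup through a closure and then clears the set and stops incremental building itself; a sketch of the consolidated call, mirroring that hunk, given here only so the change reads in order:

void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {
  G1AbandonCollectionSetClosure cl;            // clear_in_cset() + young index reset
  collection_set->iterate(&cl);
  collection_set->clear();                     // subsumes the old clear_incremental()
  collection_set->stop_incremental_building();
}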
*** 1377,1387 ****
_cm->clear_prev_bitmap(workers());
}
_verifier->check_bitmaps("Full GC End");
// Start a new incremental collection set for the next pause
- assert(collection_set()->head() == NULL, "must be");
collection_set()->start_incremental_building();
clear_cset_fast_test();
_allocator->init_mutator_alloc_region();
--- 1375,1384 ----
*** 1722,1733 ****
_old_evac_stats("Old", OldPLABSize, PLABWeight),
_expand_heap_after_alloc_failure(true),
_old_marking_cycles_started(0),
_old_marking_cycles_completed(0),
_in_cset_fast_test(),
- _worker_cset_start_region(NULL),
- _worker_cset_start_region_time_stamp(NULL),
_gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
_gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
_workers = new WorkGang("GC Thread", ParallelGCThreads,
/* are_GC_task_threads */true,
--- 1719,1728 ----
*** 1746,1766 ****
_filler_array_max_size = _humongous_object_threshold_in_words;
uint n_queues = ParallelGCThreads;
_task_queues = new RefToScanQueueSet(n_queues);
- _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
- _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
_evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
for (uint i = 0; i < n_queues; i++) {
RefToScanQueue* q = new RefToScanQueue();
q->initialize();
_task_queues->register_queue(i, q);
::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
}
- clear_cset_start_regions();
// Initialize the G1EvacuationFailureALot counters and flags.
NOT_PRODUCT(reset_evacuation_should_fail();)
guarantee(_task_queues != NULL, "task_queues allocation failure.");
--- 1741,1758 ----
*** 1985,1994 ****
--- 1977,1988 ----
G1StringDedup::initialize();
_preserved_marks_set.init(ParallelGCThreads);
+ _collection_set.set_max_length(max_regions());
+
return JNI_OK;
}
void G1CollectedHeap::stop() {
// Stop all concurrent threads. We do this to make sure these threads
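set_max_length() above is new with this patch: since max_regions() bounds how many regions can ever exist, the array-backed collection set can allocate its backing storage once, up front. A hypothetical, simplified model of such a set (member names and layout are assumptions for illustration; the real G1CollectionSet is not shown in this hunk):

#include <cassert>
#include <cstdint>

class CollectionSetModel {
  uint32_t* _regions;    // region indices, in insertion order
  uint32_t  _length;     // number of regions currently in the set
  uint32_t  _max_length; // capacity, fixed at initialization time
public:
  CollectionSetModel() : _regions(nullptr), _length(0), _max_length(0) { }
  ~CollectionSetModel() { delete[] _regions; }

  // Called once during heap setup; the array never has to grow.
  void set_max_length(uint32_t max) {
    assert(_regions == nullptr && "set_max_length must be called once");
    _max_length = max;
    _regions = new uint32_t[max];
  }

  void add(uint32_t region_index) {
    assert(_length < _max_length && "collection set overflow");
    _regions[_length++] = region_index;
  }

  uint32_t length() const { return _length; }
};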
*** 2418,2538 ****
HeapRegionClaimer *hrclaimer,
bool concurrent) const {
_hrm.par_iterate(cl, worker_id, hrclaimer, concurrent);
}
- // Clear the cached CSet starting regions and (more importantly)
- // the time stamps. Called when we reset the GC time stamp.
- void G1CollectedHeap::clear_cset_start_regions() {
- assert(_worker_cset_start_region != NULL, "sanity");
- assert(_worker_cset_start_region_time_stamp != NULL, "sanity");
-
- for (uint i = 0; i < ParallelGCThreads; i++) {
- _worker_cset_start_region[i] = NULL;
- _worker_cset_start_region_time_stamp[i] = 0;
- }
- }
-
- // Given the id of a worker, obtain or calculate a suitable
- // starting region for iterating over the current collection set.
- HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
- assert(get_gc_time_stamp() > 0, "should have been updated by now");
-
- HeapRegion* result = NULL;
- unsigned gc_time_stamp = get_gc_time_stamp();
-
- if (_worker_cset_start_region_time_stamp[worker_i] == gc_time_stamp) {
- // Cached starting region for current worker was set
- // during the current pause - so it's valid.
- // Note: the cached starting heap region may be NULL
- // (when the collection set is empty).
- result = _worker_cset_start_region[worker_i];
- assert(result == NULL || result->in_collection_set(), "sanity");
- return result;
- }
-
- // The cached entry was not valid so let's calculate
- // a suitable starting heap region for this worker.
-
- // We want the parallel threads to start their collection
- // set iteration at different collection set regions to
- // avoid contention.
- // If we have:
- // n collection set regions
- // p threads
- // Then thread t will start at region floor ((t * n) / p)
-
- result = collection_set()->head();
- uint cs_size = collection_set()->region_length();
- uint active_workers = workers()->active_workers();
-
- uint end_ind = (cs_size * worker_i) / active_workers;
- uint start_ind = 0;
-
- if (worker_i > 0 &&
- _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
- // Previous workers starting region is valid
- // so let's iterate from there
- start_ind = (cs_size * (worker_i - 1)) / active_workers;
- OrderAccess::loadload();
- result = _worker_cset_start_region[worker_i - 1];
- }
-
- for (uint i = start_ind; i < end_ind; i++) {
- result = result->next_in_collection_set();
- }
-
- // Note: the calculated starting heap region may be NULL
- // (when the collection set is empty).
- assert(result == NULL || result->in_collection_set(), "sanity");
- assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
- "should be updated only once per pause");
- _worker_cset_start_region[worker_i] = result;
- OrderAccess::storestore();
- _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
- return result;
- }
-
void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
! HeapRegion* r = collection_set()->head();
! while (r != NULL) {
! HeapRegion* next = r->next_in_collection_set();
! if (cl->doHeapRegion(r)) {
! cl->incomplete();
! return;
! }
! r = next;
! }
}
! void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
! HeapRegionClosure *cl) {
! if (r == NULL) {
! // The CSet is empty so there's nothing to do.
! return;
! }
!
! assert(r->in_collection_set(),
! "Start region must be a member of the collection set.");
! HeapRegion* cur = r;
! while (cur != NULL) {
! HeapRegion* next = cur->next_in_collection_set();
! if (cl->doHeapRegion(cur) && false) {
! cl->incomplete();
! return;
! }
! cur = next;
! }
! cur = collection_set()->head();
! while (cur != r) {
! HeapRegion* next = cur->next_in_collection_set();
! if (cl->doHeapRegion(cur) && false) {
! cl->incomplete();
! return;
! }
! cur = next;
! }
}
HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
HeapRegion* result = _hrm.next_region_in_heap(from);
while (result != NULL && result->is_pinned()) {
--- 2412,2427 ----
HeapRegionClaimer *hrclaimer,
bool concurrent) const {
_hrm.par_iterate(cl, worker_id, hrclaimer, concurrent);
}
void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
! _collection_set.iterate(cl);
}
! void G1CollectedHeap::collection_set_iterate_from(HeapRegionClosure *cl, uint worker_id) {
! _collection_set.iterate_from(cl, worker_id, workers()->active_workers());
}
HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
HeapRegion* result = _hrm.next_region_in_heap(from);
while (result != NULL && result->is_pinned()) {
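The removed start_cset_region_for_worker() existed to give each worker a distinct starting point in the old linked list, using the rule in its comment: with n collection set regions and p workers, worker t starts at region floor((t * n) / p). On an array-backed set that rule becomes a plain index computation, which is presumably what iterate_from(cl, worker_id, active_workers) does internally (its implementation is not part of this hunk, so this is an assumption). A standalone illustration:

#include <cstdint>

uint32_t start_index_for_worker(uint32_t worker_id,
                                uint32_t num_regions,
                                uint32_t active_workers) {
  return (num_regions * worker_id) / active_workers; // floor division
}

// Example: 10 regions and 4 workers give start indices 0, 2, 5 and 7.
// Each worker iterates from its start index and wraps around, just as
// the removed list-based code wrapped from head() back to its start,
// but without per-worker caches, time stamps, or memory barriers.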
*** 3088,3097 ****
--- 2977,2994 ----
wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
}
g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
}
+ class G1PrintCollectionSetClosure : public HeapRegionClosure {
+ public:
+ virtual bool doHeapRegion(HeapRegion* r) {
+ G1CollectedHeap::heap()->hr_printer()->cset(r);
+ return false;
+ }
+ };
+
bool
G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
assert_at_safepoint(true /* should_be_vm_thread */);
guarantee(!is_gc_active(), "collection is not reentrant");
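G1PrintCollectionSetClosure follows the HeapRegionClosure protocol visible in the removed loops above: doHeapRegion() is invoked once per region, returning true aborts the iteration (the caller then marks the closure incomplete()), and returning false visits every region. A minimal closure in the same style, for illustration only (not part of the patch):

class G1CountRegionsClosure : public HeapRegionClosure {
  uint _count;
public:
  G1CountRegionsClosure() : _count(0) { }
  virtual bool doHeapRegion(HeapRegion* r) {
    _count++;
    return false; // keep iterating over the whole collection set
  }
  uint count() const { return _count; }
};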
*** 3266,3280 ****
// We call this after finalize_cset() to
// ensure that the CSet has been finalized.
_cm->verify_no_cset_oops();
if (_hr_printer.is_active()) {
! HeapRegion* hr = collection_set()->head();
! while (hr != NULL) {
! _hr_printer.cset(hr);
! hr = hr->next_in_collection_set();
! }
}
// Initialize the GC alloc regions.
_allocator->init_gc_alloc_regions(evacuation_info);
--- 3163,3174 ----
// We call this after finalize_cset() to
// ensure that the CSet has been finalized.
_cm->verify_no_cset_oops();
if (_hr_printer.is_active()) {
! G1PrintCollectionSetClosure cl;
! _collection_set.iterate(&cl);
}
// Initialize the GC alloc regions.
_allocator->init_gc_alloc_regions(evacuation_info);
*** 3285,3300 ****
evacuate_collection_set(evacuation_info, &per_thread_states);
post_evacuate_collection_set(evacuation_info, &per_thread_states);
const size_t* surviving_young_words = per_thread_states.surviving_young_words();
! free_collection_set(collection_set()->head(), evacuation_info, surviving_young_words);
eagerly_reclaim_humongous_regions();
- collection_set()->clear_head();
-
record_obj_copy_mem_stats();
_survivor_evac_stats.adjust_desired_plab_sz();
_old_evac_stats.adjust_desired_plab_sz();
// Start a new incremental collection set for the next pause.
--- 3179,3192 ----
evacuate_collection_set(evacuation_info, &per_thread_states);
post_evacuate_collection_set(evacuation_info, &per_thread_states);
const size_t* surviving_young_words = per_thread_states.surviving_young_words();
! free_collection_set(&_collection_set, evacuation_info, surviving_young_words);
eagerly_reclaim_humongous_regions();
record_obj_copy_mem_stats();
_survivor_evac_stats.adjust_desired_plab_sz();
_old_evac_stats.adjust_desired_plab_sz();
// Start a new incremental collection set for the next pause.
*** 4702,4825 ****
uint num_workers = workers()->active_workers();
G1ParScrubRemSetTask g1_par_scrub_rs_task(g1_rem_set(), num_workers);
workers()->run_task(&g1_par_scrub_rs_task);
}
! void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
! size_t pre_used = 0;
! FreeRegionList local_free_list("Local List for CSet Freeing");
!
! double young_time_ms = 0.0;
! double non_young_time_ms = 0.0;
!
! _eden.clear();
!
! G1Policy* policy = g1_policy();
!
! double start_sec = os::elapsedTime();
! bool non_young = true;
!
! HeapRegion* cur = cs_head;
! int age_bound = -1;
! size_t rs_lengths = 0;
! while (cur != NULL) {
! assert(!is_on_master_free_list(cur), "sanity");
! if (non_young) {
! if (cur->is_young()) {
! double end_sec = os::elapsedTime();
! double elapsed_ms = (end_sec - start_sec) * 1000.0;
! non_young_time_ms += elapsed_ms;
! start_sec = os::elapsedTime();
! non_young = false;
}
- } else {
- if (!cur->is_young()) {
- double end_sec = os::elapsedTime();
- double elapsed_ms = (end_sec - start_sec) * 1000.0;
- young_time_ms += elapsed_ms;
! start_sec = os::elapsedTime();
! non_young = true;
! }
! }
! rs_lengths += cur->rem_set()->occupied_locked();
! HeapRegion* next = cur->next_in_collection_set();
! assert(cur->in_collection_set(), "bad CS");
! cur->set_next_in_collection_set(NULL);
! clear_in_cset(cur);
! if (cur->is_young()) {
! int index = cur->young_index_in_cset();
! assert(index != -1, "invariant");
! assert((uint) index < collection_set()->young_region_length(), "invariant");
! size_t words_survived = surviving_young_words[index];
! cur->record_surv_words_in_group(words_survived);
} else {
! int index = cur->young_index_in_cset();
! assert(index == -1, "invariant");
}
! assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
! (!cur->is_young() && cur->young_index_in_cset() == -1),
! "invariant" );
!
! if (!cur->evacuation_failed()) {
! MemRegion used_mr = cur->used_region();
// A region in the collection set must not be empty.
! assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
! pre_used += cur->used();
! free_region(cur, &local_free_list, false /* par */, true /* locked */);
! } else {
! cur->uninstall_surv_rate_group();
! if (cur->is_young()) {
! cur->set_young_index_in_cset(-1);
! }
! cur->set_evacuation_failed(false);
// When moving a young gen region to old gen, we "allocate" that whole region
// there. This is in addition to any already evacuated objects. Notify the
// policy about that.
// Old gen regions do not cause an additional allocation: both the objects
// still in the region and the ones already moved are accounted for elsewhere.
! if (cur->is_young()) {
! policy->add_bytes_allocated_in_old_since_last_gc(HeapRegion::GrainBytes);
}
// The region is now considered to be old.
! cur->set_old();
// Do some allocation statistics accounting. Regions that failed evacuation
// are always made old, so there is no need to update anything in the young
// gen statistics, but we need to update old gen statistics.
! size_t used_words = cur->marked_bytes() / HeapWordSize;
! _old_evac_stats.add_failure_used_and_waste(used_words, HeapRegion::GrainWords - used_words);
! _old_set.add(cur);
! evacuation_info.increment_collectionset_used_after(cur->used());
}
! cur = next;
}
! evacuation_info.set_regions_freed(local_free_list.length());
! policy->record_max_rs_lengths(rs_lengths);
policy->cset_regions_freed();
! double end_sec = os::elapsedTime();
! double elapsed_ms = (end_sec - start_sec) * 1000.0;
! if (non_young) {
! non_young_time_ms += elapsed_ms;
! } else {
! young_time_ms += elapsed_ms;
! }
! prepend_to_freelist(&local_free_list);
! decrement_summary_bytes(pre_used);
! policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
! policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
}
class G1FreeHumongousRegionClosure : public HeapRegionClosure {
private:
FreeRegionList* _free_region_list;
--- 4594,4718 ----
uint num_workers = workers()->active_workers();
G1ParScrubRemSetTask g1_par_scrub_rs_task(g1_rem_set(), num_workers);
workers()->run_task(&g1_par_scrub_rs_task);
}
! class G1FreeCollectionSetClosure : public HeapRegionClosure {
! private:
! const size_t* _surviving_young_words;
! FreeRegionList _local_free_list;
! size_t _rs_lengths;
! // Bytes used in successfully evacuated regions before the evacuation.
! size_t _before_used_bytes;
! // Bytes used in unsuccessfully evacuated regions before the evacuation.
! size_t _after_used_bytes;
! double _young_time;
! double _non_young_time;
! public:
! G1FreeCollectionSetClosure(const size_t* surviving_young_words) :
! HeapRegionClosure(),
! _surviving_young_words(surviving_young_words),
! _local_free_list("Local Region List for CSet Freeing"),
! _rs_lengths(0),
! _before_used_bytes(0),
! _after_used_bytes(0),
! _young_time(0.0),
! _non_young_time(0.0) {
}
! virtual bool doHeapRegion(HeapRegion* r) {
! double start_time = os::elapsedTime();
! G1CollectedHeap* g1h = G1CollectedHeap::heap();
! assert(!g1h->is_on_master_free_list(r), "sanity");
! _rs_lengths += r->rem_set()->occupied_locked();
! assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
! g1h->clear_in_cset(r);
+ if (r->is_young()) {
+ int index = r->young_index_in_cset();
+ assert(index != -1, "Young index in collection set must not be -1 for region %u", r->hrm_index());
+ assert((uint) index < g1h->collection_set()->young_region_length(), "invariant");
+ size_t words_survived = _surviving_young_words[index];
+ r->record_surv_words_in_group(words_survived);
} else {
! assert(r->young_index_in_cset() == -1, "Young index for old region %u in collection set must be -1", r->hrm_index());
}
! if (!r->evacuation_failed()) {
! MemRegion used_mr = r->used_region();
// A region in the collection set must not be empty.
! assert(!used_mr.is_empty(), "Should not have empty regions in a collection set.");
! _before_used_bytes += r->used();
! g1h->free_region(r, &_local_free_list, false /* par */, true /* locked */);
! } else {
! r->uninstall_surv_rate_group();
! r->set_young_index_in_cset(-1);
! r->set_evacuation_failed(false);
// When moving a young gen region to old gen, we "allocate" that whole region
// there. This is in addition to any already evacuated objects. Notify the
// policy about that.
// Old gen regions do not cause an additional allocation: both the objects
// still in the region and the ones already moved are accounted for elsewhere.
! if (r->is_young()) {
! g1h->g1_policy()->add_bytes_allocated_in_old_since_last_gc(HeapRegion::GrainBytes);
}
// The region is now considered to be old.
! r->set_old();
// Do some allocation statistics accounting. Regions that failed evacuation
// are always made old, so there is no need to update anything in the young
// gen statistics, but we need to update old gen statistics.
! size_t used_words = r->marked_bytes() / HeapWordSize;
!
! g1h->alloc_buffer_stats(InCSetState::Old)->add_failure_used_and_waste(used_words, HeapRegion::GrainWords - used_words);
! g1h->old_set_add(r);
! _after_used_bytes += r->used();
}
!
! if (r->is_young()) {
! _young_time += os::elapsedTime() - start_time;
! } else {
! _non_young_time += os::elapsedTime() - start_time;
! }
! return false;
}
! FreeRegionList* local_free_list() { return &_local_free_list; }
! size_t rs_lengths() const { return _rs_lengths; }
! size_t before_used_bytes() const { return _before_used_bytes; }
! size_t after_used_bytes() const { return _after_used_bytes; }
!
! double young_time() const { return _young_time; }
! double non_young_time() const { return _non_young_time; }
! };
!
! void G1CollectedHeap::free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
! _eden.clear();
!
! G1FreeCollectionSetClosure cl(surviving_young_words);
! collection_set_iterate(&cl);
!
! evacuation_info.set_regions_freed(cl.local_free_list()->length());
! evacuation_info.increment_collectionset_used_after(cl.after_used_bytes());
!
! G1Policy* policy = g1_policy();
!
! policy->record_max_rs_lengths(cl.rs_lengths());
policy->cset_regions_freed();
! prepend_to_freelist(cl.local_free_list());
! decrement_summary_bytes(cl.before_used_bytes());
! policy->phase_times()->record_young_free_cset_time_ms(cl.young_time() * 1000.0);
! policy->phase_times()->record_non_young_free_cset_time_ms(cl.non_young_time() * 1000.0);
! collection_set->clear();
}
class G1FreeHumongousRegionClosure : public HeapRegionClosure {
private:
FreeRegionList* _free_region_list;
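Two details of the rewritten free_collection_set() are easy to miss. Timing: instead of measuring two contiguous young/non-young segments of the old list (which relied on young regions preceding old ones), the closure times each region individually and adds the result to a young or non-young bucket, so the accounting no longer depends on region ordering. Byte accounting: _before_used_bytes collects the pre-GC usage of successfully evacuated regions, which is subtracted from the heap summary once they are freed, while _after_used_bytes collects the usage of evacuation-failed regions, which still occupy space after the pause. A standalone sketch of that split (illustration only):

#include <cstddef>

struct FreeCSetAccounting {
  size_t before_used_bytes = 0;   // pre-GC usage of freed regions
  size_t after_used_bytes  = 0;   // usage retained by failed regions
  double young_time        = 0.0; // seconds spent on young regions
  double non_young_time    = 0.0; // seconds spent on other regions

  void account(bool is_young, bool evacuation_failed,
               size_t used_bytes, double seconds_taken) {
    if (evacuation_failed) {
      after_used_bytes += used_bytes;  // region stays allocated, now old
    } else {
      before_used_bytes += used_bytes; // region is freed afterwards
    }
    if (is_young) {
      young_time += seconds_taken;     // reported as ms via * 1000.0
    } else {
      non_young_time += seconds_taken;
    }
  }
};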
*** 4958,4986 ****
g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
cl.humongous_free_count());
}
! // This routine is similar to the above but does not record
! // any policy statistics or update free lists; we are abandoning
! // the current incremental collection set in preparation of a
! // full collection. After the full GC we will start to build up
! // the incremental collection set again.
! // This is only called when we're doing a full collection
! // and is immediately followed by the tearing down of the young list.
!
! void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
! HeapRegion* cur = cs_head;
!
! while (cur != NULL) {
! HeapRegion* next = cur->next_in_collection_set();
! assert(cur->in_collection_set(), "bad CS");
! cur->set_next_in_collection_set(NULL);
! clear_in_cset(cur);
! cur->set_young_index_in_cset(-1);
! cur = next;
}
}
void G1CollectedHeap::set_free_regions_coming() {
log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : setting free regions coming");
--- 4851,4876 ----
g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
cl.humongous_free_count());
}
! class G1AbandonCollectionSetClosure : public HeapRegionClosure {
! public:
! virtual bool doHeapRegion(HeapRegion* r) {
! assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());
! G1CollectedHeap::heap()->clear_in_cset(r);
! r->set_young_index_in_cset(-1);
! return false;
}
+ };
+
+ void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {
+ G1AbandonCollectionSetClosure cl;
+ collection_set->iterate(&cl);
+
+ collection_set->clear();
+ collection_set->stop_incremental_building();
}
void G1CollectedHeap::set_free_regions_coming() {
log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : setting free regions coming");