--- old/src/share/vm/gc/g1/g1CollectedHeap.cpp 2016-06-28 14:12:48.338921584 +0200 +++ new/src/share/vm/gc/g1/g1CollectedHeap.cpp 2016-06-28 14:12:48.246918813 +0200 @@ -1256,9 +1256,7 @@ // set between the last GC or pause and now. We need to clear the // incremental collection set and then start rebuilding it afresh // after this full GC. - abandon_collection_set(collection_set()->inc_head()); - collection_set()->clear_incremental(); - collection_set()->stop_incremental_building(); + abandon_collection_set(collection_set()); tear_down_region_sets(false /* free_list_only */); collector_state()->set_gcs_are_young(true); @@ -1379,7 +1377,6 @@ _verifier->check_bitmaps("Full GC End"); // Start a new incremental collection set for the next pause - assert(collection_set()->head() == NULL, "must be"); collection_set()->start_incremental_building(); clear_cset_fast_test(); @@ -1724,8 +1721,6 @@ _old_marking_cycles_started(0), _old_marking_cycles_completed(0), _in_cset_fast_test(), - _worker_cset_start_region(NULL), - _worker_cset_start_region_time_stamp(NULL), _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()), _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) { @@ -1748,8 +1743,6 @@ uint n_queues = ParallelGCThreads; _task_queues = new RefToScanQueueSet(n_queues); - _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC); - _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC); _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC); for (uint i = 0; i < n_queues; i++) { @@ -1758,7 +1751,6 @@ _task_queues->register_queue(i, q); ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo(); } - clear_cset_start_regions(); // Initialize the G1EvacuationFailureALot counters and flags. NOT_PRODUCT(reset_evacuation_should_fail();) @@ -1987,6 +1979,8 @@ _preserved_marks_set.init(ParallelGCThreads); + _collection_set.set_max_length(max_regions()); + return JNI_OK; } @@ -2420,117 +2414,12 @@ _hrm.par_iterate(cl, worker_id, hrclaimer, concurrent); } -// Clear the cached CSet starting regions and (more importantly) -// the time stamps. Called when we reset the GC time stamp. -void G1CollectedHeap::clear_cset_start_regions() { - assert(_worker_cset_start_region != NULL, "sanity"); - assert(_worker_cset_start_region_time_stamp != NULL, "sanity"); - - for (uint i = 0; i < ParallelGCThreads; i++) { - _worker_cset_start_region[i] = NULL; - _worker_cset_start_region_time_stamp[i] = 0; - } -} - -// Given the id of a worker, obtain or calculate a suitable -// starting region for iterating over the current collection set. -HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) { - assert(get_gc_time_stamp() > 0, "should have been updated by now"); - - HeapRegion* result = NULL; - unsigned gc_time_stamp = get_gc_time_stamp(); - - if (_worker_cset_start_region_time_stamp[worker_i] == gc_time_stamp) { - // Cached starting region for current worker was set - // during the current pause - so it's valid. - // Note: the cached starting heap region may be NULL - // (when the collection set is empty). - result = _worker_cset_start_region[worker_i]; - assert(result == NULL || result->in_collection_set(), "sanity"); - return result; - } - - // The cached entry was not valid so let's calculate - // a suitable starting heap region for this worker. - - // We want the parallel threads to start their collection - // set iteration at different collection set regions to - // avoid contention. 
- // If we have: - // n collection set regions - // p threads - // Then thread t will start at region floor ((t * n) / p) - - result = collection_set()->head(); - uint cs_size = collection_set()->region_length(); - uint active_workers = workers()->active_workers(); - - uint end_ind = (cs_size * worker_i) / active_workers; - uint start_ind = 0; - - if (worker_i > 0 && - _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) { - // Previous workers starting region is valid - // so let's iterate from there - start_ind = (cs_size * (worker_i - 1)) / active_workers; - OrderAccess::loadload(); - result = _worker_cset_start_region[worker_i - 1]; - } - - for (uint i = start_ind; i < end_ind; i++) { - result = result->next_in_collection_set(); - } - - // Note: the calculated starting heap region may be NULL - // (when the collection set is empty). - assert(result == NULL || result->in_collection_set(), "sanity"); - assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp, - "should be updated only once per pause"); - _worker_cset_start_region[worker_i] = result; - OrderAccess::storestore(); - _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp; - return result; -} - void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { - HeapRegion* r = collection_set()->head(); - while (r != NULL) { - HeapRegion* next = r->next_in_collection_set(); - if (cl->doHeapRegion(r)) { - cl->incomplete(); - return; - } - r = next; - } + _collection_set.iterate(cl); } -void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r, - HeapRegionClosure *cl) { - if (r == NULL) { - // The CSet is empty so there's nothing to do. - return; - } - - assert(r->in_collection_set(), - "Start region must be a member of the collection set."); - HeapRegion* cur = r; - while (cur != NULL) { - HeapRegion* next = cur->next_in_collection_set(); - if (cl->doHeapRegion(cur) && false) { - cl->incomplete(); - return; - } - cur = next; - } - cur = collection_set()->head(); - while (cur != r) { - HeapRegion* next = cur->next_in_collection_set(); - if (cl->doHeapRegion(cur) && false) { - cl->incomplete(); - return; - } - cur = next; - } +void G1CollectedHeap::collection_set_iterate_from(HeapRegionClosure *cl, uint worker_id) { + _collection_set.iterate_from(cl, worker_id, workers()->active_workers()); } HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const { @@ -3090,6 +2979,14 @@ g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms); } +class G1PrintCollectionSetClosure : public HeapRegionClosure { +public: + virtual bool doHeapRegion(HeapRegion* r) { + G1CollectedHeap::heap()->hr_printer()->cset(r); + return false; + } +}; + bool G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { assert_at_safepoint(true /* should_be_vm_thread */); @@ -3268,11 +3165,8 @@ _cm->verify_no_cset_oops(); if (_hr_printer.is_active()) { - HeapRegion* hr = collection_set()->head(); - while (hr != NULL) { - _hr_printer.cset(hr); - hr = hr->next_in_collection_set(); - } + G1PrintCollectionSetClosure cl; + _collection_set.iterate(&cl); } // Initialize the GC alloc regions. 
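The two hunks above replace hand-rolled walks over the collection set's intrusive linked list with closure-based iteration on G1CollectionSet. The G1CollectionSet side lives outside this file, so the following is only a sketch of the assumed shape, with no names taken from this patch beyond the two iteration entry points: a single-threaded iterate() can simply delegate to the worker-aware variant as worker 0 of 1.

    // Sketch only: assumes G1CollectionSet now stores the selected regions in
    // a flat array instead of linking them through the HeapRegions themselves.
    void G1CollectionSet::iterate(HeapRegionClosure* cl) const {
      // A single caller iterating alone behaves like worker 0 of 1 workers.
      iterate_from(cl, 0, 1);
    }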
@@ -3287,12 +3181,10 @@ post_evacuate_collection_set(evacuation_info, &per_thread_states); const size_t* surviving_young_words = per_thread_states.surviving_young_words(); - free_collection_set(collection_set()->head(), evacuation_info, surviving_young_words); + free_collection_set(&_collection_set, evacuation_info, surviving_young_words); eagerly_reclaim_humongous_regions(); - collection_set()->clear_head(); - record_obj_copy_mem_stats(); _survivor_evac_stats.adjust_desired_plab_sz(); _old_evac_stats.adjust_desired_plab_sz(); @@ -4704,120 +4596,121 @@ workers()->run_task(&g1_par_scrub_rs_task); } -void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) { - size_t pre_used = 0; - FreeRegionList local_free_list("Local List for CSet Freeing"); - - double young_time_ms = 0.0; - double non_young_time_ms = 0.0; - - _eden.clear(); - - G1Policy* policy = g1_policy(); - - double start_sec = os::elapsedTime(); - bool non_young = true; +class G1FreeCollectionSetClosure : public HeapRegionClosure { +private: + const size_t* _surviving_young_words; - HeapRegion* cur = cs_head; - int age_bound = -1; - size_t rs_lengths = 0; - - while (cur != NULL) { - assert(!is_on_master_free_list(cur), "sanity"); - if (non_young) { - if (cur->is_young()) { - double end_sec = os::elapsedTime(); - double elapsed_ms = (end_sec - start_sec) * 1000.0; - non_young_time_ms += elapsed_ms; + FreeRegionList _local_free_list; + size_t _rs_lengths; + // Bytes used in successfully evacuated regions before the evacuation. + size_t _before_used_bytes; + // Bytes used in unsuccessfully evacuated regions before the evacuation. + size_t _after_used_bytes; - start_sec = os::elapsedTime(); - non_young = false; - } - } else { - if (!cur->is_young()) { - double end_sec = os::elapsedTime(); - double elapsed_ms = (end_sec - start_sec) * 1000.0; - young_time_ms += elapsed_ms; - - start_sec = os::elapsedTime(); - non_young = true; - } - } + double _young_time; + double _non_young_time; +public: + G1FreeCollectionSetClosure(const size_t* surviving_young_words) : + HeapRegionClosure(), + _surviving_young_words(surviving_young_words), + _local_free_list("Local Region List for CSet Freeing"), + _rs_lengths(0), + _before_used_bytes(0), + _after_used_bytes(0), + _young_time(0.0), + _non_young_time(0.0) { + } - rs_lengths += cur->rem_set()->occupied_locked(); + virtual bool doHeapRegion(HeapRegion* r) { + double start_time = os::elapsedTime(); - HeapRegion* next = cur->next_in_collection_set(); - assert(cur->in_collection_set(), "bad CS"); - cur->set_next_in_collection_set(NULL); - clear_in_cset(cur); + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + assert(!g1h->is_on_master_free_list(r), "sanity"); - if (cur->is_young()) { - int index = cur->young_index_in_cset(); - assert(index != -1, "invariant"); - assert((uint) index < collection_set()->young_region_length(), "invariant"); - size_t words_survived = surviving_young_words[index]; - cur->record_surv_words_in_group(words_survived); + _rs_lengths += r->rem_set()->occupied_locked(); + assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index()); + g1h->clear_in_cset(r); + + if (r->is_young()) { + int index = r->young_index_in_cset(); + assert(index != -1, "Young index in collection set must not be -1 for region %u", r->hrm_index()); + assert((uint) index < g1h->collection_set()->young_region_length(), "invariant"); + size_t words_survived = _surviving_young_words[index]; +
r->record_surv_words_in_group(words_survived); } else { - int index = cur->young_index_in_cset(); - assert(index == -1, "invariant"); + assert(r->young_index_in_cset() == -1, "Young index for old region %u in collection set must be -1", r->hrm_index()); } - assert( (cur->is_young() && cur->young_index_in_cset() > -1) || - (!cur->is_young() && cur->young_index_in_cset() == -1), - "invariant" ); - - if (!cur->evacuation_failed()) { - MemRegion used_mr = cur->used_region(); + if (!r->evacuation_failed()) { + MemRegion used_mr = r->used_region(); // And the region is empty. - assert(!used_mr.is_empty(), "Should not have empty regions in a CS."); - pre_used += cur->used(); - free_region(cur, &local_free_list, false /* par */, true /* locked */); + assert(!used_mr.is_empty(), "Should not have empty regions in a collection set."); + _before_used_bytes += r->used(); + g1h->free_region(r, &_local_free_list, false /* par */, true /* locked */); } else { - cur->uninstall_surv_rate_group(); - if (cur->is_young()) { - cur->set_young_index_in_cset(-1); - } - cur->set_evacuation_failed(false); + r->uninstall_surv_rate_group(); + r->set_young_index_in_cset(-1); + r->set_evacuation_failed(false); // When moving a young gen region to old gen, we "allocate" that whole region // there. This is in addition to any already evacuated objects. Notify the // policy about that. // Old gen regions do not cause an additional allocation: both the objects // still in the region and the ones already moved are accounted for elsewhere. - if (cur->is_young()) { - policy->add_bytes_allocated_in_old_since_last_gc(HeapRegion::GrainBytes); + if (r->is_young()) { + g1h->g1_policy()->add_bytes_allocated_in_old_since_last_gc(HeapRegion::GrainBytes); } // The region is now considered to be old. - cur->set_old(); + r->set_old(); // Do some allocation statistics accounting. Regions that failed evacuation // are always made old, so there is no need to update anything in the young // gen statistics, but we need to update old gen statistics. 
- size_t used_words = cur->marked_bytes() / HeapWordSize; - _old_evac_stats.add_failure_used_and_waste(used_words, HeapRegion::GrainWords - used_words); - _old_set.add(cur); - evacuation_info.increment_collectionset_used_after(cur->used()); + size_t used_words = r->marked_bytes() / HeapWordSize; + + g1h->alloc_buffer_stats(InCSetState::Old)->add_failure_used_and_waste(used_words, HeapRegion::GrainWords - used_words); + g1h->old_set_add(r); + _after_used_bytes += r->used(); } - cur = next; + + if (r->is_young()) { + _young_time += os::elapsedTime() - start_time; + } else { + _non_young_time += os::elapsedTime() - start_time; + } + return false; } - evacuation_info.set_regions_freed(local_free_list.length()); - policy->record_max_rs_lengths(rs_lengths); + FreeRegionList* local_free_list() { return &_local_free_list; } + size_t rs_lengths() const { return _rs_lengths; } + size_t before_used_bytes() const { return _before_used_bytes; } + size_t after_used_bytes() const { return _after_used_bytes; } + + double young_time() const { return _young_time; } + double non_young_time() const { return _non_young_time; } +}; + +void G1CollectedHeap::free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) { + _eden.clear(); + + G1FreeCollectionSetClosure cl(surviving_young_words); + collection_set_iterate(&cl); + + evacuation_info.set_regions_freed(cl.local_free_list()->length()); + evacuation_info.increment_collectionset_used_after(cl.after_used_bytes()); + + G1Policy* policy = g1_policy(); + + policy->record_max_rs_lengths(cl.rs_lengths()); policy->cset_regions_freed(); - double end_sec = os::elapsedTime(); - double elapsed_ms = (end_sec - start_sec) * 1000.0; + prepend_to_freelist(cl.local_free_list()); + decrement_summary_bytes(cl.before_used_bytes()); - if (non_young) { - non_young_time_ms += elapsed_ms; - } else { - young_time_ms += elapsed_ms; - } + policy->phase_times()->record_young_free_cset_time_ms(cl.young_time() * 1000.0); + policy->phase_times()->record_non_young_free_cset_time_ms(cl.non_young_time() * 1000.0); - prepend_to_freelist(&local_free_list); - decrement_summary_bytes(pre_used); - policy->phase_times()->record_young_free_cset_time_ms(young_time_ms); - policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms); + collection_set->clear(); } class G1FreeHumongousRegionClosure : public HeapRegionClosure { @@ -4960,25 +4853,22 @@ cl.humongous_free_count()); } -// This routine is similar to the above but does not record -// any policy statistics or update free lists; we are abandoning -// the current incremental collection set in preparation of a -// full collection. After the full GC we will start to build up -// the incremental collection set again. -// This is only called when we're doing a full collection -// and is immediately followed by the tearing down of the young list. 
- -void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) { - HeapRegion* cur = cs_head; - - while (cur != NULL) { - HeapRegion* next = cur->next_in_collection_set(); - assert(cur->in_collection_set(), "bad CS"); - cur->set_next_in_collection_set(NULL); - clear_in_cset(cur); - cur->set_young_index_in_cset(-1); - cur = next; +class G1AbandonCollectionSetClosure : public HeapRegionClosure { +public: + virtual bool doHeapRegion(HeapRegion* r) { + assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index()); + G1CollectedHeap::heap()->clear_in_cset(r); + r->set_young_index_in_cset(-1); + return false; } +}; + +void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) { + G1AbandonCollectionSetClosure cl; + collection_set->iterate(&cl); + + collection_set->clear(); + collection_set->stop_incremental_building(); } void G1CollectedHeap::set_free_regions_coming() {
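For reference, the removed start_cset_region_for_worker() spelled out the intended work distribution: with n collection set regions and p threads, thread t starts at region floor((t * n) / p) and then walks the entire collection set, wrapping around, so that threads begin at different regions to avoid contention. The new collection_set_iterate_from() forwards worker_id and workers()->active_workers() to G1CollectionSet::iterate_from(), which presumably keeps that scheme over a flat array. A hedged sketch under those assumptions (the fields _collection_set_regions and _collection_set_cur_length are hypothetical; this patch only shows the call site):

    // Sketch, not the actual G1CollectionSet implementation. Each worker
    // starts at offset floor((worker_id * len) / total_workers) and visits
    // every region exactly once, wrapping around at the end of the array.
    void G1CollectionSet::iterate_from(HeapRegionClosure* cl,
                                       uint worker_id,
                                       uint total_workers) const {
      size_t len = _collection_set_cur_length;  // assumed: current region count
      if (len == 0) {
        return;
      }
      size_t start_pos = ((size_t)worker_id * len) / total_workers;
      size_t cur_pos = start_pos;
      do {
        // assumed: the array holds region indices resolvable via region_at().
        HeapRegion* r = G1CollectedHeap::heap()->region_at(_collection_set_regions[cur_pos]);
        if (cl->doHeapRegion(r)) {
          // The closure asked to stop early; mark the iteration incomplete,
          // matching the convention of the removed list-based loops.
          cl->incomplete();
          return;
        }
        cur_pos++;
        if (cur_pos == len) {
          cur_pos = 0;
        }
      } while (cur_pos != start_pos);
    }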