src/hotspot/share/gc/g1/g1CollectedHeap.cpp
rev 53920 : [mq]: 8218880-g1-crashes-periodic-gc-gclocker
rev 53923 : [mq]: 8219747-remove-g1-prefix
@@ -257,18 +257,18 @@
// We will set up the first region as "starts humongous". This
// will also update the BOT covering all the regions to reflect
// that there is a single object that starts at the bottom of the
// first region.
first_hr->set_starts_humongous(obj_top, word_fill_size);
- _g1_policy->remset_tracker()->update_at_allocate(first_hr);
+ _policy->remset_tracker()->update_at_allocate(first_hr);
// Then, if there are any, we will set up the "continues
// humongous" regions.
HeapRegion* hr = NULL;
for (uint i = first + 1; i <= last; ++i) {
hr = region_at(i);
hr->set_continues_humongous(first_hr);
- _g1_policy->remset_tracker()->update_at_allocate(hr);
+ _policy->remset_tracker()->update_at_allocate(hr);
}
// Up to this point no concurrent thread would have been able to
// do any scanning on any region in this series. All the top
// fields still point to bottom, so the intersection between
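Note on the hunk above: a humongous object gets exactly one "starts humongous" region plus zero or more "continues humongous" regions, and the remset tracker is told about each at allocation. A standalone sketch of the layout arithmetic, not HotSpot code; the 8MB region size is an assumed G1 configuration:

    // Simplified model of the humongous series set up above: one region is
    // marked "starts humongous", the rest "continues humongous".
    #include <cstddef>
    #include <cstdio>

    const size_t kGrainWords = (8u * 1024 * 1024) / 8;  // words per assumed 8MB region

    size_t regions_for_humongous(size_t word_size) {
      return (word_size + kGrainWords - 1) / kGrainWords;  // round up to whole regions
    }

    int main() {
      size_t obj_words = kGrainWords * 5 / 2;       // 2.5 regions' worth of words
      size_t n = regions_for_humongous(obj_words);  // == 3
      printf("1 starts-humongous + %zu continues-humongous regions\n", n - 1);
      return 0;
    }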
@@ -354,11 +354,11 @@
// the heap. Alternatively we could do a defragmentation GC.
log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
word_size * HeapWordSize);
_hrm->expand_at(first, obj_regions, workers());
- g1_policy()->record_new_heap_size(num_regions());
+ policy()->record_new_heap_size(num_regions());
#ifdef ASSERT
for (uint i = first; i < first + obj_regions; ++i) {
HeapRegion* hr = region_at(i);
assert(hr->is_free(), "sanity");
@@ -438,11 +438,11 @@
}
// If the GCLocker is active and we are bound for a GC, try expanding young gen.
// This is different from the case where only GCLocker::needs_gc() is set: we try
// the expansion so we do not wait on the active GCLocker for too long.
- if (GCLocker::is_active_and_needs_gc() && g1_policy()->can_expand_young_list()) {
+ if (GCLocker::is_active_and_needs_gc() && policy()->can_expand_young_list()) {
// No need for an ergo message here, can_expand_young_list() does this when
// it returns true.
result = _allocator->attempt_allocation_force(word_size);
if (result != NULL) {
return result;
@@ -859,11 +859,11 @@
// Humongous objects can exhaust the heap quickly, so we should check if we
// need to start a marking cycle at each humongous object allocation. We do
// the check before we do the actual allocation. The reason for doing it
// before the allocation is that we avoid having to keep track of the newly
// allocated memory while we do a GC.
- if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
+ if (policy()->need_to_start_conc_mark("concurrent humongous allocation",
word_size)) {
collect(GCCause::_g1_humongous_allocation);
}
// We will loop until a) we manage to successfully perform the
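The comment block in this hunk explains why the marking check runs before the allocation: checking afterwards would force the policy to track the freshly allocated words through a GC. As a rough model of such a check (an IHOP-style sketch with an assumed threshold scheme, not the actual signature or logic of need_to_start_conc_mark()):

    // Hedged sketch of a check-before-allocate marking trigger: include the
    // pending request in the projected occupancy so the concurrent cycle can
    // start before the humongous allocation lands. The real policy logic
    // lives in G1Policy.
    #include <cstddef>

    bool need_marking_before_alloc(size_t used_bytes, size_t request_bytes,
                                   size_t heap_bytes, double ihop_percent) {
      size_t threshold = (size_t)((double)heap_bytes * ihop_percent / 100.0);
      return used_bytes + request_bytes > threshold;
    }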
@@ -883,11 +883,11 @@
// regions, we'll first try to do the allocation without doing a
// collection hoping that there's enough space in the heap.
result = humongous_obj_allocate(word_size);
if (result != NULL) {
size_t size_in_regions = humongous_obj_size_in_regions(word_size);
- g1_policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
+ policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
return result;
}
// Only try a GC if the GCLocker does not signal the need for a GC. Wait until
// the GCLocker initiated GC has been performed and then retry. This includes
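Worth noting in the hunk above: the policy is charged size_in_regions * HeapRegion::GrainBytes, i.e. the full footprint of every region the object spans, not the object's own byte size. A small sketch of the difference, with all constants assumed:

    // The tail waste in the last region counts as allocated-in-old, so the
    // charged amount can exceed the object size.
    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t kGrainBytes = 8u * 1024 * 1024;   // assumed region size
      const size_t kHeapWordSize = 8;                // 64-bit heap words
      const size_t kGrainWords = kGrainBytes / kHeapWordSize;

      size_t word_size = kGrainWords + 1;            // just over one region
      size_t size_in_regions = (word_size + kGrainWords - 1) / kGrainWords;
      assert(size_in_regions == 2);
      assert(size_in_regions * kGrainBytes > word_size * kHeapWordSize);
      return 0;
    }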
@@ -961,11 +961,11 @@
if (!is_humongous(word_size)) {
return _allocator->attempt_allocation_locked(word_size);
} else {
HeapWord* result = humongous_obj_allocate(word_size);
- if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
+ if (result != NULL && policy()->need_to_start_conc_mark("STW humongous allocation")) {
collector_state()->set_initiate_conc_mark_if_possible(true);
}
return result;
}
@@ -1361,11 +1361,11 @@
}
if (expanded_by > 0) {
size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
- g1_policy()->record_new_heap_size(num_regions());
+ policy()->record_new_heap_size(num_regions());
} else {
log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
// The expansion of the virtual storage space was unsuccessful.
// Let's see if it was because we ran out of swap.
@@ -1390,11 +1390,11 @@
log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
if (num_regions_removed > 0) {
- g1_policy()->record_new_heap_size(num_regions());
+ policy()->record_new_heap_size(num_regions());
} else {
log_debug(gc, ergo, heap)("Did not shrink the heap (heap shrinking operation failed)");
}
}
@@ -1508,15 +1508,15 @@
_old_marking_cycles_completed(0),
_eden(),
_survivor(),
_gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
_gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
- _g1_policy(G1Policy::create_policy(collector_policy, _gc_timer_stw)),
+ _policy(G1Policy::create_policy(collector_policy, _gc_timer_stw)),
_heap_sizing_policy(NULL),
- _collection_set(this, _g1_policy),
+ _collection_set(this, _policy),
_hot_card_cache(NULL),
- _g1_rem_set(NULL),
+ _rem_set(NULL),
_dirty_card_queue_set(false),
_cm(NULL),
_cm_thread(NULL),
_cr(NULL),
_task_queues(NULL),
@@ -1538,11 +1538,11 @@
_verifier = new G1HeapVerifier(this);
_allocator = new G1Allocator(this);
- _heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics());
+ _heap_sizing_policy = G1HeapSizingPolicy::create(this, _policy->analytics());
_humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
// Override the default _filler_array_max_size so that no humongous filler
// objects are created.
@@ -1635,11 +1635,11 @@
// cases incorrectly returns the size in wordSize units rather than
// HeapWordSize).
guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
size_t init_byte_size = collector_policy()->initial_heap_byte_size();
- size_t max_byte_size = g1_collector_policy()->heap_reserved_size_bytes();
+ size_t max_byte_size = _collector_policy->heap_reserved_size_bytes();
size_t heap_alignment = collector_policy()->heap_alignment();
// Ensure that the sizes are properly aligned.
Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
@@ -1736,11 +1736,11 @@
G1RegionToSpaceMapper* prev_bitmap_storage =
create_aux_memory_mapper("Prev Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
G1RegionToSpaceMapper* next_bitmap_storage =
create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
- _hrm = HeapRegionManager::create_manager(this, g1_collector_policy());
+ _hrm = HeapRegionManager::create_manager(this, _collector_policy);
_hrm->initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
_card_table->initialize(cardtable_storage);
// Do later initialization work for concurrent refinement.
_hot_card_cache->initialize(card_counts_storage);
@@ -1752,12 +1752,12 @@
// The G1FromCardCache reserves the card with value 0 as "invalid", so the heap must not
// start within the first card.
guarantee(g1_rs.base() >= (char*)G1CardTable::card_size, "Java heap must not start within the first card.");
// Also create a G1 rem set.
- _g1_rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
- _g1_rem_set->initialize(max_reserved_capacity(), max_regions());
+ _rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
+ _rem_set->initialize(max_reserved_capacity(), max_regions());
size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
"too many cards per region");
@@ -1797,11 +1797,11 @@
vm_shutdown_during_initialization("Failed to allocate initial heap.");
return JNI_ENOMEM;
}
// Perform any initialization actions delegated to the policy.
- g1_policy()->init(this, &_collection_set);
+ policy()->init(this, &_collection_set);
jint ecode = initialize_concurrent_refinement();
if (ecode != JNI_OK) {
return ecode;
}
@@ -1937,14 +1937,10 @@
CollectorPolicy* G1CollectedHeap::collector_policy() const {
return _collector_policy;
}
-G1CollectorPolicy* G1CollectedHeap::g1_collector_policy() const {
- return _collector_policy;
-}
-
SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
return &_soft_ref_policy;
}
size_t G1CollectedHeap::capacity() const {
@@ -1964,11 +1960,11 @@
size_t n_completed_buffers = 0;
while (dcqs.apply_closure_during_gc(cl, worker_i)) {
n_completed_buffers++;
}
assert(dcqs.completed_buffers_num() == 0, "Completed buffers exist!");
- g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers, G1GCPhaseTimes::UpdateRSProcessedBuffers);
+ policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers, G1GCPhaseTimes::UpdateRSProcessedBuffers);
}
// Computes the sum of the storage used by the various regions.
size_t G1CollectedHeap::used() const {
size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
@@ -2016,11 +2012,11 @@
default: return is_user_requested_concurrent_full_gc(cause);
}
}
bool G1CollectedHeap::should_upgrade_to_full_gc(GCCause::Cause cause) {
- if(g1_policy()->force_upgrade_to_full()) {
+ if(policy()->force_upgrade_to_full()) {
return true;
} else if (should_do_concurrent_full_gc(_gc_cause)) {
return false;
} else if (has_regions_left_for_allocation()) {
return false;
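The cascade above is order-sensitive: a policy-forced upgrade wins outright, a pending concurrent cycle suppresses the upgrade, and remaining free regions make it unnecessary. A stubbed-out restatement; the trailing return true is an assumption based on the branches visible in the hunk:

    // Decision order of should_upgrade_to_full_gc(), with the predicates
    // reduced to plain booleans for illustration.
    bool should_upgrade_to_full_gc(bool force_upgrade,
                                   bool will_do_concurrent_full_gc,
                                   bool has_regions_left) {
      if (force_upgrade)               return true;   // policy insists on a full GC
      if (will_do_concurrent_full_gc)  return false;  // marking will handle it
      if (has_regions_left)            return false;  // allocation can still proceed
      return true;                                    // nothing else helps: upgrade
    }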
@@ -2144,11 +2140,11 @@
// we are not requesting a post-GC allocation.
VM_G1CollectForAllocation op(0, /* word_size */
gc_count_before,
cause,
true, /* should_initiate_conc_mark */
- g1_policy()->max_pause_time_ms());
+ policy()->max_pause_time_ms());
VMThread::execute(&op);
vmop_succeeded = op.pause_succeeded();
if (!vmop_succeeded && retry_on_vmop_failure) {
if (old_marking_count_before == _old_marking_cycles_started) {
should_retry_vmop = op.should_retry_gc();
@@ -2170,11 +2166,11 @@
// to 0 which means that we are not requesting a post-GC allocation.
VM_G1CollectForAllocation op(0, /* word_size */
gc_count_before,
cause,
false, /* should_initiate_conc_mark */
- g1_policy()->max_pause_time_ms());
+ policy()->max_pause_time_ms());
VMThread::execute(&op);
vmop_succeeded = op.pause_succeeded();
} else {
// Schedule a Full GC.
VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
@@ -2272,11 +2268,11 @@
bool G1CollectedHeap::supports_tlab_allocation() const {
return true;
}
size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
- return (_g1_policy->young_list_target_length() - _survivor.length()) * HeapRegion::GrainBytes;
+ return (_policy->young_list_target_length() - _survivor.length()) * HeapRegion::GrainBytes;
}
size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
return _eden.length() * HeapRegion::GrainBytes;
}
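tlab_capacity() hands the mutator the eden share of the young target: (target length - survivor length) regions, times the region size. A worked example with hypothetical inputs:

    // Worked example of the tlab_capacity() formula; all numbers are made up.
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t kGrainBytes = 8u * 1024 * 1024;  // assumed 8MB regions
      size_t young_list_target_length = 50;         // hypothetical policy target
      size_t survivor_length = 5;                   // hypothetical survivor count
      size_t capacity = (young_list_target_length - survivor_length) * kGrainBytes;
      printf("%zu bytes across 45 eden regions\n", capacity);  // 360MB
      return 0;
    }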
@@ -2301,11 +2297,11 @@
jlong G1CollectedHeap::millis_since_last_gc() {
// See the notes in GenCollectedHeap::millis_since_last_gc()
// for more information about the implementation.
jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
- _g1_policy->collection_pause_end_millis();
+ _policy->collection_pause_end_millis();
if (ret_val < 0) {
log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
". returning zero instead.", ret_val);
return 0;
}
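The clamp in this hunk exists because the subtraction mixes two independently sampled timestamps, so a small negative result is possible; rather than report time travel, the code warns and returns zero. The pattern in isolation:

    // Minimal restatement of the negative-elapsed-time clamp above.
    #include <cstdint>

    int64_t millis_since(int64_t now_millis, int64_t pause_end_millis) {
      int64_t ret_val = now_millis - pause_end_millis;
      return ret_val < 0 ? 0 : ret_val;  // clamp; the real code also logs a warning
    }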
@@ -2334,10 +2330,14 @@
bool G1CollectedHeap::request_concurrent_phase(const char* phase) {
return _cm_thread->request_concurrent_phase(phase);
}
+bool G1CollectedHeap::is_heap_heterogeneous() const {
+ return _collector_policy->is_heap_heterogeneous();
+}
+
class PrintRegionClosure: public HeapRegionClosure {
outputStream* _st;
public:
PrintRegionClosure(outputStream* st) : _st(st) {}
bool do_heap_region(HeapRegion* r) {
@@ -2443,11 +2443,11 @@
G1StringDedup::threads_do(tc);
}
}
void G1CollectedHeap::print_tracing_info() const {
- g1_rem_set()->print_summary_info();
+ rem_set()->print_summary_info();
concurrent_mark()->print_summary_info();
}
#ifndef PRODUCT
// Helpful for debugging RSet issues.
@@ -2503,11 +2503,11 @@
size_t eden_used_bytes = heap()->eden_regions_count() * HeapRegion::GrainBytes;
size_t survivor_used_bytes = heap()->survivor_regions_count() * HeapRegion::GrainBytes;
size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();
size_t eden_capacity_bytes =
- (g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
+ (policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
VirtualSpaceSummary heap_summary = create_heap_space_summary();
return G1HeapSummary(heap_summary, heap_used, eden_used_bytes,
eden_capacity_bytes, survivor_used_bytes, num_regions());
}
@@ -2537,33 +2537,33 @@
void G1CollectedHeap::gc_prologue(bool full) {
// always_do_update_barrier = false;
assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
// This summary needs to be printed before incrementing total collections.
- g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
+ rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
// Update common counters.
increment_total_collections(full /* full gc */);
if (full) {
increment_old_marking_cycles_started();
}
// Fill TLAB's and such
double start = os::elapsedTime();
ensure_parsability(true);
- g1_policy()->phase_times()->record_prepare_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
+ policy()->phase_times()->record_prepare_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
}
void G1CollectedHeap::gc_epilogue(bool full) {
// Update common counters.
if (full) {
// Update the number of full collections that have been completed.
increment_old_marking_cycles_completed(false /* concurrent */);
}
// We are at the end of the GC. Total collections has already been increased.
- g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
+ rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
// FIXME: what is this about?
// I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
// is set.
#if COMPILER2_OR_JVMCI
@@ -2571,11 +2571,11 @@
#endif
// always_do_update_barrier = true;
double start = os::elapsedTime();
resize_all_tlabs();
- g1_policy()->phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
+ policy()->phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
MemoryService::track_memory_usage();
// We have just completed a GC. Update the soft reference
// policy with the new heap occupancy
Universe::update_heap_info_at_gc();
@@ -2588,11 +2588,11 @@
assert_heap_not_locked_and_not_at_safepoint();
VM_G1CollectForAllocation op(word_size,
gc_count_before,
gc_cause,
false, /* should_initiate_conc_mark */
- g1_policy()->max_pause_time_ms());
+ policy()->max_pause_time_ms());
VMThread::execute(&op);
HeapWord* result = op.result();
bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
assert(result == NULL || ret_succeeded,
@@ -2765,21 +2765,21 @@
void flush_rem_set_entries() { _dcq.flush(); }
};
void G1CollectedHeap::register_humongous_regions_with_cset() {
if (!G1EagerReclaimHumongousObjects) {
- g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
+ policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
return;
}
double time = os::elapsed_counter();
// Collect reclaim candidate information and register candidates with cset.
RegisterHumongousWithInCSetFastTestClosure cl;
heap_region_iterate(&cl);
time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
- g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
+ policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
cl.total_humongous(),
cl.candidate_humongous());
_has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
// Finally flush all remembered set entries to re-check into the global DCQS.
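The timing around heap_region_iterate() above converts raw counter ticks to milliseconds by dividing by the counter frequency. Outside HotSpot the same measurement is usually written with std::chrono; a sketch:

    // Equivalent phase timing with std::chrono standing in for
    // os::elapsed_counter() / os::elapsed_frequency().
    #include <chrono>
    #include <cstdio>

    int main() {
      auto start = std::chrono::steady_clock::now();
      // ... the closure application being timed would run here ...
      double ms = std::chrono::duration<double, std::milli>(
          std::chrono::steady_clock::now() - start).count();
      printf("phase took %.3f ms\n", ms);
      return 0;
    }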
@@ -2847,11 +2847,11 @@
double wait_time_ms = 0.0;
if (waited) {
double scan_wait_end = os::elapsedTime();
wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
}
- g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
+ policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
}
class G1PrintCollectionSetClosure : public HeapRegionClosure {
private:
G1HRPrinter* _hr_printer;
@@ -2868,11 +2868,11 @@
collection_set()->start_incremental_building();
clear_cset_fast_test();
guarantee(_eden.length() == 0, "eden should have been cleared");
- g1_policy()->transfer_survivors_to_cset(survivor());
+ policy()->transfer_survivors_to_cset(survivor());
}
bool
G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
assert_at_safepoint_on_vm_thread();
@@ -2888,11 +2888,11 @@
_gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
SvcGCMarker sgcm(SvcGCMarker::MINOR);
ResourceMark rm;
- g1_policy()->note_gc_start();
+ policy()->note_gc_start();
wait_for_root_region_scanning();
print_heap_before_gc();
print_heap_regions();
@@ -2904,11 +2904,11 @@
// We should not be doing initial mark unless the conc mark thread is running
if (!_cm_thread->should_terminate()) {
// This call will decide whether this pause is an initial-mark
// pause. If it is, in_initial_mark_gc() will return true
// for the duration of this pause.
- g1_policy()->decide_on_conc_mark_initiation();
+ policy()->decide_on_conc_mark_initiation();
}
// We do not allow initial-mark to be piggy-backed on a mixed GC.
assert(!collector_state()->in_initial_mark_gc() ||
collector_state()->in_young_only_phase(), "sanity");
@@ -3016,17 +3016,17 @@
//
// The elapsed time induced by the start time below deliberately elides
// the possible verification above.
double sample_start_time_sec = os::elapsedTime();
- g1_policy()->record_collection_pause_start(sample_start_time_sec);
+ policy()->record_collection_pause_start(sample_start_time_sec);
if (collector_state()->in_initial_mark_gc()) {
concurrent_mark()->pre_initial_mark();
}
- g1_policy()->finalize_collection_set(target_pause_time_ms, &_survivor);
+ policy()->finalize_collection_set(target_pause_time_ms, &_survivor);
evacuation_info.set_collectionset_regions(collection_set()->region_length());
register_humongous_regions_with_cset();
@@ -3065,16 +3065,16 @@
_survivor_evac_stats.adjust_desired_plab_sz();
_old_evac_stats.adjust_desired_plab_sz();
double start = os::elapsedTime();
start_new_collection_set();
- g1_policy()->phase_times()->record_start_new_cset_time_ms((os::elapsedTime() - start) * 1000.0);
+ policy()->phase_times()->record_start_new_cset_time_ms((os::elapsedTime() - start) * 1000.0);
if (evacuation_failed()) {
double recalculate_used_start = os::elapsedTime();
set_used(recalculate_used());
- g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
+ policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
if (_archive_allocator != NULL) {
_archive_allocator->clear_used();
}
for (uint i = 0; i < ParallelGCThreads; i++) {
@@ -3083,11 +3083,11 @@
}
}
} else {
// The "used" of the the collection set have already been subtracted
// when they were freed. Add in the bytes evacuated.
- increase_used(g1_policy()->bytes_copied_during_gc());
+ increase_used(policy()->bytes_copied_during_gc());
}
if (collector_state()->in_initial_mark_gc()) {
// We have to do this before we notify the CM threads that
// they can start working to make sure that all the
@@ -3110,11 +3110,11 @@
// expansion_amount() does this when it returns a value > 0.
double expand_ms;
if (!expand(expand_bytes, _workers, &expand_ms)) {
// We failed to expand the heap. Cannot do anything about it.
}
- g1_policy()->phase_times()->record_expand_heap_time(expand_ms);
+ policy()->phase_times()->record_expand_heap_time(expand_ms);
}
}
// We redo the verification but now wrt to the new CSet which
// has just got initialized after the previous CSet was freed.
@@ -3123,15 +3123,15 @@
// This timing is only used by the ergonomics to handle our pause target.
// It is unclear why this should not include the full pause. We will
// investigate this in CR 7178365.
double sample_end_time_sec = os::elapsedTime();
double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
- size_t total_cards_scanned = g1_policy()->phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanRS, G1GCPhaseTimes::ScanRSScannedCards);
- g1_policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);
+ size_t total_cards_scanned = policy()->phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanRS, G1GCPhaseTimes::ScanRSScannedCards);
+ policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);
evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before());
- evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());
+ evacuation_info.set_bytes_copied(policy()->bytes_copied_during_gc());
if (VerifyRememberedSets) {
log_info(gc, verify)("[Verifying RemSets after GC]");
VerifyRegionRemSetClosure v_cl;
heap_region_iterate(&v_cl);
@@ -3156,11 +3156,11 @@
// Print the remainder of the GC log output.
if (evacuation_failed()) {
log_info(gc)("To-space exhausted");
}
- g1_policy()->print_phases();
+ policy()->print_phases();
heap_transition.print();
// It is not yet safe to tell the concurrent mark to
// start as we have some optional output below. We don't want the
// output from the concurrent mark thread interfering with this
@@ -3181,11 +3181,11 @@
// TraceMemoryManagerStats is called) so that the G1 memory pools are updated
// before any GC notifications are raised.
g1mm()->update_sizes();
_gc_tracer_stw->report_evacuation_info(&evacuation_info);
- _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
+ _gc_tracer_stw->report_tenuring_threshold(_policy->tenuring_threshold());
_gc_timer_stw->register_gc_end();
_gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
}
// It should now be safe to tell the concurrent mark thread to start
// without its logging output interfering with the logging output
@@ -3215,11 +3215,11 @@
remove_self_forwarding_pointers();
SharedRestorePreservedMarksTaskExecutor task_executor(workers());
_preserved_marks_set.restore(&task_executor);
- g1_policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
+ policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
}
void G1CollectedHeap::preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m) {
if (!_evacuation_failed) {
_evacuation_failed = true;
@@ -3273,11 +3273,11 @@
void work(uint worker_id) {
if (worker_id >= _n_workers) return; // no work needed this round
double start_sec = os::elapsedTime();
- _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, start_sec);
+ _g1h->policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, start_sec);
{
ResourceMark rm;
HandleMark hm;
@@ -3288,11 +3288,11 @@
double start_strong_roots_sec = os::elapsedTime();
_root_processor->evacuate_roots(pss, worker_id);
- _g1h->g1_rem_set()->oops_into_collection_set_do(pss, worker_id);
+ _g1h->rem_set()->oops_into_collection_set_do(pss, worker_id);
double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
double term_sec = 0.0;
size_t evac_term_attempts = 0;
@@ -3303,11 +3303,11 @@
evac_term_attempts = evac.term_attempts();
term_sec = evac.term_time();
double elapsed_sec = os::elapsedTime() - start;
- G1GCPhaseTimes* p = _g1h->g1_policy()->phase_times();
+ G1GCPhaseTimes* p = _g1h->policy()->phase_times();
p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
p->record_or_add_thread_work_item(G1GCPhaseTimes::ObjCopy,
worker_id,
pss->lab_waste_words() * HeapWordSize,
@@ -3325,11 +3325,11 @@
// Close the inner scope so that the ResourceMark and HandleMark
// destructors are executed here and are included as part of the
// "GC Worker Time".
}
- _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
+ _g1h->policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
}
};
void G1CollectedHeap::complete_cleaning(BoolObjectClosure* is_alive,
bool class_unloading_occurred) {
@@ -3390,11 +3390,11 @@
public:
G1RedirtyLoggedCardsTask(G1DirtyCardQueueSet* queue, G1CollectedHeap* g1h) : AbstractGangTask("Redirty Cards"),
_queue(queue), _g1h(g1h) { }
virtual void work(uint worker_id) {
- G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
+ G1GCPhaseTimes* phase_times = _g1h->policy()->phase_times();
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
RedirtyLoggedCardTableEntryClosure cl(_g1h);
_queue->par_apply_closure_to_all_completed_buffers(&cl);
@@ -3411,11 +3411,11 @@
G1DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set();
dcq.merge_bufferlists(&dirty_card_queue_set());
assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
- g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
+ policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
}
// Weak Reference Processing support
bool G1STWIsAliveClosure::do_object_b(oop p) {
@@ -3644,11 +3644,11 @@
G1STWDrainQueueClosure drain_queue(this, pss);
// Setup the soft refs policy...
rp->setup_policy(false);
- ReferenceProcessorPhaseTimes* pt = g1_policy()->phase_times()->ref_phase_times();
+ ReferenceProcessorPhaseTimes* pt = policy()->phase_times()->ref_phase_times();
ReferenceProcessorStats stats;
if (!rp->processing_is_mt()) {
// Serial reference processing...
stats = rp->process_discovered_references(&is_alive,
@@ -3680,11 +3680,11 @@
make_pending_list_reachable();
rp->verify_no_references_recorded();
double ref_proc_time = os::elapsedTime() - ref_proc_start;
- g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
+ policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
}
void G1CollectedHeap::make_pending_list_reachable() {
if (collector_state()->in_initial_mark_gc()) {
oop pll_head = Universe::reference_pending_list();
@@ -3696,25 +3696,25 @@
}
void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) {
double merge_pss_time_start = os::elapsedTime();
per_thread_states->flush();
- g1_policy()->phase_times()->record_merge_pss_time_ms((os::elapsedTime() - merge_pss_time_start) * 1000.0);
+ policy()->phase_times()->record_merge_pss_time_ms((os::elapsedTime() - merge_pss_time_start) * 1000.0);
}
void G1CollectedHeap::pre_evacuate_collection_set() {
_expand_heap_after_alloc_failure = true;
_evacuation_failed = false;
// Disable the hot card cache.
_hot_card_cache->reset_hot_cache_claimed_index();
_hot_card_cache->set_use_cache(false);
- g1_rem_set()->prepare_for_oops_into_collection_set_do();
+ rem_set()->prepare_for_oops_into_collection_set_do();
_preserved_marks_set.assert_empty();
- G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
+ G1GCPhaseTimes* phase_times = policy()->phase_times();
// InitialMark needs claim bits to keep track of the marked-through CLDs.
if (collector_state()->in_initial_mark_gc()) {
double start_clear_claimed_marks = os::elapsedTime();
@@ -3729,11 +3729,11 @@
// Should G1EvacuationFailureALot be in effect for this GC?
NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
- G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
+ G1GCPhaseTimes* phase_times = policy()->phase_times();
double start_par_time_sec = os::elapsedTime();
double end_par_time_sec;
{
@@ -3788,11 +3788,11 @@
HeapRegion* hr = _optional->region_at(i);
G1ScanRSForOptionalClosure scan_opt_cl(&obj_cl);
pss->oops_into_optional_region(hr)->oops_do(&scan_opt_cl, root_cls->raw_strong_oops());
copy_time += trim_ticks(pss);
- G1ScanRSForRegionClosure scan_rs_cl(_g1h->g1_rem_set()->scan_state(), &obj_cl, pss, G1GCPhaseTimes::OptScanRS, worker_id);
+ G1ScanRSForRegionClosure scan_rs_cl(_g1h->rem_set()->scan_state(), &obj_cl, pss, G1GCPhaseTimes::OptScanRS, worker_id);
scan_rs_cl.do_heap_region(hr);
copy_time += trim_ticks(pss);
scanned += scan_rs_cl.cards_scanned();
claimed += scan_rs_cl.cards_claimed();
skipped += scan_rs_cl.cards_skipped();
@@ -3800,11 +3800,11 @@
// Chunk lists for this region is no longer needed.
used_memory += pss->oops_into_optional_region(hr)->used_memory();
}
Tickspan scan_time = (Ticks::now() - start) - copy_time;
- G1GCPhaseTimes* p = _g1h->g1_policy()->phase_times();
+ G1GCPhaseTimes* p = _g1h->policy()->phase_times();
p->record_or_add_time_secs(G1GCPhaseTimes::OptScanRS, worker_id, scan_time.seconds());
p->record_or_add_time_secs(G1GCPhaseTimes::OptObjCopy, worker_id, copy_time.seconds());
p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, scanned, G1GCPhaseTimes::OptCSetScannedCards);
p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, claimed, G1GCPhaseTimes::OptCSetClaimedCards);
@@ -3816,11 +3816,11 @@
Ticks start = Ticks::now();
G1ParEvacuateFollowersClosure cl(_g1h, pss, _queues, &_terminator, G1GCPhaseTimes::OptObjCopy);
cl.do_void();
Tickspan evac_time = (Ticks::now() - start);
- G1GCPhaseTimes* p = _g1h->g1_policy()->phase_times();
+ G1GCPhaseTimes* p = _g1h->policy()->phase_times();
p->record_or_add_time_secs(G1GCPhaseTimes::OptObjCopy, worker_id, evac_time.seconds());
assert(pss->trim_ticks().seconds() == 0.0, "Unexpected partial trimming done during optional evacuation");
}
public:
@@ -3865,11 +3865,11 @@
if (evacuation_failed()) {
return;
}
- G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
+ G1GCPhaseTimes* phase_times = policy()->phase_times();
const double gc_start_time_ms = phase_times->cur_collection_start_sec() * 1000.0;
double start_time_sec = os::elapsedTime();
do {
@@ -3879,11 +3879,11 @@
if (time_left_ms < 0) {
log_trace(gc, ergo, cset)("Skipping %u optional regions, pause time exceeded %.3fms", optional_cset.size(), time_used_ms);
break;
}
- optional_cset.prepare_evacuation(time_left_ms * _g1_policy->optional_evacuation_fraction());
+ optional_cset.prepare_evacuation(time_left_ms * _policy->optional_evacuation_fraction());
if (optional_cset.prepare_failed()) {
log_trace(gc, ergo, cset)("Skipping %u optional regions, no regions can be evacuated in %.3fms", optional_cset.size(), time_left_ms);
break;
}
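The loop in this hunk paces optional evacuation: each round gets only a fraction of the remaining pause budget, and prepare_evacuation() gives up once no region fits, ending the loop. A toy version of that budget logic; all costs and the fraction are made up:

    // Simplified pacing loop in the spirit of the optional-evacuation code
    // above: spend at most a slice of the remaining budget per round, and
    // stop once the next region's predicted cost no longer fits.
    #include <cstdio>
    #include <vector>

    int main() {
      double time_left_ms = 20.0;                 // remaining pause budget
      const double fraction = 0.2;                // assumed optional fraction
      std::vector<double> predicted_cost_ms = {2.5, 2.5, 3.0, 4.0, 6.0};
      size_t evacuated = 0;
      for (double cost : predicted_cost_ms) {
        double budget = time_left_ms * fraction;  // per-round slice of the budget
        if (cost > budget) break;                 // prepare_failed() analogue
        time_left_ms -= cost;                     // pretend evacuation took 'cost'
        evacuated++;
      }
      printf("evacuated %zu of %zu optional regions\n",
             evacuated, predicted_cost_ms.size());
      return 0;
    }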
@@ -3899,11 +3899,11 @@
}
void G1CollectedHeap::post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
// Also cleans the card table from temporary duplicate detection information used
// during UpdateRS/ScanRS.
- g1_rem_set()->cleanup_after_oops_into_collection_set_do();
+ rem_set()->cleanup_after_oops_into_collection_set_do();
// Process any discovered reference objects - we have
// to do this _before_ we retire the GC alloc regions
// as we may have to copy some 'reachable' referent
// objects (and their reachable sub-graphs) that were
@@ -3912,19 +3912,19 @@
G1STWIsAliveClosure is_alive(this);
G1KeepAliveClosure keep_alive(this);
WeakProcessor::weak_oops_do(workers(), &is_alive, &keep_alive,
- g1_policy()->phase_times()->weak_phase_times());
+ policy()->phase_times()->weak_phase_times());
if (G1StringDedup::is_enabled()) {
double string_dedup_time_ms = os::elapsedTime();
- string_dedup_cleaning(&is_alive, &keep_alive, g1_policy()->phase_times());
+ string_dedup_cleaning(&is_alive, &keep_alive, policy()->phase_times());
double string_cleanup_time_ms = (os::elapsedTime() - string_dedup_time_ms) * 1000.0;
- g1_policy()->phase_times()->record_string_deduplication_time(string_cleanup_time_ms);
+ policy()->phase_times()->record_string_deduplication_time(string_cleanup_time_ms);
}
if (evacuation_failed()) {
restore_after_evac_failure();
@@ -3950,17 +3950,17 @@
redirty_logged_cards();
#if COMPILER2_OR_JVMCI
double start = os::elapsedTime();
DerivedPointerTable::update_pointers();
- g1_policy()->phase_times()->record_derived_pointer_table_update_time((os::elapsedTime() - start) * 1000.0);
+ policy()->phase_times()->record_derived_pointer_table_update_time((os::elapsedTime() - start) * 1000.0);
#endif
- g1_policy()->print_age_table();
+ policy()->print_age_table();
}
void G1CollectedHeap::record_obj_copy_mem_stats() {
- g1_policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
+ policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
_gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
create_g1_evac_summary(&_old_evac_stats));
}
@@ -3984,11 +3984,11 @@
// (since we don't refine cards in young regions).
if (!skip_hot_card_cache && !hr->is_young()) {
_hot_card_cache->reset_card_counts(hr);
}
hr->hr_clear(skip_remset, true /* clear_space */, locked /* locked */);
- _g1_policy->remset_tracker()->update_at_free(hr);
+ _policy->remset_tracker()->update_at_free(hr);
free_list->add_ordered(hr);
}
void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
FreeRegionList* free_list) {
@@ -4114,11 +4114,11 @@
_evacuation_info->increment_collectionset_used_after(_after_used_bytes);
g1h->prepend_to_freelist(&_local_free_list);
g1h->decrement_summary_bytes(_before_used_bytes);
- G1Policy* policy = g1h->g1_policy();
+ G1Policy* policy = g1h->policy();
policy->add_bytes_allocated_in_old_since_last_gc(_bytes_allocated_in_old_since_last_gc);
g1h->alloc_buffer_stats(InCSetState::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words);
}
};
@@ -4189,11 +4189,11 @@
}
void complete_work() {
_cl.complete_work();
- G1Policy* policy = G1CollectedHeap::heap()->g1_policy();
+ G1Policy* policy = G1CollectedHeap::heap()->policy();
policy->record_max_rs_lengths(_rs_lengths);
policy->cset_regions_freed();
}
public:
G1FreeCollectionSetTask(G1CollectionSet* collection_set, G1EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
@@ -4217,11 +4217,11 @@
// Chunk size for work distribution. The chosen value has been determined experimentally
// to be a good tradeoff between overhead and achievable parallelism.
static uint chunk_size() { return 32; }
virtual void work(uint worker_id) {
- G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
+ G1GCPhaseTimes* timer = G1CollectedHeap::heap()->policy()->phase_times();
// Claim serial work.
if (_serial_work_claim == 0) {
jint value = Atomic::add(1, &_serial_work_claim) - 1;
if (value == 0) {
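The claim protocol above relies on Atomic::add returning the updated value, so Atomic::add(1, &_serial_work_claim) - 1 is the pre-increment value and exactly one worker observes 0. The same idea with std::atomic, which exposes the old value directly:

    // One-shot work claiming in the style of _serial_work_claim: only the
    // thread whose fetch_add returns 0 runs the serial step.
    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    std::atomic<int> serial_work_claim{0};

    void worker(int id) {
      int value = serial_work_claim.fetch_add(1);  // returns the old value
      if (value == 0) {
        printf("worker %d claimed the serial work\n", id);  // exactly one prints
      }
    }

    int main() {
      std::vector<std::thread> threads;
      for (int i = 0; i < 4; i++) threads.emplace_back(worker, i);
      for (auto& t : threads) t.join();
      return 0;
    }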
@@ -4294,11 +4294,11 @@
cl.name(),
num_workers,
_collection_set.region_length());
workers()->run_task(&cl, num_workers);
}
- g1_policy()->phase_times()->record_total_free_cset_time_ms((os::elapsedTime() - free_cset_start_time) * 1000.0);
+ policy()->phase_times()->record_total_free_cset_time_ms((os::elapsedTime() - free_cset_start_time) * 1000.0);
collection_set->clear();
}
class G1FreeHumongousRegionClosure : public HeapRegionClosure {
@@ -4419,11 +4419,11 @@
void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
assert_at_safepoint_on_vm_thread();
if (!G1EagerReclaimHumongousObjects ||
(!_has_humongous_reclaim_candidates && !log_is_enabled(Debug, gc, humongous))) {
- g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
+ policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
return;
}
double start_time = os::elapsedTime();
@@ -4444,11 +4444,11 @@
}
prepend_to_freelist(&local_cleanup_list);
decrement_summary_bytes(cl.bytes_freed());
- g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
+ policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
cl.humongous_objects_reclaimed());
}
class G1AbandonCollectionSetClosure : public HeapRegionClosure {
public:
@@ -4472,11 +4472,11 @@
return _allocator->is_retained_old_region(hr);
}
void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
_eden.add(hr);
- _g1_policy->set_region_eden(hr);
+ _policy->set_region_eden(hr);
}
#ifdef ASSERT
class NoYoungRegionsClosure: public HeapRegionClosure {
@@ -4642,20 +4642,20 @@
// Methods for the mutator alloc region
HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
bool force) {
assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
- bool should_allocate = g1_policy()->should_allocate_mutator_region();
+ bool should_allocate = policy()->should_allocate_mutator_region();
if (force || should_allocate) {
HeapRegion* new_alloc_region = new_region(word_size,
HeapRegionType::Eden,
false /* do_expand */);
if (new_alloc_region != NULL) {
set_region_short_lived_locked(new_alloc_region);
_hr_printer.alloc(new_alloc_region, !should_allocate);
_verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region);
- _g1_policy->remset_tracker()->update_at_allocate(new_alloc_region);
+ _policy->remset_tracker()->update_at_allocate(new_alloc_region);
return new_alloc_region;
}
}
return NULL;
}
@@ -4678,11 +4678,11 @@
bool G1CollectedHeap::has_more_regions(InCSetState dest) {
if (dest.is_old()) {
return true;
} else {
- return survivor_regions_count() < g1_policy()->max_survivor_regions();
+ return survivor_regions_count() < policy()->max_survivor_regions();
}
}
HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState dest) {
assert(FreeList_lock->owned_by_self(), "pre-condition");
@@ -4709,21 +4709,21 @@
_verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region);
} else {
new_alloc_region->set_old();
_verifier->check_bitmaps("Old Region Allocation", new_alloc_region);
}
- _g1_policy->remset_tracker()->update_at_allocate(new_alloc_region);
+ _policy->remset_tracker()->update_at_allocate(new_alloc_region);
_hr_printer.alloc(new_alloc_region);
return new_alloc_region;
}
return NULL;
}
void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
size_t allocated_bytes,
InCSetState dest) {
- g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
+ policy()->record_bytes_copied_during_gc(allocated_bytes);
if (dest.is_old()) {
old_set_add(alloc_region);
}
bool const during_im = collector_state()->in_initial_mark_gc();
@@ -4824,11 +4824,11 @@
void G1CollectedHeap::purge_code_root_memory() {
double purge_start = os::elapsedTime();
G1CodeRootSet::purge();
double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
- g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
+ policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
}
class RebuildStrongCodeRootClosure: public CodeBlobClosure {
G1CollectedHeap* _g1h;
< prev index next >