
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

rev 53920 : [mq]: 8218880-g1-crashes-periodic-gc-gclocker
rev 53923 : [mq]: 8219747-remove-g1-prefix

*** 257,274 **** --- 257,274 ----
    // We will set up the first region as "starts humongous". This
    // will also update the BOT covering all the regions to reflect
    // that there is a single object that starts at the bottom of the
    // first region.
    first_hr->set_starts_humongous(obj_top, word_fill_size);
-   _g1_policy->remset_tracker()->update_at_allocate(first_hr);
+   _policy->remset_tracker()->update_at_allocate(first_hr);
    // Then, if there are any, we will set up the "continues
    // humongous" regions.
    HeapRegion* hr = NULL;
    for (uint i = first + 1; i <= last; ++i) {
      hr = region_at(i);
      hr->set_continues_humongous(first_hr);
-     _g1_policy->remset_tracker()->update_at_allocate(hr);
+     _policy->remset_tracker()->update_at_allocate(hr);
    }
    // Up to this point no concurrent thread would have been able to
    // do any scanning on any region in this series. All the top
    // fields still point to bottom, so the intersection between
*** 354,364 **** --- 354,364 ----
    // the heap. Alternatively we could do a defragmentation GC.
    log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
                              word_size * HeapWordSize);
    _hrm->expand_at(first, obj_regions, workers());
-   g1_policy()->record_new_heap_size(num_regions());
+   policy()->record_new_heap_size(num_regions());
  #ifdef ASSERT
    for (uint i = first; i < first + obj_regions; ++i) {
      HeapRegion* hr = region_at(i);
      assert(hr->is_free(), "sanity");
*** 438,448 **** --- 438,448 ----
    }
    // If the GCLocker is active and we are bound for a GC, try expanding young gen.
    // This is different to when only GCLocker::needs_gc() is set: try to avoid
    // waiting because the GCLocker is active to not wait too long.
-   if (GCLocker::is_active_and_needs_gc() && g1_policy()->can_expand_young_list()) {
+   if (GCLocker::is_active_and_needs_gc() && policy()->can_expand_young_list()) {
      // No need for an ergo message here, can_expand_young_list() does this when
      // it returns true.
      result = _allocator->attempt_allocation_force(word_size);
      if (result != NULL) {
        return result;
*** 859,869 **** --- 859,869 ----
    // Humongous objects can exhaust the heap quickly, so we should check if we
    // need to start a marking cycle at each humongous object allocation. We do
    // the check before we do the actual allocation. The reason for doing it
    // before the allocation is that we avoid having to keep track of the newly
    // allocated memory while we do a GC.
-   if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation", word_size)) {
+   if (policy()->need_to_start_conc_mark("concurrent humongous allocation", word_size)) {
      collect(GCCause::_g1_humongous_allocation);
    }
    // We will loop until a) we manage to successfully perform the
*** 883,893 **** --- 883,893 ----
    // regions, we'll first try to do the allocation without doing a
    // collection hoping that there's enough space in the heap.
    result = humongous_obj_allocate(word_size);
    if (result != NULL) {
      size_t size_in_regions = humongous_obj_size_in_regions(word_size);
-     g1_policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
+     policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
      return result;
    }
    // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
    // the GCLocker initiated GC has been performed and then retry. This includes
*** 961,971 **** --- 961,971 ----
    if (!is_humongous(word_size)) {
      return _allocator->attempt_allocation_locked(word_size);
    } else {
      HeapWord* result = humongous_obj_allocate(word_size);
-     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
+     if (result != NULL && policy()->need_to_start_conc_mark("STW humongous allocation")) {
        collector_state()->set_initiate_conc_mark_if_possible(true);
      }
      return result;
    }
*** 1361,1371 **** --- 1361,1371 ----
    }
    if (expanded_by > 0) {
      size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
      assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
-     g1_policy()->record_new_heap_size(num_regions());
+     policy()->record_new_heap_size(num_regions());
    } else {
      log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
      // The expansion of the virtual storage space was unsuccessful.
      // Let's see if it was because we ran out of swap.
*** 1390,1400 **** --- 1390,1400 ----
    log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
                              shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
    if (num_regions_removed > 0) {
-     g1_policy()->record_new_heap_size(num_regions());
+     policy()->record_new_heap_size(num_regions());
    } else {
      log_debug(gc, ergo, heap)("Did not expand the heap (heap shrinking operation failed)");
    }
  }
*** 1508,1522 **** --- 1508,1522 ----
    _old_marking_cycles_completed(0),
    _eden(),
    _survivor(),
    _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
    _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
-   _g1_policy(G1Policy::create_policy(collector_policy, _gc_timer_stw)),
+   _policy(G1Policy::create_policy(collector_policy, _gc_timer_stw)),
    _heap_sizing_policy(NULL),
-   _collection_set(this, _g1_policy),
+   _collection_set(this, _policy),
    _hot_card_cache(NULL),
-   _g1_rem_set(NULL),
+   _rem_set(NULL),
    _dirty_card_queue_set(false),
    _cm(NULL),
    _cm_thread(NULL),
    _cr(NULL),
    _task_queues(NULL),
*** 1538,1548 **** --- 1538,1548 ----
    _verifier = new G1HeapVerifier(this);
    _allocator = new G1Allocator(this);
-   _heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics());
+   _heap_sizing_policy = G1HeapSizingPolicy::create(this, _policy->analytics());
    _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
    // Override the default _filler_array_max_size so that no humongous filler
    // objects are created.
*** 1635,1645 **** --- 1635,1645 ----
    // cases incorrectly returns the size in wordSize units rather than
    // HeapWordSize).
    guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
    size_t init_byte_size = collector_policy()->initial_heap_byte_size();
-   size_t max_byte_size = g1_collector_policy()->heap_reserved_size_bytes();
+   size_t max_byte_size = _collector_policy->heap_reserved_size_bytes();
    size_t heap_alignment = collector_policy()->heap_alignment();
    // Ensure that the sizes are properly aligned.
    Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
    Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
*** 1736,1746 **** --- 1736,1746 ----
    G1RegionToSpaceMapper* prev_bitmap_storage =
      create_aux_memory_mapper("Prev Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
    G1RegionToSpaceMapper* next_bitmap_storage =
      create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
-   _hrm = HeapRegionManager::create_manager(this, g1_collector_policy());
+   _hrm = HeapRegionManager::create_manager(this, _collector_policy);
    _hrm->initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
    _card_table->initialize(cardtable_storage);
    // Do later initialization work for concurrent refinement.
    _hot_card_cache->initialize(card_counts_storage);
*** 1752,1763 **** --- 1752,1763 ----
    // The G1FromCardCache reserves card with value 0 as "invalid", so the heap must not
    // start within the first card.
    guarantee(g1_rs.base() >= (char*)G1CardTable::card_size, "Java heap must not start within the first card.");
    // Also create a G1 rem set.
-   _g1_rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
-   _g1_rem_set->initialize(max_reserved_capacity(), max_regions());
+   _rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
+   _rem_set->initialize(max_reserved_capacity(), max_regions());
    size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
    guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
    guarantee(HeapRegion::CardsPerRegion < max_cards_per_region, "too many cards per region");
*** 1797,1807 **** --- 1797,1807 ----
      vm_shutdown_during_initialization("Failed to allocate initial heap.");
      return JNI_ENOMEM;
    }
    // Perform any initialization actions delegated to the policy.
-   g1_policy()->init(this, &_collection_set);
+   policy()->init(this, &_collection_set);
    jint ecode = initialize_concurrent_refinement();
    if (ecode != JNI_OK) {
      return ecode;
    }
*** 1937,1950 **** --- 1937,1946 ----
  CollectorPolicy* G1CollectedHeap::collector_policy() const {
    return _collector_policy;
  }
  
- G1CollectorPolicy* G1CollectedHeap::g1_collector_policy() const {
-   return _collector_policy;
- }
- 
  SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
    return &_soft_ref_policy;
  }
  
  size_t G1CollectedHeap::capacity() const {
*** 1964,1974 **** --- 1960,1970 ----
    size_t n_completed_buffers = 0;
    while (dcqs.apply_closure_during_gc(cl, worker_i)) {
      n_completed_buffers++;
    }
    assert(dcqs.completed_buffers_num() == 0, "Completed buffers exist!");
-   g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers, G1GCPhaseTimes::UpdateRSProcessedBuffers);
+   policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers, G1GCPhaseTimes::UpdateRSProcessedBuffers);
  }
  
  // Computes the sum of the storage used by the various regions.
  size_t G1CollectedHeap::used() const {
    size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
*** 2016,2026 **** --- 2012,2022 ----
      default:
        return is_user_requested_concurrent_full_gc(cause);
    }
  }
  
  bool G1CollectedHeap::should_upgrade_to_full_gc(GCCause::Cause cause) {
-   if(g1_policy()->force_upgrade_to_full()) {
+   if(policy()->force_upgrade_to_full()) {
      return true;
    } else if (should_do_concurrent_full_gc(_gc_cause)) {
      return false;
    } else if (has_regions_left_for_allocation()) {
      return false;
*** 2144,2154 **** --- 2140,2150 ----
        // we are not requesting a post-GC allocation.
        VM_G1CollectForAllocation op(0,     /* word_size */
                                     gc_count_before,
                                     cause,
                                     true,  /* should_initiate_conc_mark */
-                                    g1_policy()->max_pause_time_ms());
+                                    policy()->max_pause_time_ms());
        VMThread::execute(&op);
        vmop_succeeded = op.pause_succeeded();
        if (!vmop_succeeded && retry_on_vmop_failure) {
          if (old_marking_count_before == _old_marking_cycles_started) {
            should_retry_vmop = op.should_retry_gc();
*** 2170,2180 **** --- 2166,2176 ----
        // to 0 which means that we are not requesting a post-GC allocation.
        VM_G1CollectForAllocation op(0,     /* word_size */
                                     gc_count_before,
                                     cause,
                                     false, /* should_initiate_conc_mark */
-                                    g1_policy()->max_pause_time_ms());
+                                    policy()->max_pause_time_ms());
        VMThread::execute(&op);
        vmop_succeeded = op.pause_succeeded();
      } else {
        // Schedule a Full GC.
        VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
*** 2272,2282 **** --- 2268,2278 ----
  bool G1CollectedHeap::supports_tlab_allocation() const {
    return true;
  }
  
  size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
-   return (_g1_policy->young_list_target_length() - _survivor.length()) * HeapRegion::GrainBytes;
+   return (_policy->young_list_target_length() - _survivor.length()) * HeapRegion::GrainBytes;
  }
  
  size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
    return _eden.length() * HeapRegion::GrainBytes;
  }
*** 2301,2311 **** --- 2297,2307 ----
  jlong G1CollectedHeap::millis_since_last_gc() {
    // See the notes in GenCollectedHeap::millis_since_last_gc()
    // for more information about the implementation.
    jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
-                   _g1_policy->collection_pause_end_millis();
+                   _policy->collection_pause_end_millis();
    if (ret_val < 0) {
      log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT ". returning zero instead.", ret_val);
      return 0;
    }
*** 2334,2343 **** --- 2330,2343 ----
  bool G1CollectedHeap::request_concurrent_phase(const char* phase) {
    return _cm_thread->request_concurrent_phase(phase);
  }
  
+ bool G1CollectedHeap::is_heap_heterogeneous() const {
+   return _collector_policy->is_heap_heterogeneous();
+ }
+ 
  class PrintRegionClosure: public HeapRegionClosure {
    outputStream* _st;
  public:
    PrintRegionClosure(outputStream* st) : _st(st) {}
    bool do_heap_region(HeapRegion* r) {
*** 2443,2453 **** --- 2443,2453 ----
      G1StringDedup::threads_do(tc);
    }
  }
  
  void G1CollectedHeap::print_tracing_info() const {
-   g1_rem_set()->print_summary_info();
+   rem_set()->print_summary_info();
    concurrent_mark()->print_summary_info();
  }
  
  #ifndef PRODUCT
  // Helpful for debugging RSet issues.
*** 2503,2513 **** --- 2503,2513 ----
    size_t eden_used_bytes = heap()->eden_regions_count() * HeapRegion::GrainBytes;
    size_t survivor_used_bytes = heap()->survivor_regions_count() * HeapRegion::GrainBytes;
    size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();
    size_t eden_capacity_bytes =
-     (g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
+     (policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
    VirtualSpaceSummary heap_summary = create_heap_space_summary();
    return G1HeapSummary(heap_summary, heap_used, eden_used_bytes,
                         eden_capacity_bytes, survivor_used_bytes, num_regions());
  }
*** 2537,2569 **** --- 2537,2569 ----
  void G1CollectedHeap::gc_prologue(bool full) {
    // always_do_update_barrier = false;
    assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
  
    // This summary needs to be printed before incrementing total collections.
-   g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
+   rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
  
    // Update common counters.
    increment_total_collections(full /* full gc */);
    if (full) {
      increment_old_marking_cycles_started();
    }
  
    // Fill TLAB's and such
    double start = os::elapsedTime();
    ensure_parsability(true);
-   g1_policy()->phase_times()->record_prepare_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
+   policy()->phase_times()->record_prepare_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
  }
  
  void G1CollectedHeap::gc_epilogue(bool full) {
    // Update common counters.
    if (full) {
      // Update the number of full collections that have been completed.
      increment_old_marking_cycles_completed(false /* concurrent */);
    }
  
    // We are at the end of the GC. Total collections has already been increased.
-   g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
+   rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
  
    // FIXME: what is this about?
    // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
    // is set.
  #if COMPILER2_OR_JVMCI
*** 2571,2581 **** --- 2571,2581 ----
  #endif
    // always_do_update_barrier = true;
  
    double start = os::elapsedTime();
    resize_all_tlabs();
-   g1_policy()->phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
+   policy()->phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
  
    MemoryService::track_memory_usage();
    // We have just completed a GC. Update the soft reference
    // policy with the new heap occupancy
    Universe::update_heap_info_at_gc();
*** 2588,2598 **** --- 2588,2598 ----
    assert_heap_not_locked_and_not_at_safepoint();
    VM_G1CollectForAllocation op(word_size,
                                 gc_count_before,
                                 gc_cause,
                                 false, /* should_initiate_conc_mark */
-                                g1_policy()->max_pause_time_ms());
+                                policy()->max_pause_time_ms());
    VMThread::execute(&op);
  
    HeapWord* result = op.result();
    bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
    assert(result == NULL || ret_succeeded,
*** 2765,2785 **** --- 2765,2785 ----
    void flush_rem_set_entries() { _dcq.flush(); }
  };
  
  void G1CollectedHeap::register_humongous_regions_with_cset() {
    if (!G1EagerReclaimHumongousObjects) {
-     g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
+     policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
      return;
    }
    double time = os::elapsed_counter();
  
    // Collect reclaim candidate information and register candidates with cset.
    RegisterHumongousWithInCSetFastTestClosure cl;
    heap_region_iterate(&cl);
  
    time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
-   g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time, cl.total_humongous(), cl.candidate_humongous());
+   policy()->phase_times()->record_fast_reclaim_humongous_stats(time, cl.total_humongous(), cl.candidate_humongous());
    _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
  
    // Finally flush all remembered set entries to re-check into the global DCQS.
*** 2847,2857 **** --- 2847,2857 ----
    double wait_time_ms = 0.0;
    if (waited) {
      double scan_wait_end = os::elapsedTime();
      wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
    }
-   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
+   policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
  }
  
  class G1PrintCollectionSetClosure : public HeapRegionClosure {
  private:
    G1HRPrinter* _hr_printer;
*** 2868,2878 **** --- 2868,2878 ----
    collection_set()->start_incremental_building();
  
    clear_cset_fast_test();
  
    guarantee(_eden.length() == 0, "eden should have been cleared");
-   g1_policy()->transfer_survivors_to_cset(survivor());
+   policy()->transfer_survivors_to_cset(survivor());
  }
  
  bool G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
    assert_at_safepoint_on_vm_thread();
*** 2888,2898 **** --- 2888,2898 ----
    _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
  
    SvcGCMarker sgcm(SvcGCMarker::MINOR);
    ResourceMark rm;
  
-   g1_policy()->note_gc_start();
+   policy()->note_gc_start();
  
    wait_for_root_region_scanning();
  
    print_heap_before_gc();
    print_heap_regions();
*** 2904,2914 **** --- 2904,2914 ----
    // We should not be doing initial mark unless the conc mark thread is running
    if (!_cm_thread->should_terminate()) {
      // This call will decide whether this pause is an initial-mark
      // pause. If it is, in_initial_mark_gc() will return true
      // for the duration of this pause.
-     g1_policy()->decide_on_conc_mark_initiation();
+     policy()->decide_on_conc_mark_initiation();
    }
  
    // We do not allow initial-mark to be piggy-backed on a mixed GC.
    assert(!collector_state()->in_initial_mark_gc() ||
           collector_state()->in_young_only_phase(), "sanity");
*** 3016,3032 **** --- 3016,3032 ----
        //
        // The elapsed time induced by the start time below deliberately elides
        // the possible verification above.
        double sample_start_time_sec = os::elapsedTime();
  
-       g1_policy()->record_collection_pause_start(sample_start_time_sec);
+       policy()->record_collection_pause_start(sample_start_time_sec);
  
        if (collector_state()->in_initial_mark_gc()) {
          concurrent_mark()->pre_initial_mark();
        }
  
-       g1_policy()->finalize_collection_set(target_pause_time_ms, &_survivor);
+       policy()->finalize_collection_set(target_pause_time_ms, &_survivor);
  
        evacuation_info.set_collectionset_regions(collection_set()->region_length());
  
        register_humongous_regions_with_cset();
*** 3065,3080 **** --- 3065,3080 ----
        _survivor_evac_stats.adjust_desired_plab_sz();
        _old_evac_stats.adjust_desired_plab_sz();
  
        double start = os::elapsedTime();
        start_new_collection_set();
-       g1_policy()->phase_times()->record_start_new_cset_time_ms((os::elapsedTime() - start) * 1000.0);
+       policy()->phase_times()->record_start_new_cset_time_ms((os::elapsedTime() - start) * 1000.0);
  
        if (evacuation_failed()) {
          double recalculate_used_start = os::elapsedTime();
          set_used(recalculate_used());
-         g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
+         policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
          if (_archive_allocator != NULL) {
            _archive_allocator->clear_used();
          }
          for (uint i = 0; i < ParallelGCThreads; i++) {
*** 3083,3093 **** --- 3083,3093 ----
          }
        }
      } else {
        // The "used" of the the collection set have already been subtracted
        // when they were freed.  Add in the bytes evacuated.
-       increase_used(g1_policy()->bytes_copied_during_gc());
+       increase_used(policy()->bytes_copied_during_gc());
      }
  
      if (collector_state()->in_initial_mark_gc()) {
        // We have to do this before we notify the CM threads that
        // they can start working to make sure that all the
*** 3110,3120 **** --- 3110,3120 ----
        // expansion_amount() does this when it returns a value > 0.
        double expand_ms;
        if (!expand(expand_bytes, _workers, &expand_ms)) {
          // We failed to expand the heap. Cannot do anything about it.
        }
-       g1_policy()->phase_times()->record_expand_heap_time(expand_ms);
+       policy()->phase_times()->record_expand_heap_time(expand_ms);
      }
    }
  
    // We redo the verification but now wrt to the new CSet which
    // has just got initialized after the previous CSet was freed.
*** 3123,3137 **** --- 3123,3137 ----
    // This timing is only used by the ergonomics to handle our pause target.
    // It is unclear why this should not include the full pause. We will
    // investigate this in CR 7178365.
    double sample_end_time_sec = os::elapsedTime();
    double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
-   size_t total_cards_scanned = g1_policy()->phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanRS, G1GCPhaseTimes::ScanRSScannedCards);
-   g1_policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);
+   size_t total_cards_scanned = policy()->phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanRS, G1GCPhaseTimes::ScanRSScannedCards);
+   policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);
  
    evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before());
-   evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());
+   evacuation_info.set_bytes_copied(policy()->bytes_copied_during_gc());
  
    if (VerifyRememberedSets) {
      log_info(gc, verify)("[Verifying RemSets after GC]");
      VerifyRegionRemSetClosure v_cl;
      heap_region_iterate(&v_cl);
*** 3156,3166 **** --- 3156,3166 ----
    // Print the remainder of the GC log output.
    if (evacuation_failed()) {
      log_info(gc)("To-space exhausted");
    }
  
-   g1_policy()->print_phases();
+   policy()->print_phases();
    heap_transition.print();
  
    // It is not yet to safe to tell the concurrent mark to
    // start as we have some optional output below. We don't want the
    // output from the concurrent mark thread interfering with this
*** 3181,3191 **** --- 3181,3191 ----
    // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
    // before any GC notifications are raised.
    g1mm()->update_sizes();
  
    _gc_tracer_stw->report_evacuation_info(&evacuation_info);
-   _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
+   _gc_tracer_stw->report_tenuring_threshold(_policy->tenuring_threshold());
    _gc_timer_stw->register_gc_end();
    _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
  }
  
  // It should now be safe to tell the concurrent mark thread to start
  // without its logging output interfering with the logging output
*** 3215,3225 **** --- 3215,3225 ----
    remove_self_forwarding_pointers();
    SharedRestorePreservedMarksTaskExecutor task_executor(workers());
    _preserved_marks_set.restore(&task_executor);
  
-   g1_policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
+   policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
  }
  
  void G1CollectedHeap::preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m) {
    if (!_evacuation_failed) {
      _evacuation_failed = true;
*** 3273,3283 **** --- 3273,3283 ----
    void work(uint worker_id) {
      if (worker_id >= _n_workers) return;  // no work needed this round
  
      double start_sec = os::elapsedTime();
-     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, start_sec);
+     _g1h->policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, start_sec);
  
      {
        ResourceMark rm;
        HandleMark   hm;
*** 3288,3298 **** --- 3288,3298 ----
        double start_strong_roots_sec = os::elapsedTime();
  
        _root_processor->evacuate_roots(pss, worker_id);
  
-       _g1h->g1_rem_set()->oops_into_collection_set_do(pss, worker_id);
+       _g1h->rem_set()->oops_into_collection_set_do(pss, worker_id);
  
        double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
  
        double term_sec = 0.0;
        size_t evac_term_attempts = 0;
*** 3303,3313 **** --- 3303,3313 ----
          evac_term_attempts = evac.term_attempts();
          term_sec = evac.term_time();
          double elapsed_sec = os::elapsedTime() - start;
  
-         G1GCPhaseTimes* p = _g1h->g1_policy()->phase_times();
+         G1GCPhaseTimes* p = _g1h->policy()->phase_times();
          p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
  
          p->record_or_add_thread_work_item(G1GCPhaseTimes::ObjCopy,
                                            worker_id,
                                            pss->lab_waste_words() * HeapWordSize,
*** 3325,3335 **** --- 3325,3335 ----
        // Close the inner scope so that the ResourceMark and HandleMark
        // destructors are executed here and are included as part of the
        // "GC Worker Time".
      }
-     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
+     _g1h->policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
    }
  };
  
  void G1CollectedHeap::complete_cleaning(BoolObjectClosure* is_alive,
                                          bool class_unloading_occurred) {
*** 3390,3400 **** --- 3390,3400 ----
  public:
    G1RedirtyLoggedCardsTask(G1DirtyCardQueueSet* queue, G1CollectedHeap* g1h) : AbstractGangTask("Redirty Cards"),
      _queue(queue), _g1h(g1h) { }
  
    virtual void work(uint worker_id) {
-     G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
+     G1GCPhaseTimes* phase_times = _g1h->policy()->phase_times();
      G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
  
      RedirtyLoggedCardTableEntryClosure cl(_g1h);
      _queue->par_apply_closure_to_all_completed_buffers(&cl);
*** 3411,3421 **** --- 3411,3421 ----
    G1DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set();
    dcq.merge_bufferlists(&dirty_card_queue_set());
    assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  
-   g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
+   policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
  }
  
  // Weak Reference Processing support
  
  bool G1STWIsAliveClosure::do_object_b(oop p) {
*** 3644,3654 **** --- 3644,3654 ----
    G1STWDrainQueueClosure drain_queue(this, pss);
  
    // Setup the soft refs policy...
    rp->setup_policy(false);
  
-   ReferenceProcessorPhaseTimes* pt = g1_policy()->phase_times()->ref_phase_times();
+   ReferenceProcessorPhaseTimes* pt = policy()->phase_times()->ref_phase_times();
  
    ReferenceProcessorStats stats;
    if (!rp->processing_is_mt()) {
      // Serial reference processing...
      stats = rp->process_discovered_references(&is_alive,
*** 3680,3690 **** --- 3680,3690 ----
    make_pending_list_reachable();
  
    rp->verify_no_references_recorded();
  
    double ref_proc_time = os::elapsedTime() - ref_proc_start;
-   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
+   policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
  }
  
  void G1CollectedHeap::make_pending_list_reachable() {
    if (collector_state()->in_initial_mark_gc()) {
      oop pll_head = Universe::reference_pending_list();
*** 3696,3720 **** --- 3696,3720 ----
    }
  }
  
  void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) {
    double merge_pss_time_start = os::elapsedTime();
    per_thread_states->flush();
-   g1_policy()->phase_times()->record_merge_pss_time_ms((os::elapsedTime() - merge_pss_time_start) * 1000.0);
+   policy()->phase_times()->record_merge_pss_time_ms((os::elapsedTime() - merge_pss_time_start) * 1000.0);
  }
  
  void G1CollectedHeap::pre_evacuate_collection_set() {
    _expand_heap_after_alloc_failure = true;
    _evacuation_failed = false;
  
    // Disable the hot card cache.
    _hot_card_cache->reset_hot_cache_claimed_index();
    _hot_card_cache->set_use_cache(false);
  
-   g1_rem_set()->prepare_for_oops_into_collection_set_do();
+   rem_set()->prepare_for_oops_into_collection_set_do();
    _preserved_marks_set.assert_empty();
  
-   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
+   G1GCPhaseTimes* phase_times = policy()->phase_times();
  
    // InitialMark needs claim bits to keep track of the marked-through CLDs.
    if (collector_state()->in_initial_mark_gc()) {
      double start_clear_claimed_marks = os::elapsedTime();
*** 3729,3739 **** --- 3729,3739 ----
    // Should G1EvacuationFailureALot be in effect for this GC?
    NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
  
    assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
  
-   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
+   G1GCPhaseTimes* phase_times = policy()->phase_times();
  
    double start_par_time_sec = os::elapsedTime();
    double end_par_time_sec;
  
    {
*** 3788,3798 **** --- 3788,3798 ----
      HeapRegion* hr = _optional->region_at(i);
      G1ScanRSForOptionalClosure scan_opt_cl(&obj_cl);
      pss->oops_into_optional_region(hr)->oops_do(&scan_opt_cl, root_cls->raw_strong_oops());
      copy_time += trim_ticks(pss);
  
-     G1ScanRSForRegionClosure scan_rs_cl(_g1h->g1_rem_set()->scan_state(), &obj_cl, pss, G1GCPhaseTimes::OptScanRS, worker_id);
+     G1ScanRSForRegionClosure scan_rs_cl(_g1h->rem_set()->scan_state(), &obj_cl, pss, G1GCPhaseTimes::OptScanRS, worker_id);
      scan_rs_cl.do_heap_region(hr);
      copy_time += trim_ticks(pss);
  
      scanned += scan_rs_cl.cards_scanned();
      claimed += scan_rs_cl.cards_claimed();
      skipped += scan_rs_cl.cards_skipped();
*** 3800,3810 **** --- 3800,3810 ----
      // Chunk lists for this region is no longer needed.
      used_memory += pss->oops_into_optional_region(hr)->used_memory();
    }
  
    Tickspan scan_time = (Ticks::now() - start) - copy_time;
-   G1GCPhaseTimes* p = _g1h->g1_policy()->phase_times();
+   G1GCPhaseTimes* p = _g1h->policy()->phase_times();
    p->record_or_add_time_secs(G1GCPhaseTimes::OptScanRS, worker_id, scan_time.seconds());
    p->record_or_add_time_secs(G1GCPhaseTimes::OptObjCopy, worker_id, copy_time.seconds());
  
    p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, scanned, G1GCPhaseTimes::OptCSetScannedCards);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, claimed, G1GCPhaseTimes::OptCSetClaimedCards);
*** 3816,3826 **** --- 3816,3826 ----
    Ticks start = Ticks::now();
    G1ParEvacuateFollowersClosure cl(_g1h, pss, _queues, &_terminator, G1GCPhaseTimes::OptObjCopy);
    cl.do_void();
  
    Tickspan evac_time = (Ticks::now() - start);
-   G1GCPhaseTimes* p = _g1h->g1_policy()->phase_times();
+   G1GCPhaseTimes* p = _g1h->policy()->phase_times();
    p->record_or_add_time_secs(G1GCPhaseTimes::OptObjCopy, worker_id, evac_time.seconds());
    assert(pss->trim_ticks().seconds() == 0.0, "Unexpected partial trimming done during optional evacuation");
  }
  
  public:
*** 3865,3875 **** --- 3865,3875 ----
    if (evacuation_failed()) {
      return;
    }
  
-   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
+   G1GCPhaseTimes* phase_times = policy()->phase_times();
  
    const double gc_start_time_ms = phase_times->cur_collection_start_sec() * 1000.0;
  
    double start_time_sec = os::elapsedTime();
  
    do {
*** 3879,3889 **** --- 3879,3889 ----
      if (time_left_ms < 0) {
        log_trace(gc, ergo, cset)("Skipping %u optional regions, pause time exceeded %.3fms", optional_cset.size(), time_used_ms);
        break;
      }
  
-     optional_cset.prepare_evacuation(time_left_ms * _g1_policy->optional_evacuation_fraction());
+     optional_cset.prepare_evacuation(time_left_ms * _policy->optional_evacuation_fraction());
      if (optional_cset.prepare_failed()) {
        log_trace(gc, ergo, cset)("Skipping %u optional regions, no regions can be evacuated in %.3fms", optional_cset.size(), time_left_ms);
        break;
      }
*** 3899,3909 **** --- 3899,3909 ----
  }
  
  void G1CollectedHeap::post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
    // Also cleans the card table from temporary duplicate detection information used
    // during UpdateRS/ScanRS.
-   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
+   rem_set()->cleanup_after_oops_into_collection_set_do();
  
    // Process any discovered reference objects - we have
    // to do this _before_ we retire the GC alloc regions
    // as we may have to copy some 'reachable' referent
    // objects (and their reachable sub-graphs) that were
*** 3912,3930 **** --- 3912,3930 ----
    G1STWIsAliveClosure is_alive(this);
    G1KeepAliveClosure keep_alive(this);
  
    WeakProcessor::weak_oops_do(workers(), &is_alive, &keep_alive,
-                               g1_policy()->phase_times()->weak_phase_times());
+                               policy()->phase_times()->weak_phase_times());
  
    if (G1StringDedup::is_enabled()) {
      double string_dedup_time_ms = os::elapsedTime();
  
-     string_dedup_cleaning(&is_alive, &keep_alive, g1_policy()->phase_times());
+     string_dedup_cleaning(&is_alive, &keep_alive, policy()->phase_times());
  
      double string_cleanup_time_ms = (os::elapsedTime() - string_dedup_time_ms) * 1000.0;
-     g1_policy()->phase_times()->record_string_deduplication_time(string_cleanup_time_ms);
+     policy()->phase_times()->record_string_deduplication_time(string_cleanup_time_ms);
    }
  
    if (evacuation_failed()) {
      restore_after_evac_failure();
*** 3950,3966 **** --- 3950,3966 ----
    redirty_logged_cards();
  #if COMPILER2_OR_JVMCI
    double start = os::elapsedTime();
    DerivedPointerTable::update_pointers();
-   g1_policy()->phase_times()->record_derived_pointer_table_update_time((os::elapsedTime() - start) * 1000.0);
+   policy()->phase_times()->record_derived_pointer_table_update_time((os::elapsedTime() - start) * 1000.0);
  #endif
-   g1_policy()->print_age_table();
+   policy()->print_age_table();
  }
  
  void G1CollectedHeap::record_obj_copy_mem_stats() {
-   g1_policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
+   policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
  
    _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
                                                 create_g1_evac_summary(&_old_evac_stats));
  }
*** 3984,3994 **** --- 3984,3994 ----
    // (since we don't refine cards in young regions).
    if (!skip_hot_card_cache && !hr->is_young()) {
      _hot_card_cache->reset_card_counts(hr);
    }
    hr->hr_clear(skip_remset, true /* clear_space */, locked /* locked */);
-   _g1_policy->remset_tracker()->update_at_free(hr);
+   _policy->remset_tracker()->update_at_free(hr);
    free_list->add_ordered(hr);
  }
  
  void G1CollectedHeap::free_humongous_region(HeapRegion* hr, FreeRegionList* free_list) {
*** 4114,4124 **** --- 4114,4124 ----
      _evacuation_info->increment_collectionset_used_after(_after_used_bytes);
  
      g1h->prepend_to_freelist(&_local_free_list);
      g1h->decrement_summary_bytes(_before_used_bytes);
  
-     G1Policy* policy = g1h->g1_policy();
+     G1Policy* policy = g1h->policy();
      policy->add_bytes_allocated_in_old_since_last_gc(_bytes_allocated_in_old_since_last_gc);
  
      g1h->alloc_buffer_stats(InCSetState::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words);
    }
  };
*** 4189,4199 **** --- 4189,4199 ----
    }
  
    void complete_work() {
      _cl.complete_work();
  
-     G1Policy* policy = G1CollectedHeap::heap()->g1_policy();
+     G1Policy* policy = G1CollectedHeap::heap()->policy();
      policy->record_max_rs_lengths(_rs_lengths);
      policy->cset_regions_freed();
    }
  
  public:
    G1FreeCollectionSetTask(G1CollectionSet* collection_set, G1EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
*** 4217,4227 **** // Chunk size for work distribution. The chosen value has been determined experimentally // to be a good tradeoff between overhead and achievable parallelism. static uint chunk_size() { return 32; } virtual void work(uint worker_id) { ! G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times(); // Claim serial work. if (_serial_work_claim == 0) { jint value = Atomic::add(1, &_serial_work_claim) - 1; if (value == 0) { --- 4217,4227 ---- // Chunk size for work distribution. The chosen value has been determined experimentally // to be a good tradeoff between overhead and achievable parallelism. static uint chunk_size() { return 32; } virtual void work(uint worker_id) { ! G1GCPhaseTimes* timer = G1CollectedHeap::heap()->policy()->phase_times(); // Claim serial work. if (_serial_work_claim == 0) { jint value = Atomic::add(1, &_serial_work_claim) - 1; if (value == 0) {
*** 4294,4304 **** cl.name(), num_workers, _collection_set.region_length()); workers()->run_task(&cl, num_workers); } ! g1_policy()->phase_times()->record_total_free_cset_time_ms((os::elapsedTime() - free_cset_start_time) * 1000.0); collection_set->clear(); } class G1FreeHumongousRegionClosure : public HeapRegionClosure { --- 4294,4304 ---- cl.name(), num_workers, _collection_set.region_length()); workers()->run_task(&cl, num_workers); } ! policy()->phase_times()->record_total_free_cset_time_ms((os::elapsedTime() - free_cset_start_time) * 1000.0); collection_set->clear(); } class G1FreeHumongousRegionClosure : public HeapRegionClosure {
*** 4419,4429 **** void G1CollectedHeap::eagerly_reclaim_humongous_regions() { assert_at_safepoint_on_vm_thread(); if (!G1EagerReclaimHumongousObjects || (!_has_humongous_reclaim_candidates && !log_is_enabled(Debug, gc, humongous))) { ! g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0); return; } double start_time = os::elapsedTime(); --- 4419,4429 ---- void G1CollectedHeap::eagerly_reclaim_humongous_regions() { assert_at_safepoint_on_vm_thread(); if (!G1EagerReclaimHumongousObjects || (!_has_humongous_reclaim_candidates && !log_is_enabled(Debug, gc, humongous))) { ! policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0); return; } double start_time = os::elapsedTime();
*** 4444,4454 **** } prepend_to_freelist(&local_cleanup_list); decrement_summary_bytes(cl.bytes_freed()); ! g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0, cl.humongous_objects_reclaimed()); } class G1AbandonCollectionSetClosure : public HeapRegionClosure { public: --- 4444,4454 ---- } prepend_to_freelist(&local_cleanup_list); decrement_summary_bytes(cl.bytes_freed()); ! policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0, cl.humongous_objects_reclaimed()); } class G1AbandonCollectionSetClosure : public HeapRegionClosure { public:
*** 4472,4482 **** return _allocator->is_retained_old_region(hr); } void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) { _eden.add(hr); ! _g1_policy->set_region_eden(hr); } #ifdef ASSERT class NoYoungRegionsClosure: public HeapRegionClosure { --- 4472,4482 ---- return _allocator->is_retained_old_region(hr); } void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) { _eden.add(hr); ! _policy->set_region_eden(hr); } #ifdef ASSERT class NoYoungRegionsClosure: public HeapRegionClosure {
*** 4642,4661 **** // Methods for the mutator alloc region HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size, bool force) { assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); ! bool should_allocate = g1_policy()->should_allocate_mutator_region(); if (force || should_allocate) { HeapRegion* new_alloc_region = new_region(word_size, HeapRegionType::Eden, false /* do_expand */); if (new_alloc_region != NULL) { set_region_short_lived_locked(new_alloc_region); _hr_printer.alloc(new_alloc_region, !should_allocate); _verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region); ! _g1_policy->remset_tracker()->update_at_allocate(new_alloc_region); return new_alloc_region; } } return NULL; } --- 4642,4661 ---- // Methods for the mutator alloc region HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size, bool force) { assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); ! bool should_allocate = policy()->should_allocate_mutator_region(); if (force || should_allocate) { HeapRegion* new_alloc_region = new_region(word_size, HeapRegionType::Eden, false /* do_expand */); if (new_alloc_region != NULL) { set_region_short_lived_locked(new_alloc_region); _hr_printer.alloc(new_alloc_region, !should_allocate); _verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region); ! _policy->remset_tracker()->update_at_allocate(new_alloc_region); return new_alloc_region; } } return NULL; }
*** 4678,4688 **** bool G1CollectedHeap::has_more_regions(InCSetState dest) { if (dest.is_old()) { return true; } else { ! return survivor_regions_count() < g1_policy()->max_survivor_regions(); } } HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState dest) { assert(FreeList_lock->owned_by_self(), "pre-condition"); --- 4678,4688 ---- bool G1CollectedHeap::has_more_regions(InCSetState dest) { if (dest.is_old()) { return true; } else { ! return survivor_regions_count() < policy()->max_survivor_regions(); } } HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState dest) { assert(FreeList_lock->owned_by_self(), "pre-condition");
*** 4709,4729 **** _verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region); } else { new_alloc_region->set_old(); _verifier->check_bitmaps("Old Region Allocation", new_alloc_region); } ! _g1_policy->remset_tracker()->update_at_allocate(new_alloc_region); _hr_printer.alloc(new_alloc_region); return new_alloc_region; } return NULL; } void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region, size_t allocated_bytes, InCSetState dest) { ! g1_policy()->record_bytes_copied_during_gc(allocated_bytes); if (dest.is_old()) { old_set_add(alloc_region); } bool const during_im = collector_state()->in_initial_mark_gc(); --- 4709,4729 ---- _verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region); } else { new_alloc_region->set_old(); _verifier->check_bitmaps("Old Region Allocation", new_alloc_region); } ! _policy->remset_tracker()->update_at_allocate(new_alloc_region); _hr_printer.alloc(new_alloc_region); return new_alloc_region; } return NULL; } void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region, size_t allocated_bytes, InCSetState dest) { ! policy()->record_bytes_copied_during_gc(allocated_bytes); if (dest.is_old()) { old_set_add(alloc_region); } bool const during_im = collector_state()->in_initial_mark_gc();
*** 4824,4834 **** void G1CollectedHeap::purge_code_root_memory() { double purge_start = os::elapsedTime(); G1CodeRootSet::purge(); double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0; ! g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms); } class RebuildStrongCodeRootClosure: public CodeBlobClosure { G1CollectedHeap* _g1h; --- 4824,4834 ---- void G1CollectedHeap::purge_code_root_memory() { double purge_start = os::elapsedTime(); G1CodeRootSet::purge(); double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0; ! policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms); } class RebuildStrongCodeRootClosure: public CodeBlobClosure { G1CollectedHeap* _g1h;
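All of the hunks above follow the same mechanical pattern from the 8219747-remove-g1-prefix change: the `_g1_policy` field and `g1_policy()` accessor lose their `g1_` prefix, and every call site is updated accordingly. The following standalone sketch is illustration only, not part of this webrev; the class shapes are simplified stand-ins (the real G1CollectedHeap and G1Policy are far larger), and the member names shown are assumed from the call sites visible in the diff.

    // Hypothetical, simplified sketch of the rename pattern (assumption: accessor is a
    // plain getter over the renamed field, as the call sites in the hunks suggest).
    #include <cstddef>

    class G1Policy {
    public:
      void record_new_heap_size(size_t regions) { /* bookkeeping elided */ }
    };

    class G1CollectedHeap {
      G1Policy* _policy;                              // was: _g1_policy
    public:
      explicit G1CollectedHeap(G1Policy* p) : _policy(p) {}
      G1Policy* policy() const { return _policy; }    // was: g1_policy()

      void after_expand(size_t num_regions) {
        // Call sites change from g1_policy()->... to policy()->...
        policy()->record_new_heap_size(num_regions);
      }
    };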