
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

rev 60257 : [mq]: 8248401-unify-millis-since-last-gc


1400     // humongous set should be invoked while holding the Heap_lock.
1401 
1402     if (SafepointSynchronize::is_at_safepoint()) {
1403       guarantee(Thread::current()->is_VM_thread() ||
1404                 OldSets_lock->owned_by_self(),
1405                 "master humongous set MT safety protocol at a safepoint");
1406     } else {
1407       guarantee(Heap_lock->owned_by_self(),
1408                 "master humongous set MT safety protocol outside a safepoint");
1409     }
1410   }
1411   bool is_correct_type(HeapRegion* hr) { return hr->is_humongous(); }
1412   const char* get_description() { return "Humongous Regions"; }
1413 };
1414 
1415 G1CollectedHeap::G1CollectedHeap() :
1416   CollectedHeap(),
1417   _young_gen_sampling_thread(NULL),
1418   _workers(NULL),
1419   _card_table(NULL),

1420   _soft_ref_policy(),
1421   _old_set("Old Region Set", new OldRegionSetChecker()),
1422   _archive_set("Archive Region Set", new ArchiveRegionSetChecker()),
1423   _humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
1424   _bot(NULL),
1425   _listener(),
1426   _numa(G1NUMA::create()),
1427   _hrm(NULL),
1428   _allocator(NULL),
1429   _verifier(NULL),
1430   _summary_bytes_used(0),
1431   _bytes_used_during_gc(0),
1432   _archive_allocator(NULL),
1433   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1434   _old_evac_stats("Old", OldPLABSize, PLABWeight),
1435   _expand_heap_after_alloc_failure(true),
1436   _g1mm(NULL),
1437   _humongous_reclaim_candidates(),
1438   _has_humongous_reclaim_candidates(false),
1439   _hr_printer(),


1949       MemRegion mr(dummy_obj, word_size);
1950       CollectedHeap::fill_with_object(mr);
1951     } else {
1952       // If we can't allocate once, we probably cannot allocate
1953       // again. Let's get out of the loop.
1954       break;
1955     }
1956   }
1957 }
1958 #endif // !PRODUCT
1959 
1960 void G1CollectedHeap::increment_old_marking_cycles_started() {
1961   assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
1962          _old_marking_cycles_started == _old_marking_cycles_completed + 1,
1963          "Wrong marking cycle count (started: %d, completed: %d)",
1964          _old_marking_cycles_started, _old_marking_cycles_completed);
1965 
1966   _old_marking_cycles_started++;
1967 }
1968 
1969 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {

1970   MonitorLocker ml(G1OldGCCount_lock, Mutex::_no_safepoint_check_flag);
1971 
1972   // We assume that if concurrent == true, then the caller is a
1973   // concurrent thread that has joined the Suspendible Thread
1974   // Set. If there's ever a cheap way to check this, we should add an
1975   // assert here.
1976 
1977   // Given that this method is called at the end of a Full GC or of a
1978   // concurrent cycle, and those can be nested (i.e., a Full GC can
1979   // interrupt a concurrent cycle), the number of full collections
1980   // completed should be either one (in the case where there was no
1981   // nesting) or two (when a Full GC interrupted a concurrent cycle)
1982   // behind the number of full collections started.
1983 
1984   // This is the case for the inner caller, i.e. a Full GC.
1985   assert(concurrent ||
1986          (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
1987          (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
1988          "for inner caller (Full GC): _old_marking_cycles_started = %u "
1989          "is inconsistent with _old_marking_cycles_completed = %u",
1990          _old_marking_cycles_started, _old_marking_cycles_completed);
1991 
1992   // This is the case for the outer caller, i.e. the concurrent cycle.
1993   assert(!concurrent ||
1994          (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
1995          "for outer caller (concurrent cycle): "
1996          "_old_marking_cycles_started = %u "
1997          "is inconsistent with _old_marking_cycles_completed = %u",
1998          _old_marking_cycles_started, _old_marking_cycles_completed);
1999 
2000   _old_marking_cycles_completed += 1;



2001 
2002   // We need to clear the "in_progress" flag in the CM thread before
2003   // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2004   // is set) so that if a waiter requests another System.gc() it doesn't
2005   // incorrectly see that a marking cycle is still in progress.
2006   if (concurrent) {
2007     _cm_thread->set_idle();
2008   }
2009 
2010   // Notify threads waiting in System.gc() (with ExplicitGCInvokesConcurrent)
2011   // for a full GC to finish that their wait is over.
2012   ml.notify_all();
2013 }
2014 
2015 void G1CollectedHeap::collect(GCCause::Cause cause) {
2016   try_collect(cause);
2017 }
2018 
2019 // Return true if (x < y) with allowance for wraparound.
2020 static bool gc_counter_less_than(uint x, uint y) {


2349 }
2350 
2351 // For G1, TLABs should not contain humongous objects, so the maximum TLAB size
2352 // must be equal to the humongous object limit.
2353 size_t G1CollectedHeap::max_tlab_size() const {
2354   return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
2355 }
2356 
2357 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2358   return _allocator->unsafe_max_tlab_alloc();
2359 }
2360 
2361 size_t G1CollectedHeap::max_capacity() const {
2362   return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
2363 }
2364 
2365 size_t G1CollectedHeap::max_reserved_capacity() const {
2366   return _hrm->max_length() * HeapRegion::GrainBytes;
2367 }
2368 
2369 jlong G1CollectedHeap::millis_since_last_gc() {
2370   // See the notes in GenCollectedHeap::millis_since_last_gc()
2371   // for more information about the implementation.
2372   jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
2373                   _policy->collection_pause_end_millis();
2374   if (ret_val < 0) {
2375     log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
2376       ". returning zero instead.", ret_val);
2377     return 0;
2378   }
2379   return ret_val;
2380 }
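
This G1-specific millis_since_last_gc() is dropped by this revision (it has no counterpart in the updated file below); the value is instead derived from a shared "whole heap examined" timestamp, which is why the updated increment_old_marking_cycles_completed() calls next_whole_heap_examined(). A minimal sketch of how such a shared helper could look, with the class, field and accessor names here being illustrative assumptions rather than the exact shared-code API:

  // Sketch only: names are illustrative assumptions, not the actual patch.
  class CollectedHeapTimestampSketch {
    // Nanosecond timestamp of the last point at which the whole heap was examined.
    jlong _last_whole_heap_examined_time_ns;

   public:
    void next_whole_heap_examined() {
      _last_whole_heap_examined_time_ns = os::javaTimeNanos();
    }

    jlong millis_since_last_whole_heap_examined() {
      return (os::javaTimeNanos() - _last_whole_heap_examined_time_ns) / NANOSECS_PER_MILLISEC;
    }
  };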
2381 
2382 void G1CollectedHeap::deduplicate_string(oop str) {
2383   assert(java_lang_String::is_instance(str), "invariant");
2384 
2385   if (G1StringDedup::is_enabled()) {
2386     G1StringDedup::deduplicate(str);
2387   }
2388 }
2389 
2390 void G1CollectedHeap::prepare_for_verify() {
2391   _verifier->prepare_for_verify();
2392 }
2393 
2394 void G1CollectedHeap::verify(VerifyOption vo) {
2395   _verifier->verify(vo);
2396 }
2397 
2398 bool G1CollectedHeap::supports_concurrent_gc_breakpoints() const {
2399   return true;
2400 }
2401 


2624     ensure_parsability(true);
2625     Tickspan dt = Ticks::now() - start;
2626     phase_times()->record_prepare_tlab_time_ms(dt.seconds() * MILLIUNITS);
2627   }
2628 
2629   if (!full) {
2630     // Flush dirty card queues to qset, so later phases don't need to account
2631     // for partially filled per-thread queues and such.  Not needed for full
2632     // collections, which ignore those logs.
2633     Ticks start = Ticks::now();
2634     G1BarrierSet::dirty_card_queue_set().concatenate_logs();
2635     Tickspan dt = Ticks::now() - start;
2636     phase_times()->record_concatenate_dirty_card_logs_time_ms(dt.seconds() * MILLIUNITS);
2637   }
2638 }
2639 
2640 void G1CollectedHeap::gc_epilogue(bool full) {
2641   // Update common counters.
2642   if (full) {
2643     // Update the number of full collections that have been completed.
2644     increment_old_marking_cycles_completed(false /* concurrent */);
2645   }
2646 
2647   // We are at the end of the GC. The total collections counter has already been incremented.
2648   rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
2649 
2650   // FIXME: what is this about?
2651   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2652   // is set.
2653 #if COMPILER2_OR_JVMCI
2654   assert(DerivedPointerTable::is_empty(), "derived pointer present");
2655 #endif
2656 
2657   double start = os::elapsedTime();
2658   resize_all_tlabs();
2659   phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2660 
2661   MemoryService::track_memory_usage();
2662   // We have just completed a GC. Update the soft reference
2663   // policy with the new heap occupancy
2664   Universe::update_heap_info_at_gc();
2665 
2666   // Print NUMA statistics.
2667   _numa->print_statistics();


2668 }
2669 
2670 void G1CollectedHeap::verify_numa_regions(const char* desc) {
2671   LogTarget(Trace, gc, heap, verify) lt;
2672 
2673   if (lt.is_enabled()) {
2674     LogStream ls(lt);
2675     // Iterate all heap regions to print whether each region's preferred numa id matches its actual numa id.
2676     G1NodeIndexCheckClosure cl(desc, _numa, &ls);
2677     heap_region_iterate(&cl);
2678   }
2679 }
2680 
2681 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
2682                                                uint gc_count_before,
2683                                                bool* succeeded,
2684                                                GCCause::Cause gc_cause) {
2685   assert_heap_not_locked_and_not_at_safepoint();
2686   VM_G1CollectForAllocation op(word_size,
2687                                gc_count_before,




1400     // humongous set should be invoked while holding the Heap_lock.
1401 
1402     if (SafepointSynchronize::is_at_safepoint()) {
1403       guarantee(Thread::current()->is_VM_thread() ||
1404                 OldSets_lock->owned_by_self(),
1405                 "master humongous set MT safety protocol at a safepoint");
1406     } else {
1407       guarantee(Heap_lock->owned_by_self(),
1408                 "master humongous set MT safety protocol outside a safepoint");
1409     }
1410   }
1411   bool is_correct_type(HeapRegion* hr) { return hr->is_humongous(); }
1412   const char* get_description() { return "Humongous Regions"; }
1413 };
1414 
1415 G1CollectedHeap::G1CollectedHeap() :
1416   CollectedHeap(),
1417   _young_gen_sampling_thread(NULL),
1418   _workers(NULL),
1419   _card_table(NULL),
1420   _collection_pause_end(Ticks::now()),
1421   _soft_ref_policy(),
1422   _old_set("Old Region Set", new OldRegionSetChecker()),
1423   _archive_set("Archive Region Set", new ArchiveRegionSetChecker()),
1424   _humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
1425   _bot(NULL),
1426   _listener(),
1427   _numa(G1NUMA::create()),
1428   _hrm(NULL),
1429   _allocator(NULL),
1430   _verifier(NULL),
1431   _summary_bytes_used(0),
1432   _bytes_used_during_gc(0),
1433   _archive_allocator(NULL),
1434   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1435   _old_evac_stats("Old", OldPLABSize, PLABWeight),
1436   _expand_heap_after_alloc_failure(true),
1437   _g1mm(NULL),
1438   _humongous_reclaim_candidates(),
1439   _has_humongous_reclaim_candidates(false),
1440   _hr_printer(),


1950       MemRegion mr(dummy_obj, word_size);
1951       CollectedHeap::fill_with_object(mr);
1952     } else {
1953       // If we can't allocate once, we probably cannot allocate
1954       // again. Let's get out of the loop.
1955       break;
1956     }
1957   }
1958 }
1959 #endif // !PRODUCT
1960 
1961 void G1CollectedHeap::increment_old_marking_cycles_started() {
1962   assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
1963          _old_marking_cycles_started == _old_marking_cycles_completed + 1,
1964          "Wrong marking cycle count (started: %d, completed: %d)",
1965          _old_marking_cycles_started, _old_marking_cycles_completed);
1966 
1967   _old_marking_cycles_started++;
1968 }
1969 
1970 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent,
1971                                                              bool liveness_completed) {
1972   MonitorLocker ml(G1OldGCCount_lock, Mutex::_no_safepoint_check_flag);
1973 
1974   // We assume that if concurrent == true, then the caller is a
1975   // concurrent thread that has joined the Suspendible Thread
1976   // Set. If there's ever a cheap way to check this, we should add an
1977   // assert here.
1978 
1979   // Given that this method is called at the end of a Full GC or of a
1980   // concurrent cycle, and those can be nested (i.e., a Full GC can
1981   // interrupt a concurrent cycle), the number of full collections
1982   // completed should be either one (in the case where there was no
1983   // nesting) or two (when a Full GC interrupted a concurrent cycle)
1984   // behind the number of full collections started.
1985 
1986   // This is the case for the inner caller, i.e. a Full GC.
1987   assert(concurrent ||
1988          (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
1989          (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
1990          "for inner caller (Full GC): _old_marking_cycles_started = %u "
1991          "is inconsistent with _old_marking_cycles_completed = %u",
1992          _old_marking_cycles_started, _old_marking_cycles_completed);
1993 
1994   // This is the case for the outer caller, i.e. the concurrent cycle.
1995   assert(!concurrent ||
1996          (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
1997          "for outer caller (concurrent cycle): "
1998          "_old_marking_cycles_started = %u "
1999          "is inconsistent with _old_marking_cycles_completed = %u",
2000          _old_marking_cycles_started, _old_marking_cycles_completed);
2001 
2002   _old_marking_cycles_completed += 1;
2003   if (liveness_completed) {
2004     next_whole_heap_examined();
2005   }
2006 
2007   // We need to clear the "in_progress" flag in the CM thread before
2008   // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2009   // is set) so that if a waiter requests another System.gc() it doesn't
2010   // incorrectly see that a marking cycle is still in progress.
2011   if (concurrent) {
2012     _cm_thread->set_idle();
2013   }
2014 
2015   // Notify threads waiting in System.gc() (with ExplicitGCInvokesConcurrent)
2016   // for a full GC to finish that their wait is over.
2017   ml.notify_all();
2018 }
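
A small walkthrough (illustrative sequence, not JDK code) of how the two counters evolve in the nested case described in the comments above, where a Full GC interrupts a concurrent cycle:

  uint started = 0, completed = 0;

  started++;    // a concurrent cycle starts an old marking cycle
  started++;    // a Full GC interrupts it and starts another

  // The Full GC (inner caller) completes first:
  // started == completed + 2 here, which the inner-caller assert allows.
  completed++;

  // The interrupted concurrent cycle (outer caller) then completes:
  // started == completed + 1 here, matching the outer-caller assert.
  completed++;

  // Both counters rejoin once the outer caller has completed.
  assert(started == completed, "invariant");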
2019 
2020 void G1CollectedHeap::collect(GCCause::Cause cause) {
2021   try_collect(cause);
2022 }
2023 
2024 // Return true if (x < y) with allowance for wraparound.
2025 static bool gc_counter_less_than(uint x, uint y) {
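
The body of gc_counter_less_than() falls outside this hunk. For context, one common way to write such a wraparound-tolerant comparison of unsigned counters (a sketch of the technique, not necessarily the exact body) is:

  // If x is numerically behind y by less than half the uint range, the
  // unsigned difference (x - y) wraps around to a value above UINT_MAX / 2.
  static bool gc_counter_less_than_sketch(uint x, uint y) {
    return (x - y) > (UINT_MAX / 2);
  }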


2354 }
2355 
2356 // For G1, TLABs should not contain humongous objects, so the maximum TLAB size
2357 // must be equal to the humongous object limit.
2358 size_t G1CollectedHeap::max_tlab_size() const {
2359   return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
2360 }
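
As a worked example of the comment above, assuming the usual G1 rule that the humongous threshold is half a region and the default 8-byte object alignment (MinObjAlignment == 1 word):

  //   -XX:G1HeapRegionSize=2m  =>  GrainWords = 2M / 8 = 262144 words
  //   _humongous_object_threshold_in_words = 262144 / 2 = 131072 words (1 MB)
  //   max_tlab_size() = align_down(131072, 1) = 131072 words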
2361 
2362 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2363   return _allocator->unsafe_max_tlab_alloc();
2364 }
2365 
2366 size_t G1CollectedHeap::max_capacity() const {
2367   return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
2368 }
2369 
2370 size_t G1CollectedHeap::max_reserved_capacity() const {
2371   return _hrm->max_length() * HeapRegion::GrainBytes;
2372 }
2373 













2374 void G1CollectedHeap::deduplicate_string(oop str) {
2375   assert(java_lang_String::is_instance(str), "invariant");
2376 
2377   if (G1StringDedup::is_enabled()) {
2378     G1StringDedup::deduplicate(str);
2379   }
2380 }
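
G1StringDedup::is_enabled() reflects the -XX:+UseStringDeduplication flag, so the call above is a no-op unless string deduplication has been turned on, for example:

  java -XX:+UseG1GC -XX:+UseStringDeduplication ...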
2381 
2382 void G1CollectedHeap::prepare_for_verify() {
2383   _verifier->prepare_for_verify();
2384 }
2385 
2386 void G1CollectedHeap::verify(VerifyOption vo) {
2387   _verifier->verify(vo);
2388 }
2389 
2390 bool G1CollectedHeap::supports_concurrent_gc_breakpoints() const {
2391   return true;
2392 }
2393 


2616     ensure_parsability(true);
2617     Tickspan dt = Ticks::now() - start;
2618     phase_times()->record_prepare_tlab_time_ms(dt.seconds() * MILLIUNITS);
2619   }
2620 
2621   if (!full) {
2622     // Flush dirty card queues to qset, so later phases don't need to account
2623     // for partially filled per-thread queues and such.  Not needed for full
2624     // collections, which ignore those logs.
2625     Ticks start = Ticks::now();
2626     G1BarrierSet::dirty_card_queue_set().concatenate_logs();
2627     Tickspan dt = Ticks::now() - start;
2628     phase_times()->record_concatenate_dirty_card_logs_time_ms(dt.seconds() * MILLIUNITS);
2629   }
2630 }
2631 
2632 void G1CollectedHeap::gc_epilogue(bool full) {
2633   // Update common counters.
2634   if (full) {
2635     // Update the number of full collections that have been completed.
2636     increment_old_marking_cycles_completed(false /* concurrent */, true /* liveness_completed */);
2637   }
2638 
2639   // We are at the end of the GC. The total collections counter has already been incremented.
2640   rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
2641 
2642   // FIXME: what is this about?
2643   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2644   // is set.
2645 #if COMPILER2_OR_JVMCI
2646   assert(DerivedPointerTable::is_empty(), "derived pointer present");
2647 #endif
2648 
2649   double start = os::elapsedTime();
2650   resize_all_tlabs();
2651   phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2652 
2653   MemoryService::track_memory_usage();
2654   // We have just completed a GC. Update the soft reference
2655   // policy with the new heap occupancy
2656   Universe::update_heap_info_at_gc();
2657 
2658   // Print NUMA statistics.
2659   _numa->print_statistics();
2660 
2661   _collection_pause_end = Ticks::now();
2662 }
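
The _collection_pause_end timestamp recorded above takes over the role of the policy-side collection_pause_end_millis() that the removed millis_since_last_gc() relied on. A hypothetical consumer (illustrative helper, not part of this change) could convert it to elapsed milliseconds like this:

  // Sketch only: converts a Ticks timestamp into milliseconds elapsed since it.
  static jlong millis_since_pause_end(const Ticks& collection_pause_end) {
    Tickspan elapsed = Ticks::now() - collection_pause_end;
    return (jlong)(elapsed.seconds() * MILLIUNITS);
  }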
2663 
2664 void G1CollectedHeap::verify_numa_regions(const char* desc) {
2665   LogTarget(Trace, gc, heap, verify) lt;
2666 
2667   if (lt.is_enabled()) {
2668     LogStream ls(lt);
2669     // Iterate all heap regions to print whether each region's preferred numa id matches its actual numa id.
2670     G1NodeIndexCheckClosure cl(desc, _numa, &ls);
2671     heap_region_iterate(&cl);
2672   }
2673 }
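
The LogTarget guard above means the per-region NUMA report is produced only when trace logging for the gc+heap+verify tag set is enabled, for example with:

  java -Xlog:gc+heap+verify=trace ...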
2674 
2675 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
2676                                                uint gc_count_before,
2677                                                bool* succeeded,
2678                                                GCCause::Cause gc_cause) {
2679   assert_heap_not_locked_and_not_at_safepoint();
2680   VM_G1CollectForAllocation op(word_size,
2681                                gc_count_before,

