--- old/src/hotspot/share/gc/epsilon/epsilonHeap.hpp 2020-08-07 13:15:55.333975830 +0200 +++ new/src/hotspot/share/gc/epsilon/epsilonHeap.hpp 2020-08-07 13:15:55.201973732 +0200 @@ -129,11 +129,6 @@ virtual void prepare_for_verify() {} virtual void verify(VerifyOption option) {} - virtual jlong millis_since_last_gc() { - // Report time since the VM start - return os::elapsed_counter() / NANOSECS_PER_MILLISEC; - } - MemRegion reserved_region() const { return _reserved; } bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); } --- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2020-08-07 13:15:55.813983464 +0200 +++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2020-08-07 13:15:55.685981429 +0200 @@ -1417,6 +1417,7 @@ _young_gen_sampling_thread(NULL), _workers(NULL), _card_table(NULL), + _collection_pause_end(Ticks::now()), _soft_ref_policy(), _old_set("Old Region Set", new OldRegionSetChecker()), _archive_set("Archive Region Set", new ArchiveRegionSetChecker()), @@ -1966,7 +1967,8 @@ _old_marking_cycles_started++; } -void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) { +void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent, + bool whole_heap_examined) { MonitorLocker ml(G1OldGCCount_lock, Mutex::_no_safepoint_check_flag); // We assume that if concurrent == true, then the caller is a @@ -1998,6 +2000,10 @@ _old_marking_cycles_started, _old_marking_cycles_completed); _old_marking_cycles_completed += 1; + if (whole_heap_examined) { + // Signal that we have completed a visit to all live objects. 
+ record_whole_heap_examined_timestamp(); + } // We need to clear the "in_progress" flag in the CM thread before // we wake up any waiters (especially when ExplicitInvokesConcurrent @@ -2366,19 +2372,6 @@ return _hrm->max_length() * HeapRegion::GrainBytes; } -jlong G1CollectedHeap::millis_since_last_gc() { - // See the notes in GenCollectedHeap::millis_since_last_gc() - // for more information about the implementation. - jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) - - _policy->collection_pause_end_millis(); - if (ret_val < 0) { - log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT - ". returning zero instead.", ret_val); - return 0; - } - return ret_val; -} - void G1CollectedHeap::deduplicate_string(oop str) { assert(java_lang_String::is_instance(str), "invariant"); @@ -2641,7 +2634,7 @@ // Update common counters. if (full) { // Update the number of full collections that have been completed. - increment_old_marking_cycles_completed(false /* concurrent */); + increment_old_marking_cycles_completed(false /* concurrent */, true /* whole_heap_examined */); } // We are at the end of the GC. Total collections has already been increased. @@ -2665,6 +2658,8 @@ // Print NUMA statistics. _numa->print_statistics(); + + _collection_pause_end = Ticks::now(); } void G1CollectedHeap::verify_numa_regions(const char* desc) { --- old/src/hotspot/share/gc/g1/g1CollectedHeap.hpp 2020-08-07 13:15:56.385992562 +0200 +++ new/src/hotspot/share/gc/g1/g1CollectedHeap.hpp 2020-08-07 13:15:56.257990526 +0200 @@ -159,6 +159,8 @@ WorkGang* _workers; G1CardTable* _card_table; + Ticks _collection_pause_end; + SoftRefPolicy _soft_ref_policy; static size_t _humongous_object_threshold_in_words; @@ -644,7 +646,10 @@ // the G1OldGCCount_lock in case a Java thread is waiting for a full // GC to happen (e.g., it called System.gc() with // +ExplicitGCInvokesConcurrent). 
- void increment_old_marking_cycles_completed(bool concurrent); + // whole_heap_examined should indicate that during that old marking + // cycle the whole heap has been examined for live objects (as opposed + // to only parts, or aborted before completion). + void increment_old_marking_cycles_completed(bool concurrent, bool whole_heap_examined); uint old_marking_cycles_completed() { return _old_marking_cycles_completed; @@ -1288,8 +1293,7 @@ // Return the size of reserved memory. Returns different value than max_capacity() when AllocateOldGenAt is used. virtual size_t max_reserved_capacity() const; - virtual jlong millis_since_last_gc(); - + Tickspan time_since_last_collection() const { return Ticks::now() - _collection_pause_end; } // Convenience function to be used in situations where the heap type can be // asserted to be this type. --- old/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp 2020-08-07 13:15:56.894000643 +0200 +++ new/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp 2020-08-07 13:15:56.761998544 +0200 @@ -268,7 +268,8 @@ // called System.gc() with +ExplicitGCInvokesConcurrent). 
{ SuspendibleThreadSetJoiner sts_join; - g1h->increment_old_marking_cycles_completed(true /* concurrent */); + g1h->increment_old_marking_cycles_completed(true /* concurrent */, + !_cm->has_aborted() /* whole_heap_examined */); _cm->concurrent_cycle_end(); ConcurrentGCBreakpoints::notify_active_to_idle(); --- old/src/hotspot/share/gc/g1/g1Policy.cpp 2020-08-07 13:15:57.370008212 +0200 +++ new/src/hotspot/share/gc/g1/g1Policy.cpp 2020-08-07 13:15:57.238006113 +0200 @@ -60,7 +60,6 @@ _ihop_control(create_ihop_control(&_predictor)), _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)), _full_collection_start_sec(0.0), - _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC), _young_list_target_length(0), _young_list_fixed_length(0), _young_list_max_length(0), @@ -648,8 +647,6 @@ record_pause(this_pause, end_time_sec - pause_time_ms / 1000.0, end_time_sec); - _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; - if (is_concurrent_start_pause(this_pause)) { record_concurrent_mark_init_end(0.0); } else { --- old/src/hotspot/share/gc/g1/g1Policy.hpp 2020-08-07 13:15:57.854015910 +0200 +++ new/src/hotspot/share/gc/g1/g1Policy.hpp 2020-08-07 13:15:57.730013937 +0200 @@ -74,8 +74,6 @@ double _full_collection_start_sec; - jlong _collection_pause_end_millis; - uint _young_list_target_length; uint _young_list_fixed_length; @@ -260,8 +258,6 @@ // percentage of the current heap capacity. double reclaimable_bytes_percent(size_t reclaimable_bytes) const; - jlong collection_pause_end_millis() { return _collection_pause_end_millis; } - private: void clear_collection_set_candidates(); // Sets up marking if proper conditions are met. 
--- old/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp 2020-08-07 13:15:58.330023479 +0200 +++ new/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp 2020-08-07 13:15:58.206021506 +0200 @@ -56,14 +56,15 @@ } bool G1YoungRemSetSamplingThread::should_start_periodic_gc() { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); // If we are currently in a concurrent mark we are going to uncommit memory soon. - if (G1CollectedHeap::heap()->concurrent_mark()->cm_thread()->during_cycle()) { + if (g1h->concurrent_mark()->cm_thread()->during_cycle()) { log_debug(gc, periodic)("Concurrent cycle in progress. Skipping."); return false; } // Check if enough time has passed since the last GC. - uintx time_since_last_gc = (uintx)Universe::heap()->millis_since_last_gc(); + uintx time_since_last_gc = (uintx)g1h->time_since_last_collection().milliseconds(); if ((time_since_last_gc < G1PeriodicGCInterval)) { log_debug(gc, periodic)("Last GC occurred " UINTX_FORMAT "ms before which is below threshold " UINTX_FORMAT "ms. 
Skipping.", time_since_last_gc, G1PeriodicGCInterval); --- old/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp 2020-08-07 13:15:58.806031048 +0200 +++ new/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp 2020-08-07 13:15:58.686029138 +0200 @@ -559,10 +559,6 @@ return block_start(addr) == addr; } -jlong ParallelScavengeHeap::millis_since_last_gc() { - return PSParallelCompact::millis_since_last_gc(); -} - void ParallelScavengeHeap::prepare_for_verify() { ensure_parsability(false); // no need to retire TLABs for verification } --- old/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp 2020-08-07 13:15:59.286038677 +0200 +++ new/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp 2020-08-07 13:15:59.162036708 +0200 @@ -213,8 +213,6 @@ HeapWord* block_start(const void* addr) const; bool block_is_obj(const HeapWord* addr) const; - jlong millis_since_last_gc(); - void prepare_for_verify(); PSHeapSummary create_ps_heap_summary(); virtual void print_on(outputStream* st) const; --- old/src/hotspot/share/gc/parallel/psParallelCompact.cpp 2020-08-07 13:15:59.762046247 +0200 +++ new/src/hotspot/share/gc/parallel/psParallelCompact.cpp 2020-08-07 13:15:59.638044277 +0200 @@ -843,7 +843,6 @@ elapsedTimer PSParallelCompact::_accumulated_time; unsigned int PSParallelCompact::_total_invocations = 0; unsigned int PSParallelCompact::_maximum_compaction_gc_num = 0; -jlong PSParallelCompact::_time_of_last_gc = 0; CollectorCounters* PSParallelCompact::_counters = NULL; ParMarkBitMap PSParallelCompact::_mark_bitmap; ParallelCompactData PSParallelCompact::_summary_data; @@ -1070,8 +1069,8 @@ heap->gen_mangle_unused_area(); } - // Update time of last GC - reset_millis_since_last_gc(); + // Signal that we have completed a visit to all live objects. 
+ Universe::heap()->record_whole_heap_examined_timestamp(); } HeapWord* @@ -3197,25 +3196,6 @@ } } -jlong PSParallelCompact::millis_since_last_gc() { - // We need a monotonically non-decreasing time in ms but - // os::javaTimeMillis() does not guarantee monotonicity. - jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; - jlong ret_val = now - _time_of_last_gc; - // XXX See note in genCollectedHeap::millis_since_last_gc(). - if (ret_val < 0) { - NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, ret_val);) - return 0; - } - return ret_val; -} - -void PSParallelCompact::reset_millis_since_last_gc() { - // We need a monotonically non-decreasing time in ms but - // os::javaTimeMillis() does not guarantee monotonicity. - _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; -} - ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full() { if (source() != copy_destination()) { --- old/src/hotspot/share/gc/parallel/psParallelCompact.hpp 2020-08-07 13:16:00.298054770 +0200 +++ new/src/hotspot/share/gc/parallel/psParallelCompact.hpp 2020-08-07 13:16:00.174052798 +0200 @@ -1009,7 +1009,6 @@ static elapsedTimer _accumulated_time; static unsigned int _total_invocations; static unsigned int _maximum_compaction_gc_num; - static jlong _time_of_last_gc; // ms static CollectorCounters* _counters; static ParMarkBitMap _mark_bitmap; static ParallelCompactData _summary_data; @@ -1123,9 +1122,6 @@ static void enqueue_dense_prefix_tasks(TaskQueue& task_queue, uint parallel_gc_threads); - // Reset time since last full gc - static void reset_millis_since_last_gc(); - #ifndef PRODUCT // Print generic summary data static void print_generic_summary_data(ParallelCompactData& summary_data, @@ -1249,9 +1245,6 @@ // Return the SpaceId for the given address. static SpaceId space_id(HeapWord* addr); - // Time since last full gc (in milliseconds). 
- static jlong millis_since_last_gc(); - static void print_on_error(outputStream* st); #ifndef PRODUCT --- old/src/hotspot/share/gc/serial/defNewGeneration.cpp 2020-08-07 13:16:00.794062656 +0200 +++ new/src/hotspot/share/gc/serial/defNewGeneration.cpp 2020-08-07 13:16:00.666060619 +0200 @@ -680,12 +680,6 @@ from()->set_concurrent_iteration_safe_limit(from()->top()); to()->set_concurrent_iteration_safe_limit(to()->top()); - // We need to use a monotonically non-decreasing time in ms - // or we will see time-warp warnings and os::javaTimeMillis() - // does not guarantee monotonicity. - jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; - update_time_of_last_gc(now); - heap->trace_heap_after_gc(&gc_tracer); _gc_timer->register_gc_end(); --- old/src/hotspot/share/gc/serial/genMarkSweep.cpp 2020-08-07 13:16:01.278070348 +0200 +++ new/src/hotspot/share/gc/serial/genMarkSweep.cpp 2020-08-07 13:16:01.150068315 +0200 @@ -137,13 +137,8 @@ // input to soft ref clearing policy at the next gc. Universe::update_heap_info_at_gc(); - // Update time of last gc for all generations we collected - // (which currently is all the generations in the heap). - // We need to use a monotonically non-decreasing time in ms - // or we will see time-warp warnings and os::javaTimeMillis() - // does not guarantee monotonicity. - jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; - gch->update_time_of_last_gc(now); + // Signal that we have completed a visit to all live objects. 
+ Universe::heap()->record_whole_heap_examined_timestamp(); gch->trace_heap_after_gc(_gc_tracer); } --- old/src/hotspot/share/gc/shared/collectedHeap.cpp 2020-08-07 13:16:01.758077979 +0200 +++ new/src/hotspot/share/gc/shared/collectedHeap.cpp 2020-08-07 13:16:01.634076009 +0200 @@ -191,6 +191,7 @@ CollectedHeap::CollectedHeap() : _is_gc_active(false), + _last_whole_heap_examined_time_ns(os::javaTimeNanos()), _total_collections(0), _total_full_collections(0), _gc_cause(GCCause::_no_gc), @@ -488,6 +489,14 @@ } } +jlong CollectedHeap::millis_since_last_whole_heap_examined() { + return (os::javaTimeNanos() - _last_whole_heap_examined_time_ns) / NANOSECS_PER_MILLISEC; +} + +void CollectedHeap::record_whole_heap_examined_timestamp() { + _last_whole_heap_examined_time_ns = os::javaTimeNanos(); +} + void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) { assert(timer != NULL, "timer is null"); if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) { --- old/src/hotspot/share/gc/shared/collectedHeap.hpp 2020-08-07 13:16:02.254085865 +0200 +++ new/src/hotspot/share/gc/shared/collectedHeap.hpp 2020-08-07 13:16:02.122083768 +0200 @@ -112,6 +112,12 @@ // Used for filler objects (static, but initialized in ctor). static size_t _filler_array_max_size; + // Last time the whole heap has been examined in support of RMI + // MaxObjectInspectionAge. + // This timestamp must be monotonically non-decreasing to avoid + // time-warp warnings. + jlong _last_whole_heap_examined_time_ns; + unsigned int _total_collections; // ... started unsigned int _total_full_collections; // ... started NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;) @@ -404,15 +410,18 @@ // Keep alive an object that was loaded with AS_NO_KEEPALIVE. virtual void keep_alive(oop obj) {} - // Returns the longest time (in ms) that has elapsed since the last - // time that any part of the heap was examined by a garbage collection. 
- virtual jlong millis_since_last_gc() = 0; - // Perform any cleanup actions necessary before allowing a verification. virtual void prepare_for_verify() = 0; - // Generate any dumps preceding or following a full gc + // Returns the longest time (in ms) that has elapsed since the last + // time that the whole heap has been examined by a garbage collection. + jlong millis_since_last_whole_heap_examined(); + // GC should call this when the next whole heap analysis has completed to + // satisfy above requirement. + void record_whole_heap_examined_timestamp(); + private: + // Generate any dumps preceding or following a full gc void full_gc_dump(GCTimer* timer, bool before); virtual void initialize_serviceability() = 0; --- old/src/hotspot/share/gc/shared/genCollectedHeap.cpp 2020-08-07 13:16:02.746093687 +0200 +++ new/src/hotspot/share/gc/shared/genCollectedHeap.cpp 2020-08-07 13:16:02.618091650 +0200 @@ -1358,37 +1358,3 @@ } return oop(result); } - -class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure { - jlong _time; // in ms - jlong _now; // in ms - - public: - GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { } - - jlong time() { return _time; } - - void do_generation(Generation* gen) { - _time = MIN2(_time, gen->time_of_last_gc(_now)); - } -}; - -jlong GenCollectedHeap::millis_since_last_gc() { - // javaTimeNanos() is guaranteed to be monotonically non-decreasing - // provided the underlying platform provides such a time source - // (and it is bug free). So we still have to guard against getting - // back a time later than 'now'. - jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; - GenTimeOfLastGCClosure tolgc_cl(now); - // iterate over generations getting the oldest - // time that a generation was collected - generation_iterate(&tolgc_cl, false); - - jlong retVal = now - tolgc_cl.time(); - if (retVal < 0) { - log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT - ". 
returning zero instead.", retVal); - return 0; - } - return retVal; -} --- old/src/hotspot/share/gc/shared/genCollectedHeap.hpp 2020-08-07 13:16:03.230101381 +0200 +++ new/src/hotspot/share/gc/shared/genCollectedHeap.hpp 2020-08-07 13:16:03.110099473 +0200 @@ -290,10 +290,6 @@ // Ensure parsability: override virtual void ensure_parsability(bool retire_tlabs); - // Time in ms since the longest time a collector ran in - // in any generation. - virtual jlong millis_since_last_gc(); - // Total number of full collections completed. unsigned int total_full_collections_completed() { assert(_full_collections_completed <= _total_full_collections, @@ -306,12 +302,6 @@ // Update above counter, as appropriate, at the end of a concurrent GC cycle unsigned int update_full_collections_completed(unsigned int count); - // Update "time of last gc" for all generations to "now". - void update_time_of_last_gc(jlong now) { - _young_gen->update_time_of_last_gc(now); - _old_gen->update_time_of_last_gc(now); - } - // Update the gc statistics for each generation. void update_gc_stats(Generation* current_generation, bool full) { _old_gen->update_gc_stats(current_generation, full); --- old/src/hotspot/share/gc/shared/generation.hpp 2020-08-07 13:16:03.726109264 +0200 +++ new/src/hotspot/share/gc/shared/generation.hpp 2020-08-07 13:16:03.602107293 +0200 @@ -75,7 +75,6 @@ class Generation: public CHeapObj { friend class VMStructs; private: - jlong _time_of_last_gc; // time when last gc on this generation happened (ms) MemRegion _prev_used_region; // for collectors that want to "remember" a value for // used region at some specific point during collection. @@ -363,25 +362,6 @@ // activity to make them parsable again. The default is to do nothing. virtual void ensure_parsability() {} - // Time (in ms) when we were last collected or now if a collection is - // in progress. 
- virtual jlong time_of_last_gc(jlong now) { - // Both _time_of_last_gc and now are set using a time source - // that guarantees monotonically non-decreasing values provided - // the underlying platform provides such a source. So we still - // have to guard against non-monotonicity. - NOT_PRODUCT( - if (now < _time_of_last_gc) { - log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT, _time_of_last_gc, now); - } - ) - return _time_of_last_gc; - } - - virtual void update_time_of_last_gc(jlong now) { - _time_of_last_gc = now; - } - // Generations may keep statistics about collection. This method // updates those statistics. current_generation is the generation // that was most recently collected. This allows the generation to --- old/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp 2020-08-07 13:16:04.202116830 +0200 +++ new/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp 2020-08-07 13:16:04.078114859 +0200 @@ -235,6 +235,9 @@ // global soft refs policy, and we better report it every time heap // usage goes down. Universe::update_heap_info_at_gc(); + + // Signal that we have completed a visit to all live objects. 
+ Universe::heap()->record_whole_heap_examined_timestamp(); } // Disable forced counters update, and update counters one more time --- old/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp 2020-08-07 13:16:04.702124776 +0200 +++ new/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp 2020-08-07 13:16:04.574122742 +0200 @@ -1193,12 +1193,6 @@ return BlockLocationPrinter::print_location(st, addr); } -jlong ShenandoahHeap::millis_since_last_gc() { - double v = heuristics()->time_since_last_gc() * 1000; - assert(0 <= v && v <= max_jlong, "value should fit: %f", v); - return (jlong)v; -} - void ShenandoahHeap::prepare_for_verify() { if (SafepointSynchronize::is_at_safepoint() && UseTLAB) { labs_make_parsable(); --- old/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp 2020-08-07 13:16:05.242133359 +0200 +++ new/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp 2020-08-07 13:16:05.114131325 +0200 @@ -556,9 +556,6 @@ // Keep alive an object that was loaded with AS_NO_KEEPALIVE. void keep_alive(oop obj); - // Used by RMI - jlong millis_since_last_gc(); - // ---------- Safepoint interface hooks // public: --- old/src/hotspot/share/gc/z/zCollectedHeap.cpp 2020-08-07 13:16:05.734141176 +0200 +++ new/src/hotspot/share/gc/z/zCollectedHeap.cpp 2020-08-07 13:16:05.610139208 +0200 @@ -277,10 +277,6 @@ return _runtime_workers.workers(); } -jlong ZCollectedHeap::millis_since_last_gc() { - return ZStatCycle::time_since_last() / MILLIUNITS; -} - void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const { tc->do_thread(_director); tc->do_thread(_driver); --- old/src/hotspot/share/gc/z/zCollectedHeap.hpp 2020-08-07 13:16:06.226148998 +0200 +++ new/src/hotspot/share/gc/z/zCollectedHeap.hpp 2020-08-07 13:16:06.102147027 +0200 @@ -107,8 +107,6 @@ virtual WorkGang* get_safepoint_workers(); - virtual jlong millis_since_last_gc(); - virtual void gc_threads_do(ThreadClosure* tc) const; virtual VirtualSpaceSummary create_heap_space_summary(); --- old/src/hotspot/share/gc/z/zDriver.cpp 
2020-08-07 13:16:06.702156561 +0200 +++ new/src/hotspot/share/gc/z/zDriver.cpp 2020-08-07 13:16:06.578154590 +0200 @@ -381,6 +381,9 @@ // Update data used by soft reference policy Universe::update_heap_info_at_gc(); + + // Signal that we have completed a visit to all live objects + Universe::heap()->record_whole_heap_examined_timestamp(); } }; --- old/src/hotspot/share/prims/jvm.cpp 2020-08-07 13:16:07.694172324 +0200 +++ new/src/hotspot/share/prims/jvm.cpp 2020-08-07 13:16:07.566170290 +0200 @@ -502,7 +502,7 @@ JVM_LEAF(jlong, JVM_MaxObjectInspectionAge(void)) JVMWrapper("JVM_MaxObjectInspectionAge"); - return Universe::heap()->millis_since_last_gc(); + return Universe::heap()->millis_since_last_whole_heap_examined(); JVM_END