--- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2018-11-12 10:37:38.936627350 +0100 +++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2018-11-12 10:37:38.510614156 +0100 @@ -1151,6 +1151,8 @@ } void G1CollectedHeap::resize_heap_if_necessary() { + assert_at_safepoint_on_vm_thread(); + // Capacity, free and used after the GC counted as full regions to // include the waste in the following calculations. const size_t capacity_after_gc = capacity(); @@ -1973,6 +1975,7 @@ switch (cause) { case GCCause::_gc_locker: return GCLockerInvokesConcurrent; case GCCause::_g1_humongous_allocation: return true; + case GCCause::_g1_periodic_collection: return G1PeriodicGCInvokesConcurrent; default: return is_user_requested_concurrent_full_gc(cause); } } --- old/src/hotspot/share/gc/g1/g1Policy.cpp 2018-11-12 10:37:40.583678361 +0100 +++ new/src/hotspot/share/gc/g1/g1Policy.cpp 2018-11-12 10:37:40.157665167 +0100 @@ -714,20 +714,34 @@ } _free_regions_at_end_of_collection = _g1h->num_free_regions(); - // IHOP control wants to know the expected young gen length if it were not - // restrained by the heap reserve. Using the actual length would make the - // prediction too small and the limit the young gen every time we get to the - // predicted target occupancy. - size_t last_unrestrained_young_length = update_young_list_max_and_target_length(); + update_rs_lengths_prediction(); - update_ihop_prediction(app_time_ms / 1000.0, - _bytes_allocated_in_old_since_last_gc, - last_unrestrained_young_length * HeapRegion::GrainBytes, - this_pause_was_young_only); - _bytes_allocated_in_old_since_last_gc = 0; + // Do not update dynamic IHOP due to G1 periodic collection as it is highly likely + // that in this case we are not running in a "normal" operating mode. + if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) { + // IHOP control wants to know the expected young gen length if it were not + // restrained by the heap reserve. 
Using the actual length would make the + // prediction too small and the limit the young gen every time we get to the + // predicted target occupancy. + size_t last_unrestrained_young_length = update_young_list_max_and_target_length(); + + update_ihop_prediction(app_time_ms / 1000.0, + _bytes_allocated_in_old_since_last_gc, + last_unrestrained_young_length * HeapRegion::GrainBytes, + this_pause_was_young_only); + _bytes_allocated_in_old_since_last_gc = 0; - _ihop_control->send_trace_event(_g1h->gc_tracer_stw()); + _ihop_control->send_trace_event(_g1h->gc_tracer_stw()); + } else { + // Any garbage collection triggered as periodic collection resets the time-to-mixed + // measurement. Periodic collection typically means that the application is "inactive", i.e. + // the marking threads may have received an uncharacteristic amount of cpu time + // for completing the marking, i.e. are faster than expected. + // This skews the predicted marking length towards smaller values which might cause + // the mark start being too late. + _initial_mark_to_mixed.reset(); + } // Note that _mmu_tracker->max_gc_time() returns the time in seconds. 
double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0; @@ -1078,7 +1092,9 @@ _initial_mark_to_mixed.add_pause(end - start); break; case InitialMarkGC: - _initial_mark_to_mixed.record_initial_mark_end(end); + if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) { + _initial_mark_to_mixed.record_initial_mark_end(end); + } break; case MixedGC: _initial_mark_to_mixed.record_mixed_gc_start(start); --- old/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp 2018-11-12 10:37:42.185727978 +0100 +++ new/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp 2018-11-12 10:37:41.765714970 +0100 @@ -25,6 +25,8 @@ #include "precompiled.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectionSet.hpp" +#include "gc/g1/g1ConcurrentMark.inline.hpp" +#include "gc/g1/g1ConcurrentMarkThread.inline.hpp" #include "gc/g1/g1Policy.hpp" #include "gc/g1/g1YoungRemSetSamplingThread.hpp" #include "gc/g1/heapRegion.inline.hpp" @@ -37,7 +39,8 @@ _monitor(Mutex::nonleaf, "G1YoungRemSetSamplingThread monitor", true, - Monitor::_safepoint_check_never) { + Monitor::_safepoint_check_never), + _last_periodic_gc_attempt_s(os::elapsedTime()) { set_name("G1 Young RemSet Sampling"); create_and_start(); } @@ -45,11 +48,39 @@ void G1YoungRemSetSamplingThread::sleep_before_next_cycle() { MutexLockerEx x(&_monitor, Mutex::_no_safepoint_check_flag); if (!should_terminate()) { - uintx waitms = G1ConcRefinementServiceIntervalMillis; // 300, really should be? + uintx waitms = G1ConcRefinementServiceIntervalMillis; _monitor.wait(Mutex::_no_safepoint_check_flag, waitms); } } +bool G1YoungRemSetSamplingThread::should_start_periodic_gc() { + // If we are currently in a concurrent mark we are going to uncommit memory soon. + if (G1CollectedHeap::heap()->concurrent_mark()->cm_thread()->during_cycle()) { + log_debug(gc, periodic)("Concurrent cycle in progress. 
Skipping."); + return false; + } + + // Check if enough time has passed since the last GC. + uintx time_since_last_gc = (uintx)Universe::heap()->millis_since_last_gc(); + if ((G1PeriodicGCInterval == 0) || + (time_since_last_gc < G1PeriodicGCInterval)) { + log_debug(gc, periodic)("Last GC occurred " UINTX_FORMAT "ms earlier which is lower than threshold " UINTX_FORMAT "ms. Skipping.", + time_since_last_gc, G1PeriodicGCInterval); + return false; + } + + // Check if load is lower than max. + double recent_load; + if ((G1PeriodicGCSystemLoadThreshold > 0) && + (os::loadavg(&recent_load, 1) == -1 || recent_load > G1PeriodicGCSystemLoadThreshold)) { + log_debug(gc, periodic)("Load %1.2f is higher than threshold " UINTX_FORMAT ". Skipping.", + recent_load, G1PeriodicGCSystemLoadThreshold); + return false; + } + + return true; +} + void G1YoungRemSetSamplingThread::run_service() { double vtime_start = os::elapsedVTime(); @@ -62,6 +93,14 @@ _vtime_accum = 0.0; } + if ((os::elapsedTime() - _last_periodic_gc_attempt_s) > (G1PeriodicGCInterval / 1000.0)) { + log_debug(gc, periodic)("Checking for periodic GC."); + if (should_start_periodic_gc()) { + Universe::heap()->collect(GCCause::_g1_periodic_collection); + } + _last_periodic_gc_attempt_s = os::elapsedTime(); + } + sleep_before_next_cycle(); } } --- old/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.hpp 2018-11-12 10:37:43.772777130 +0100 +++ new/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.hpp 2018-11-12 10:37:43.352764122 +0100 @@ -43,6 +43,10 @@ private: Monitor _monitor; + double _last_periodic_gc_attempt_s; + + double _vtime_accum; // Accumulated virtual time. + void sample_young_list_rs_lengths(); void run_service(); @@ -50,7 +54,7 @@ void sleep_before_next_cycle(); - double _vtime_accum; // Accumulated virtual time. 
+ bool should_start_periodic_gc(); public: G1YoungRemSetSamplingThread(); --- old/src/hotspot/share/gc/g1/g1_globals.hpp 2018-11-12 10:37:45.351826035 +0100 +++ new/src/hotspot/share/gc/g1/g1_globals.hpp 2018-11-12 10:37:44.930812996 +0100 @@ -302,6 +302,23 @@ "Verify the code root lists attached to each heap region.") \ \ develop(bool, G1VerifyBitmaps, false, \ - "Verifies the consistency of the marking bitmaps") + "Verifies the consistency of the marking bitmaps") \ + \ + manageable(uintx, G1PeriodicGCInterval, 5 * 60 * 1000, \ + "Number of milliseconds after a previous GC to wait before " \ + "triggering a periodic gc. A value of zero disables periodically "\ + "enforced gc cycles.") \ + range(0, max_jlong) \ + \ + product(bool, G1PeriodicGCInvokesConcurrent, true, \ + "Determines the kind of periodic GC. Set to true to have G1 " \ + "perform a concurrent GC as periodic GC, otherwise use a STW " \ + "Full GC.") \ + \ + manageable(uintx, G1PeriodicGCSystemLoadThreshold, 0, \ + "Maximum recent system wide system load as returned by the 1m " \ + "value of getloadavg() at which G1 triggers a periodic GC. A " \ + "load above this value cancels a given periodic GC. 
A value of " \ + "zero disables this check.") \ #endif // SHARE_VM_GC_G1_G1_GLOBALS_HPP --- old/src/hotspot/share/gc/shared/gcCause.cpp 2018-11-12 10:37:46.939875219 +0100 +++ new/src/hotspot/share/gc/shared/gcCause.cpp 2018-11-12 10:37:46.519862211 +0100 @@ -102,6 +102,9 @@ case _g1_humongous_allocation: return "G1 Humongous Allocation"; + case _g1_periodic_collection: + return "G1 Periodic Collection"; + case _dcmd_gc_run: return "Diagnostic Command"; --- old/src/hotspot/share/gc/shared/gcCause.hpp 2018-11-12 10:37:48.539924774 +0100 +++ new/src/hotspot/share/gc/shared/gcCause.hpp 2018-11-12 10:37:48.115911642 +0100 @@ -75,6 +75,7 @@ _g1_inc_collection_pause, _g1_humongous_allocation, + _g1_periodic_collection, _dcmd_gc_run, --- old/src/hotspot/share/gc/shared/vmGCOperations.cpp 2018-11-12 10:37:50.121973772 +0100 +++ new/src/hotspot/share/gc/shared/vmGCOperations.cpp 2018-11-12 10:37:49.700960733 +0100 @@ -81,7 +81,6 @@ } bool VM_GC_Operation::doit_prologue() { - assert(Thread::current()->is_Java_thread(), "just checking"); assert(((_gc_cause != GCCause::_no_gc) && (_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause"); @@ -110,7 +109,6 @@ void VM_GC_Operation::doit_epilogue() { - assert(Thread::current()->is_Java_thread(), "just checking"); // Clean up old interpreter OopMap entries that were replaced // during the GC thread root traversal. 
OopMapCache::cleanup_old_entries(); --- old/src/hotspot/share/logging/logTag.hpp 2018-11-12 10:37:51.714023079 +0100 +++ new/src/hotspot/share/logging/logTag.hpp 2018-11-12 10:37:51.290009947 +0100 @@ -119,6 +119,7 @@ LOG_TAG(patch) \ LOG_TAG(path) \ LOG_TAG(perf) \ + LOG_TAG(periodic) \ LOG_TAG(phases) \ LOG_TAG(plab) \ LOG_TAG(preview) /* Trace loading of preview feature types */ \ --- old/test/hotspot/jtreg/TEST.groups 2018-11-12 10:37:53.321072852 +0100 +++ new/test/hotspot/jtreg/TEST.groups 2018-11-12 10:37:52.884059317 +0100 @@ -178,6 +178,7 @@ gc/epsilon/ \ gc/g1/ \ -gc/g1/ihop/TestIHOPErgo.java + -gc/g1/TestPeriodicCollection.java tier1_gc_2 = \ gc/ \ --- old/test/hotspot/jtreg/gc/g1/ihop/TestIHOPErgo.java 2018-11-12 10:37:54.909122035 +0100 +++ new/test/hotspot/jtreg/gc/g1/ihop/TestIHOPErgo.java 2018-11-12 10:37:54.467108346 +0100 @@ -81,7 +81,7 @@ runTest(128, 100, false); runTest(256, 50, false); runTest(512, 30, false); - runTest(64, 50, true); + runTest(64, 100, true); runTest(128, 200, true); runTest(256, 100, true); runTest(512, 50, true); --- /dev/null 2018-11-09 16:18:02.252665545 +0100 +++ new/test/hotspot/jtreg/gc/g1/TestPeriodicCollection.java 2018-11-12 10:37:56.061157715 +0100 @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test TestPeriodicCollection + * @requires vm.gc.G1 + * @summary Verify that heap shrinks when the application is idle. + * @library /test/lib / + * @modules java.base/jdk.internal.misc + * @modules java.management/sun.management + * @build sun.hotspot.WhiteBox + * @run driver ClassFileInstaller sun.hotspot.WhiteBox + * sun.hotspot.WhiteBox$WhiteBoxPermission + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:. -Xms32M -Xmx128M -XX:MinHeapFreeRatio=5 -XX:MaxHeapFreeRatio=25 -XX:+UseG1GC -XX:G1PeriodicGCInterval=3000 -XX:+G1PeriodicGCInvokesConcurrent -Xlog:gc,gc+periodic=debug,gc+ergo+heap=debug TestPeriodicCollection + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:. 
-Xms32M -Xmx128M -XX:MinHeapFreeRatio=5 -XX:MaxHeapFreeRatio=25 -XX:+UseG1GC -XX:G1PeriodicGCInterval=3000 -XX:-G1PeriodicGCInvokesConcurrent -Xlog:gc,gc+periodic=debug,gc+ergo+heap=debug TestPeriodicCollection + */ + +import com.sun.management.HotSpotDiagnosticMXBean; + +import gc.testlibrary.Helpers; + +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryUsage; +import java.util.ArrayList; +import java.util.List; +import java.text.NumberFormat; +import static jdk.test.lib.Asserts.*; + +import sun.hotspot.WhiteBox; + +public class TestPeriodicCollection { + + public static final String MIN_FREE_RATIO_FLAG_NAME = "MinHeapFreeRatio"; + public static final String MAX_FREE_RATIO_FLAG_NAME = "MaxHeapFreeRatio"; + + private static final List> garbage = new ArrayList(); + + private static final long TOTAL_MEMORY = Runtime.getRuntime().totalMemory(); + private static final long MAX_MEMORY = Runtime.getRuntime().maxMemory(); + private static final int IDLE_TIME = 10 * 1000; + + private static final int ARR_SIZE = 64 * 1024; // 64k + private static final int LISTS_COUNT = 10; + private static final int ARR_COUNT = (int) ((TOTAL_MEMORY / ARR_SIZE) / LISTS_COUNT); + + public static void main(String[] args) { + if (ARR_COUNT == 0) { + System.out.println("Skipped. Heap is too small"); + return; + } + + if (TOTAL_MEMORY + ARR_SIZE * ARR_COUNT > MAX_MEMORY) { + System.out.println("Skipped. Initial heap size is too close to max heap size."); + return; + } + + new TestPeriodicCollection().test(); + } + + private final void test() { + WhiteBox.getWhiteBox().fullGC(); + + MemoryUsagePrinter.printMemoryUsage("init"); + + allocate(); + MemoryUsagePrinter.printMemoryUsage("allocated"); + MemoryUsage muFull = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); + + garbage.clear(); + try { + Thread.sleep(IDLE_TIME); + } catch (InterruptedException ie) { + System.err.println("Skipped. 
Failed to wait for idle collection"); + } + MemoryUsagePrinter.printMemoryUsage("free"); + MemoryUsage muFree = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); + + assertLessThan(muFree.getCommitted(), muFull.getCommitted(), String.format( + "committed free heap size is not less than committed full heap size, heap hasn't been shrunk?%n" + + "%s = %s%n%s = %s", + MIN_FREE_RATIO_FLAG_NAME, + ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class) + .getVMOption(MIN_FREE_RATIO_FLAG_NAME).getValue(), + MAX_FREE_RATIO_FLAG_NAME, + ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class) + .getVMOption(MAX_FREE_RATIO_FLAG_NAME).getValue() + )); + } + + private void allocate() { + + for (int i = 0; i < LISTS_COUNT; i++) { + List stuff = new ArrayList(); + allocateList(stuff, ARR_COUNT, ARR_SIZE); + MemoryUsagePrinter.printMemoryUsage("allocate #" + (i+1)); + garbage.add(stuff); + } + } + + private static void allocateList(List garbage, int count, int size) { + for (int i = 0; i < count; i++) { + garbage.add(new byte[size]); + } + } +} + +/** + * Prints memory usage to standard output + */ +class MemoryUsagePrinter { + + public static final NumberFormat NF = Helpers.numberFormatter(); + + public static void printMemoryUsage(String label) { + MemoryUsage memusage = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); + float freeratio = 1f - (float) memusage.getUsed() / memusage.getCommitted(); + System.out.format("[%-24s] init: %-7s, used: %-7s, comm: %-7s, freeRatio ~= %.1f%%%n", + label, + NF.format(memusage.getInit()), + NF.format(memusage.getUsed()), + NF.format(memusage.getCommitted()), + freeratio * 100 + ); + } +}