--- old/make/autoconf/hotspot.m4	2020-01-17 17:08:21.592136459 +0100
+++ new/make/autoconf/hotspot.m4	2020-01-17 17:08:21.488136465 +0100
@@ -25,7 +25,7 @@
 
 # All valid JVM features, regardless of platform
 VALID_JVM_FEATURES="compiler1 compiler2 zero minimal dtrace jvmti jvmci \
-    graal vm-structs jni-check services management cmsgc epsilongc g1gc parallelgc serialgc zgc nmt cds \
+    graal vm-structs jni-check services management cmsgc epsilongc g1gc parallelgc serialgc shenandoahgc zgc nmt cds \
     static-build link-time-opt aot jfr"
 
 # Deprecated JVM features (these are ignored, but with a warning)
@@ -338,6 +338,15 @@
     fi
   fi
 
+  # Only enable Shenandoah on supported arches
+  AC_MSG_CHECKING([if shenandoah can be built])
+  if test "x$OPENJDK_TARGET_CPU_ARCH" = "xx86" || test "x$OPENJDK_TARGET_CPU" = "xaarch64" ; then
+    AC_MSG_RESULT([yes])
+  else
+    DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES shenandoahgc"
+    AC_MSG_RESULT([no, platform not supported])
+  fi
+
   # Only enable ZGC on supported platforms
   AC_MSG_CHECKING([if zgc can be built])
   if test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xx86_64"; then
@@ -349,7 +358,7 @@
 
   # Disable unsupported GCs for Zero
   if HOTSPOT_CHECK_JVM_VARIANT(zero); then
-    DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES epsilongc g1gc zgc"
+    DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES epsilongc g1gc shenandoahgc zgc"
   fi
 
   # Turn on additional features based on other parts of configure
@@ -483,7 +492,7 @@
   fi
 
   # All variants but minimal (and custom) get these features
-  NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cmsgc g1gc parallelgc serialgc epsilongc jni-check jvmti management nmt services vm-structs zgc"
+  NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cmsgc g1gc parallelgc serialgc epsilongc shenandoahgc jni-check jvmti management nmt services vm-structs zgc"
 
   AC_MSG_CHECKING([if cds should be enabled])
   if test "x$ENABLE_CDS" = "xtrue"; then
--- old/make/hotspot/gensrc/GensrcAdlc.gmk	2020-01-17 17:08:22.196136426 +0100
+++ new/make/hotspot/gensrc/GensrcAdlc.gmk	2020-01-17 17:08:22.094136431 +0100
@@ -136,6 +136,12 @@
     $d/os_cpu/$(HOTSPOT_TARGET_OS)_$(HOTSPOT_TARGET_CPU_ARCH)/$(HOTSPOT_TARGET_OS)_$(HOTSPOT_TARGET_CPU_ARCH).ad \
   )))
 
+  ifeq ($(call check-jvm-feature, shenandoahgc), true)
+    AD_SRC_FILES += $(call uniq, $(wildcard $(foreach d, $(AD_SRC_ROOTS), \
+      $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/shenandoah/shenandoah_$(HOTSPOT_TARGET_CPU).ad \
+    )))
+  endif
+
   SINGLE_AD_SRCFILE := $(ADLC_SUPPORT_DIR)/all-ad-src.ad
 
   INSERT_FILENAME_AWK_SCRIPT := \
--- old/make/hotspot/lib/JvmFeatures.gmk	2020-01-17 17:08:22.774136394 +0100
+++ new/make/hotspot/lib/JvmFeatures.gmk	2020-01-17 17:08:22.673136399 +0100
@@ -166,6 +166,11 @@
   JVM_EXCLUDE_PATTERNS += gc/z
 endif
 
+ifneq ($(call check-jvm-feature, shenandoahgc), true)
+  JVM_CFLAGS_FEATURES += -DINCLUDE_SHENANDOAHGC=0
+  JVM_EXCLUDE_PATTERNS += gc/shenandoah
+endif
+
 ifneq ($(call check-jvm-feature, jfr), true)
   JVM_CFLAGS_FEATURES += -DINCLUDE_JFR=0
   JVM_EXCLUDE_PATTERNS += jfr
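The new shenandoahgc feature only drives compilation: the -DINCLUDE_SHENANDOAHGC=0 define above is what shared C++ code tests. The patch assumes the usual per-GC helper macros alongside it. For reference, a sketch of that guard block as it conventionally appears in utilities/macros.hpp, mirroring the existing ZGC/G1 blocks (the exact fallback bodies are an assumption, not quoted from this patch):

    // Sketch of the conventional per-GC guard block (cf. the ZGC block in
    // utilities/macros.hpp). -DINCLUDE_SHENANDOAHGC=0 from JvmFeatures.gmk
    // overrides the default below; fallback bodies are assumed.
    #ifndef INCLUDE_SHENANDOAHGC
    #define INCLUDE_SHENANDOAHGC 1
    #endif

    #if INCLUDE_SHENANDOAHGC
    #define SHENANDOAHGC_ONLY(x)       x
    #define SHENANDOAHGC_ONLY_ARG(arg) arg,
    #define NOT_SHENANDOAHGC(x)
    #else
    #define SHENANDOAHGC_ONLY(x)
    #define SHENANDOAHGC_ONLY_ARG(arg)
    #define NOT_SHENANDOAHGC(x)        x
    #endif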
--- old/make/hotspot/lib/JvmOverrideFiles.gmk	2020-01-17 17:08:23.353136362 +0100
+++ new/make/hotspot/lib/JvmOverrideFiles.gmk	2020-01-17 17:08:23.255136367 +0100
@@ -36,6 +36,11 @@
   BUILD_LIBJVM_assembler_x86.cpp_CXXFLAGS := -Wno-maybe-uninitialized
   BUILD_LIBJVM_cardTableBarrierSetAssembler_x86.cpp_CXXFLAGS := -Wno-maybe-uninitialized
   BUILD_LIBJVM_interp_masm_x86.cpp_CXXFLAGS := -Wno-uninitialized
+  ifeq ($(DEBUG_LEVEL), release)
+    # Need extra inlining to collapse all marking code into the hot marking loop
+    BUILD_LIBJVM_shenandoahConcurrentMark.cpp_CXXFLAGS := --param inline-unit-growth=1000
+    BUILD_LIBJVM_shenandoahTraversalGC.cpp_CXXFLAGS := --param inline-unit-growth=1000
+  endif
 endif
 
 LIBJVM_FDLIBM_COPY_OPT_FLAG := $(CXX_O_FLAG_NONE)
--- old/src/hotspot/cpu/aarch64/aarch64.ad	2020-01-17 17:08:23.929136330 +0100
+++ new/src/hotspot/cpu/aarch64/aarch64.ad	2020-01-17 17:08:23.827136336 +0100
@@ -1280,6 +1280,10 @@
   case Op_GetAndSetN:
   case Op_GetAndAddI:
   case Op_GetAndAddL:
+#if INCLUDE_SHENANDOAHGC
+  case Op_ShenandoahCompareAndSwapP:
+  case Op_ShenandoahCompareAndSwapN:
+#endif
     return true;
   case Op_CompareAndExchangeI:
   case Op_CompareAndExchangeN:
--- old/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	2020-01-17 17:08:24.678136289 +0100
+++ new/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	2020-01-17 17:08:24.578136294 +0100
@@ -2811,7 +2811,11 @@
 
 void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
-  assert(patch_code == lir_patch_none, "Patch code not supported");
+  if (patch_code != lir_patch_none) {
+    deoptimize_trap(info);
+    return;
+  }
+
   __ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr()));
 }
--- old/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp	2020-01-17 17:08:25.286136255 +0100
+++ new/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp	2020-01-17 17:08:25.183136261 +0100
@@ -872,8 +872,8 @@
   }
 
   // Get mirror and store it in the frame as GC root for this Method*
-  __ load_mirror(rscratch1, rmethod);
-  __ stp(rscratch1, zr, Address(sp, 4 * wordSize));
+  __ load_mirror(r10, rmethod);
+  __ stp(r10, zr, Address(sp, 4 * wordSize));
 
   __ ldr(rcpool, Address(rmethod, Method::const_offset()));
   __ ldr(rcpool, Address(rcpool, ConstMethod::constants_offset()));
--- old/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp	2020-01-17 17:08:25.877136223 +0100
+++ new/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp	2020-01-17 17:08:25.776136228 +0100
@@ -36,6 +36,7 @@
 #include "gc/shared/c1/barrierSetC1.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
+#include "utilities/macros.hpp"
 #include "vmreg_x86.inline.hpp"
 
 #ifdef ASSERT
@@ -674,6 +675,11 @@
   if (type == T_OBJECT || type == T_ARRAY) {
     cmp_value.load_item_force(FrameMap::rax_oop_opr);
     new_value.load_item();
+#if INCLUDE_SHENANDOAHGC
+    if (UseShenandoahGC) {
+      __ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), new_register(T_OBJECT), new_register(T_OBJECT));
+    } else
+#endif
     __ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
   } else if (type == T_INT) {
     cmp_value.load_item_force(FrameMap::rax_opr);
@@ -699,6 +705,12 @@
   // Because we want a 2-arg form of xchg and xadd
   __ move(value.result(), result);
   assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
+#if INCLUDE_SHENANDOAHGC
+  if (UseShenandoahGC) {
+    LIR_Opr tmp = is_oop ? new_register(type) : LIR_OprFact::illegalOpr;
+    __ xchg(addr, result, result, tmp);
+  } else
+#endif
  __ xchg(addr, result, result, LIR_OprFact::illegalOpr);
   return result;
 }
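The dedicated ShenandoahCompareAndSwap* opcodes (and the extra temporaries passed to cas_obj above) exist because a plain pointer CAS can fail spuriously under concurrent evacuation: the field may hold the from-space copy of the object while the compare value is the to-space copy. A conceptual sketch of the retry logic, with illustrative names only (resolve_forwarded is stubbed; the real lookup consults the forwarding pointer):

    #include <atomic>

    struct oopDesc {};
    using oop = oopDesc*;

    // Stand-in for Shenandoah's forwarding lookup: the real version returns
    // the to-space copy if 'obj' was evacuated. Identity stub for the sketch.
    static oop resolve_forwarded(oop obj) { return obj; }

    static bool shenandoah_cas_oop(std::atomic<oop>* field, oop expected, oop new_value) {
      oop witness = expected;
      if (field->compare_exchange_strong(witness, new_value)) {
        return true;                 // fast path: memory agreed with 'expected'
      }
      // The CAS failed, but possibly falsely: the field may hold a stale copy
      // of the same logical object. Compare resolved values and retry.
      while (resolve_forwarded(witness) == resolve_forwarded(expected)) {
        oop seen = witness;
        if (field->compare_exchange_strong(seen, new_value)) {
          return true;               // same logical object: replace stale copy
        }
        witness = seen;
      }
      return false;                  // genuinely a different object
    }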
--- old/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	2020-01-17 17:08:26.469136190 +0100
+++ new/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	2020-01-17 17:08:26.372136195 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
 #include "code/nativeInst.hpp"
 #include "code/vtableStubs.hpp"
 #include "gc/shared/gcLocker.hpp"
+#include "gc/shared/collectedHeap.hpp"
 #include "interpreter/interpreter.hpp"
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
@@ -1434,6 +1435,102 @@
   }
 }
 
+// Pin incoming array argument of java critical method
+static void pin_critical_native_array(MacroAssembler* masm,
+                                      VMRegPair reg,
+                                      int& pinned_slot) {
+  assert(UseShenandoahGC, "only supported in Shenandoah for now");
+  __ block_comment("pin_critical_native_array {");
+  Register tmp_reg = rax;
+
+  Label is_null;
+  VMRegPair tmp;
+  VMRegPair in_reg = reg;
+  bool on_stack = false;
+
+  tmp.set_ptr(tmp_reg->as_VMReg());
+  if (reg.first()->is_stack()) {
+    // Load the arg up from the stack
+    move_ptr(masm, reg, tmp);
+    reg = tmp;
+    on_stack = true;
+  } else {
+    __ movptr(rax, reg.first()->as_Register());
+  }
+  __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
+  __ jccb(Assembler::equal, is_null);
+
+  __ push(c_rarg0);
+  __ push(c_rarg1);
+  __ push(c_rarg2);
+  __ push(c_rarg3);
+#ifdef _WIN64
+  // caller-saved registers on Windows
+  __ push(r10);
+  __ push(r11);
+#else
+  __ push(c_rarg4);
+  __ push(c_rarg5);
+#endif
+
+  if (reg.first()->as_Register() != c_rarg1) {
+    __ movptr(c_rarg1, reg.first()->as_Register());
+  }
+  __ movptr(c_rarg0, r15_thread);
+  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::pin_object)));
+
+#ifdef _WIN64
+  __ pop(r11);
+  __ pop(r10);
+#else
+  __ pop(c_rarg5);
+  __ pop(c_rarg4);
+#endif
+  __ pop(c_rarg3);
+  __ pop(c_rarg2);
+  __ pop(c_rarg1);
+  __ pop(c_rarg0);
+
+  if (on_stack) {
+    __ movptr(Address(rbp, reg2offset_in(in_reg.first())), rax);
+    __ bind(is_null);
+  } else {
+    __ movptr(reg.first()->as_Register(), rax);
+
+    // save on stack for unpinning later
+    __ bind(is_null);
+    assert(reg.first()->is_Register(), "Must be a register");
+    int offset = pinned_slot * VMRegImpl::stack_slot_size;
+    pinned_slot += VMRegImpl::slots_per_word;
+    __ movq(Address(rsp, offset), rax);
+  }
+  __ block_comment("} pin_critical_native_array");
+}
+
+// Unpin array argument of java critical method
+static void unpin_critical_native_array(MacroAssembler* masm,
+                                        VMRegPair reg,
+                                        int& pinned_slot) {
+  assert(UseShenandoahGC, "only supported in Shenandoah for now");
+  __ block_comment("unpin_critical_native_array {");
+  Label is_null;
+
+  if (reg.first()->is_stack()) {
+    __ movptr(c_rarg1, Address(rbp, reg2offset_in(reg.first())));
+  } else {
+    int offset = pinned_slot * VMRegImpl::stack_slot_size;
+    pinned_slot += VMRegImpl::slots_per_word;
+    __ movq(c_rarg1, Address(rsp, offset));
+  }
+  __ testptr(c_rarg1, c_rarg1);
+  __ jccb(Assembler::equal, is_null);
+
+  __ movptr(c_rarg0, r15_thread);
+  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::unpin_object)));
+
+  __ bind(is_null);
+  __ block_comment("} unpin_critical_native_array");
+}
+
 // Check GCLocker::needs_gc and enter the runtime if it's true.  This
 // keeps a new JNI critical region from starting until a GC has been
@@ -2130,7 +2227,7 @@
 
   const Register oop_handle_reg = r14;
 
-  if (is_critical_native) {
+  if (is_critical_native && !Universe::heap()->supports_object_pinning()) {
     check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
                                        oop_handle_offset, oop_maps, in_regs, in_sig_bt);
   }
@@ -2187,6 +2284,11 @@
   // the incoming and outgoing registers are offset upwards and for
   // critical natives they are offset down.
   GrowableArray<int> arg_order(2 * total_in_args);
+  // Inbound arguments that need to be pinned for critical natives
+  GrowableArray<int> pinned_args(total_in_args);
+  // Current stack slot for storing register based array argument
+  int pinned_slot = oop_handle_offset;
+
   VMRegPair tmp_vmreg;
   tmp_vmreg.set2(rbx->as_VMReg());
@@ -2234,6 +2336,12 @@
     switch (in_sig_bt[i]) {
       case T_ARRAY:
         if (is_critical_native) {
+          // pin before unpack
+          if (Universe::heap()->supports_object_pinning()) {
+            assert(pinned_slot <= stack_slots, "overflow");
+            pin_critical_native_array(masm, in_regs[i], pinned_slot);
+            pinned_args.append(i);
+          }
           unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
           c_arg++;
 #ifdef ASSERT
@@ -2450,6 +2558,19 @@
     default       : ShouldNotReachHere();
   }
 
+  // unpin pinned arguments
+  pinned_slot = oop_handle_offset;
+  if (pinned_args.length() > 0) {
+    // save return value that may be overwritten otherwise.
+    save_native_result(masm, ret_type, stack_slots);
+    for (int index = 0; index < pinned_args.length(); index ++) {
+      int i = pinned_args.at(index);
+      assert(pinned_slot <= stack_slots, "overflow");
+      unpin_critical_native_array(masm, in_regs[i], pinned_slot);
+    }
+    restore_native_result(masm, ret_type, stack_slots);
+  }
+
   // Switch thread to "native transition" state before reading the synchronization state.
   // This additional state is necessary because reading and testing the synchronization
   // state is not atomic w.r.t. GC, as this scenario demonstrates:
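The stubs above replace the GCLocker protocol for heaps that can pin individual objects: the array is pinned before its raw address is handed to the critical native and unpinned after the call returns, so the rest of the heap can keep moving. A conceptual sketch of that contract under assumed, illustrative types (the real entry points are SharedRuntime::pin_object/unpin_object taking the thread and the oop):

    // Illustrative only: the pin/unpin contract for JNI critical natives on a
    // pinning-capable heap. Not HotSpot API.
    struct Heap {
      virtual ~Heap() {}
      virtual bool supports_object_pinning() const = 0;
      virtual void* pin(void* obj) = 0;    // returns a stable address for obj
      virtual void unpin(void* obj) = 0;   // releases the pin
    };

    void* call_critical_native(Heap* heap, void* array, void* (*native)(void*)) {
      if (heap->supports_object_pinning()) {
        void* pinned = heap->pin(array);   // object cannot move while pinned
        void* result = native(pinned);
        heap->unpin(pinned);               // GC may relocate it again afterwards
        return result;
      }
      // Fallback: block GC for the whole call (the old GCLocker protocol).
      return native(array);
    }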
--- old/src/hotspot/share/adlc/formssel.cpp	2020-01-17 17:08:27.098136155 +0100
+++ new/src/hotspot/share/adlc/formssel.cpp	2020-01-17 17:08:26.998136161 +0100
@@ -778,6 +778,10 @@
         !strcmp(_matrule->_rChild->_opType,"LoadBarrierSlowReg") ||
         !strcmp(_matrule->_rChild->_opType,"LoadBarrierWeakSlowReg") ||
 #endif
+#if INCLUDE_SHENANDOAHGC
+        !strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeP") ||
+        !strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeN") ||
+#endif
         !strcmp(_matrule->_rChild->_opType,"CompareAndExchangeP") ||
         !strcmp(_matrule->_rChild->_opType,"CompareAndExchangeN"))) return true;
   else if ( is_ideal_load() == Form::idealP )                return true;
@@ -3502,6 +3506,9 @@
     "CompareAndSwapB", "CompareAndSwapS", "CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP", "CompareAndSwapN",
     "WeakCompareAndSwapB", "WeakCompareAndSwapS", "WeakCompareAndSwapI", "WeakCompareAndSwapL", "WeakCompareAndSwapP", "WeakCompareAndSwapN",
     "CompareAndExchangeB", "CompareAndExchangeS", "CompareAndExchangeI", "CompareAndExchangeL", "CompareAndExchangeP", "CompareAndExchangeN",
+#if INCLUDE_SHENANDOAHGC
+    "ShenandoahCompareAndSwapN", "ShenandoahCompareAndSwapP", "ShenandoahWeakCompareAndSwapP", "ShenandoahWeakCompareAndSwapN", "ShenandoahCompareAndExchangeP", "ShenandoahCompareAndExchangeN",
+#endif
     "StoreCM",
     "ClearArray",
     "GetAndSetB", "GetAndSetS", "GetAndAddI", "GetAndSetI", "GetAndSetP",
--- old/src/hotspot/share/ci/ciInstanceKlass.cpp	2020-01-17 17:08:27.733136120 +0100
+++ new/src/hotspot/share/ci/ciInstanceKlass.cpp	2020-01-17 17:08:27.636136126 +0100
@@ -554,6 +554,12 @@
   _has_injected_fields = has_injected_fields;
 }
 
+bool ciInstanceKlass::has_object_fields() const {
+  GUARDED_VM_ENTRY(
+      return get_instanceKlass()->nonstatic_oop_map_size() > 0;
+    );
+}
+
 // ------------------------------------------------------------------
 // ciInstanceKlass::find_method
 //
--- old/src/hotspot/share/ci/ciInstanceKlass.hpp	2020-01-17 17:08:28.319136088 +0100
+++ new/src/hotspot/share/ci/ciInstanceKlass.hpp	2020-01-17 17:08:28.219136094 +0100
@@ -202,6 +202,8 @@
     return _has_injected_fields > 0 ? true : false;
   }
 
+  bool has_object_fields() const;
+
   // nth nonstatic field (presented by ascending address)
   ciField* nonstatic_field_at(int i) {
     assert(_nonstatic_fields != NULL, "");
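has_object_fields() answers, with a single VM transition, whether instances of a klass embed any oops at all; barrier code can use that to skip reference-related work on clones of oop-free objects. A hedged sketch of the intended query (needs_clone_barrier is a hypothetical helper, not a HotSpot API):

    // Illustrative only: how barrier-emission code might consult
    // ciInstanceKlass::has_object_fields() to elide clone barriers.
    bool needs_clone_barrier(ciInstanceKlass* ik) {
      // nonstatic_oop_map_size() > 0 iff instances embed at least one oop, so
      // an oop-free klass can be cloned as raw bits: the GC will never find
      // references inside the copy that need barrier processing.
      return ik->has_object_fields();
    }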
--- old/src/hotspot/share/code/codeBlob.cpp	2020-01-17 17:08:28.895136056 +0100
+++ new/src/hotspot/share/code/codeBlob.cpp	2020-01-17 17:08:28.795136062 +0100
@@ -31,6 +31,7 @@
 #include "code/vtableStubs.hpp"
 #include "compiler/disassembler.hpp"
 #include "interpreter/bytecode.hpp"
+#include "interpreter/interpreter.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/heap.hpp"
 #include "memory/resourceArea.hpp"
--- old/src/hotspot/share/code/codeCache.hpp	2020-01-17 17:08:29.477136024 +0100
+++ new/src/hotspot/share/code/codeCache.hpp	2020-01-17 17:08:29.380136030 +0100
@@ -73,6 +73,7 @@
 
 class OopClosure;
 class KlassDepChange;
+class ShenandoahParallelCodeHeapIterator;
 
 class CodeCache : AllStatic {
   friend class VMStructs;
@@ -80,6 +81,7 @@
   template <class T, class Filter> friend class CodeBlobIterator;
   friend class WhiteBox;
   friend class CodeCacheLoader;
+  friend class ShenandoahParallelCodeHeapIterator;
  private:
   // CodeHeaps of the cache
   static GrowableArray<CodeHeap*>* _heaps;
--- old/src/hotspot/share/gc/shared/barrierSetConfig.hpp	2020-01-17 17:08:30.061135992 +0100
+++ new/src/hotspot/share/gc/shared/barrierSetConfig.hpp	2020-01-17 17:08:29.961135998 +0100
@@ -32,6 +32,7 @@
   f(CardTableBarrierSet)                             \
   EPSILONGC_ONLY(f(EpsilonBarrierSet))               \
   G1GC_ONLY(f(G1BarrierSet))                         \
+  SHENANDOAHGC_ONLY(f(ShenandoahBarrierSet))         \
   ZGC_ONLY(f(ZBarrierSet))
 
 #define FOR_EACH_ABSTRACT_BARRIER_SET_DO(f)          \
--- old/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp	2020-01-17 17:08:30.654135960 +0100
+++ new/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp	2020-01-17 17:08:30.552135965 +0100
@@ -36,6 +36,9 @@
 #if INCLUDE_G1GC
 #include "gc/g1/g1BarrierSet.inline.hpp"
 #endif
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
+#endif
 #if INCLUDE_ZGC
 #include "gc/z/zBarrierSet.inline.hpp"
 #endif
--- old/src/hotspot/share/gc/shared/collectedHeap.hpp	2020-01-17 17:08:31.229135928 +0100
+++ new/src/hotspot/share/gc/shared/collectedHeap.hpp	2020-01-17 17:08:31.129135933 +0100
@@ -89,6 +89,7 @@
 //   CMSHeap
 //   G1CollectedHeap
 //   ParallelScavengeHeap
+//   ShenandoahHeap
 //   ZCollectedHeap
 //
 class CollectedHeap : public CHeapObj<mtInternal> {
@@ -178,7 +179,8 @@
     CMS,
     G1,
     Epsilon,
-    Z
+    Z,
+    Shenandoah
   };
 
   static inline size_t filler_array_max_size() {
--- old/src/hotspot/share/gc/shared/gcCause.cpp	2020-01-17 17:08:31.812135896 +0100
+++ new/src/hotspot/share/gc/shared/gcCause.cpp	2020-01-17 17:08:31.708135901 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -105,6 +105,21 @@
     case _dcmd_gc_run:
       return "Diagnostic Command";
 
+    case _shenandoah_allocation_failure_evac:
+      return "Allocation Failure During Evacuation";
+
+    case _shenandoah_stop_vm:
+      return "Stopping VM";
+
+    case _shenandoah_concurrent_gc:
+      return "Concurrent GC";
+
+    case _shenandoah_traversal_gc:
+      return "Traversal GC";
+
+    case _shenandoah_upgrade_to_full_gc:
+      return "Upgrade To Full GC";
+
     case _z_timer:
       return "Timer";
--- old/src/hotspot/share/gc/shared/gcCause.hpp	2020-01-17 17:08:32.387135864 +0100
+++ new/src/hotspot/share/gc/shared/gcCause.hpp	2020-01-17 17:08:32.287135870 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -78,6 +78,12 @@
 
   _dcmd_gc_run,
 
+  _shenandoah_stop_vm,
+  _shenandoah_allocation_failure_evac,
+  _shenandoah_concurrent_gc,
+  _shenandoah_traversal_gc,
+  _shenandoah_upgrade_to_full_gc,
+
   _z_timer,
   _z_warmup,
   _z_allocation_rate,
@@ -121,7 +127,8 @@
     // _allocation_failure is the generic cause a collection for allocation failure
     // _adaptive_size_policy is for a collecton done before a full GC
     return (cause == GCCause::_allocation_failure ||
-            cause == GCCause::_adaptive_size_policy);
+            cause == GCCause::_adaptive_size_policy ||
+            cause == GCCause::_shenandoah_allocation_failure_evac);
   }
 
   // Return a string describing the GCCause.
--- old/src/hotspot/share/gc/shared/gcConfig.cpp	2020-01-17 17:08:32.955135833 +0100
+++ new/src/hotspot/share/gc/shared/gcConfig.cpp	2020-01-17 17:08:32.854135838 +0100
@@ -43,6 +43,9 @@
 #if INCLUDE_SERIALGC
 #include "gc/serial/serialArguments.hpp"
 #endif
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/shenandoahArguments.hpp"
+#endif
 #if INCLUDE_ZGC
 #include "gc/z/zArguments.hpp"
 #endif
@@ -57,23 +60,25 @@
       _flag(flag), _name(name), _arguments(arguments), _hs_err_name(hs_err_name) {}
 };
 
-     CMSGC_ONLY(static CMSArguments      cmsArguments;)
- EPSILONGC_ONLY(static EpsilonArguments  epsilonArguments;)
-      G1GC_ONLY(static G1Arguments       g1Arguments;)
-PARALLELGC_ONLY(static ParallelArguments parallelArguments;)
-  SERIALGC_ONLY(static SerialArguments   serialArguments;)
-       ZGC_ONLY(static ZArguments        zArguments;)
+       CMSGC_ONLY(static CMSArguments        cmsArguments;)
+   EPSILONGC_ONLY(static EpsilonArguments    epsilonArguments;)
+        G1GC_ONLY(static G1Arguments         g1Arguments;)
+  PARALLELGC_ONLY(static ParallelArguments   parallelArguments;)
+    SERIALGC_ONLY(static SerialArguments     serialArguments;)
+SHENANDOAHGC_ONLY(static ShenandoahArguments shenandoahArguments;)
+         ZGC_ONLY(static ZArguments          zArguments;)
 
 // Table of supported GCs, for translating between command
 // line flag, CollectedHeap::Name and GCArguments instance.
 static const SupportedGC SupportedGCs[] = {
-       CMSGC_ONLY_ARG(SupportedGC(UseConcMarkSweepGC, CollectedHeap::CMS,      cmsArguments,      "concurrent mark sweep gc"))
-   EPSILONGC_ONLY_ARG(SupportedGC(UseEpsilonGC,       CollectedHeap::Epsilon,  epsilonArguments,  "epsilon gc"))
-        G1GC_ONLY_ARG(SupportedGC(UseG1GC,            CollectedHeap::G1,       g1Arguments,       "g1 gc"))
-  PARALLELGC_ONLY_ARG(SupportedGC(UseParallelGC,      CollectedHeap::Parallel, parallelArguments, "parallel gc"))
-  PARALLELGC_ONLY_ARG(SupportedGC(UseParallelOldGC,   CollectedHeap::Parallel, parallelArguments, "parallel gc"))
-    SERIALGC_ONLY_ARG(SupportedGC(UseSerialGC,        CollectedHeap::Serial,   serialArguments,   "serial gc"))
-         ZGC_ONLY_ARG(SupportedGC(UseZGC,             CollectedHeap::Z,        zArguments,        "z gc"))
+         CMSGC_ONLY_ARG(SupportedGC(UseConcMarkSweepGC, CollectedHeap::CMS,        cmsArguments,        "concurrent mark sweep gc"))
+     EPSILONGC_ONLY_ARG(SupportedGC(UseEpsilonGC,       CollectedHeap::Epsilon,    epsilonArguments,    "epsilon gc"))
+          G1GC_ONLY_ARG(SupportedGC(UseG1GC,            CollectedHeap::G1,         g1Arguments,         "g1 gc"))
+    PARALLELGC_ONLY_ARG(SupportedGC(UseParallelGC,      CollectedHeap::Parallel,   parallelArguments,   "parallel gc"))
+    PARALLELGC_ONLY_ARG(SupportedGC(UseParallelOldGC,   CollectedHeap::Parallel,   parallelArguments,   "parallel gc"))
+      SERIALGC_ONLY_ARG(SupportedGC(UseSerialGC,        CollectedHeap::Serial,     serialArguments,     "serial gc"))
+SHENANDOAHGC_ONLY_ARG(SupportedGC(UseShenandoahGC,      CollectedHeap::Shenandoah, shenandoahArguments, "shenandoah gc"))
+           ZGC_ONLY_ARG(SupportedGC(UseZGC,             CollectedHeap::Z,          zArguments,          "z gc"))
 };
 
 #define FOR_EACH_SUPPORTED_GC(var) \
@@ -90,14 +95,15 @@
 bool GCConfig::_gc_selected_ergonomically = false;
 
 void GCConfig::fail_if_unsupported_gc_is_selected() {
-  NOT_CMSGC(     FAIL_IF_SELECTED(UseConcMarkSweepGC, true));
-  NOT_EPSILONGC( FAIL_IF_SELECTED(UseEpsilonGC,       true));
-  NOT_G1GC(      FAIL_IF_SELECTED(UseG1GC,            true));
-  NOT_PARALLELGC(FAIL_IF_SELECTED(UseParallelGC,      true));
-  NOT_PARALLELGC(FAIL_IF_SELECTED(UseParallelOldGC,   true));
-  NOT_SERIALGC(  FAIL_IF_SELECTED(UseSerialGC,        true));
-  NOT_SERIALGC(  FAIL_IF_SELECTED(UseParallelOldGC,   false));
-  NOT_ZGC(       FAIL_IF_SELECTED(UseZGC,             true));
+  NOT_CMSGC(       FAIL_IF_SELECTED(UseConcMarkSweepGC, true));
+  NOT_EPSILONGC(   FAIL_IF_SELECTED(UseEpsilonGC,       true));
+  NOT_G1GC(        FAIL_IF_SELECTED(UseG1GC,            true));
+  NOT_PARALLELGC(  FAIL_IF_SELECTED(UseParallelGC,      true));
+  NOT_PARALLELGC(  FAIL_IF_SELECTED(UseParallelOldGC,   true));
+  NOT_SERIALGC(    FAIL_IF_SELECTED(UseSerialGC,        true));
+  NOT_SERIALGC(    FAIL_IF_SELECTED(UseParallelOldGC,   false));
+  NOT_SHENANDOAHGC(FAIL_IF_SELECTED(UseShenandoahGC,    true));
+  NOT_ZGC(         FAIL_IF_SELECTED(UseZGC,             true));
 }
 
 void GCConfig::select_gc_ergonomically() {
--- old/src/hotspot/share/gc/shared/gcConfiguration.cpp	2020-01-17 17:08:33.521135802 +0100
+++ new/src/hotspot/share/gc/shared/gcConfiguration.cpp	2020-01-17 17:08:33.424135807 +0100
@@ -43,7 +43,7 @@
     return ParNew;
   }
 
-  if (UseZGC) {
+  if (UseZGC || UseShenandoahGC) {
     return NA;
   }
 
@@ -67,6 +67,10 @@
     return Z;
   }
 
+  if (UseShenandoahGC) {
+    return Shenandoah;
+  }
+
   return SerialOld;
 }
--- old/src/hotspot/share/gc/shared/gcName.hpp	2020-01-17 17:08:34.095135770 +0100
+++ new/src/hotspot/share/gc/shared/gcName.hpp	2020-01-17 17:08:34.000135775 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,6 +39,7 @@
   G1Old,
   G1Full,
   Z,
+  Shenandoah,
   NA,
   GCNameEndSentinel
 };
@@ -58,6 +59,7 @@
       case G1Old: return "G1Old";
       case G1Full: return "G1Full";
       case Z: return "Z";
+      case Shenandoah: return "Shenandoah";
       case NA: return "N/A";
       default: ShouldNotReachHere(); return NULL;
     }
--- old/src/hotspot/share/gc/shared/gc_globals.hpp	2020-01-17 17:08:34.673135738 +0100
+++ new/src/hotspot/share/gc/shared/gc_globals.hpp	2020-01-17 17:08:34.570135744 +0100
@@ -41,6 +41,9 @@
 #if INCLUDE_SERIALGC
 #include "gc/serial/serial_globals.hpp"
 #endif
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/shenandoah_globals.hpp"
+#endif
 #if INCLUDE_ZGC
 #include "gc/z/z_globals.hpp"
 #endif
@@ -140,6 +143,22 @@
     constraint,                                                             \
     writeable))                                                             \
                                                                             \
+  SHENANDOAHGC_ONLY(GC_SHENANDOAH_FLAGS(                                    \
+    develop,                                                                \
+    develop_pd,                                                             \
+    product,                                                                \
+    product_pd,                                                             \
+    diagnostic,                                                             \
+    diagnostic_pd,                                                          \
+    experimental,                                                           \
+    notproduct,                                                             \
+    manageable,                                                             \
+    product_rw,                                                             \
+    lp64_product,                                                           \
+    range,                                                                  \
+    constraint,                                                             \
+    writeable))                                                             \
+                                                                            \
   ZGC_ONLY(GC_Z_FLAGS(                                                      \
     develop,                                                                \
     develop_pd,                                                             \
@@ -179,6 +198,9 @@
   experimental(bool, UseZGC, false,                                         \
           "Use the Z garbage collector")                                    \
                                                                             \
+  experimental(bool, UseShenandoahGC, false,                                \
+          "Use the Shenandoah garbage collector")                           \
+                                                                            \
   product(uint, ParallelGCThreads, 0,                                       \
           "Number of parallel threads parallel gc will use")                \
           constraint(ParallelGCThreadsConstraintFunc,AfterErgo)             \
--- old/src/hotspot/share/gc/shared/referenceProcessor.cpp	2020-01-17 17:08:35.260135706 +0100
+++ new/src/hotspot/share/gc/shared/referenceProcessor.cpp	2020-01-17 17:08:35.158135711 +0100
@@ -1156,7 +1156,7 @@
       // Check assumption that an object is not potentially
       // discovered twice except by concurrent collectors that potentially
       // trace the same Reference object twice.
-      assert(UseConcMarkSweepGC || UseG1GC,
+      assert(UseConcMarkSweepGC || UseG1GC || UseShenandoahGC,
              "Only possible with a concurrent marking collector");
       return true;
     }
--- old/src/hotspot/share/gc/shared/taskqueue.hpp	2020-01-17 17:08:35.860135673 +0100
+++ new/src/hotspot/share/gc/shared/taskqueue.hpp	2020-01-17 17:08:35.757135678 +0100
@@ -357,7 +357,8 @@
   static int randomParkAndMiller(int* seed0);
 public:
   // Returns "true" if some TaskQueue in the set contains a task.
-  virtual bool peek() = 0;
+  virtual bool peek() = 0;
+  virtual size_t tasks() = 0;
 };
 
 template <MEMFLAGS F> class TaskQueueSetSuperImpl: public CHeapObj<F>, public TaskQueueSetSuper {
@@ -389,6 +390,7 @@
   bool steal(uint queue_num, int* seed, E& t);
 
   bool peek();
+  size_t tasks();
 
   uint size() const { return _n; }
 };
@@ -414,6 +416,16 @@
   return false;
 }
 
+template <class T, MEMFLAGS F>
+size_t GenericTaskQueueSet<T, F>::tasks() {
+  size_t n = 0;
+  for (uint j = 0; j < _n; j++) {
+    n += _queues[j]->size();
+  }
+  return n;
+}
+
+
 // When to terminate from the termination protocol.
 class TerminatorTerminator: public CHeapObj<mtInternal> {
 public:
@@ -426,7 +438,7 @@
 #undef TRACESPINNING
 
 class ParallelTaskTerminator: public StackObj {
-private:
+protected:
   uint _n_threads;
   TaskQueueSetSuper* _queue_set;
@@ -462,7 +474,7 @@
   // As above, but it also terminates if the should_exit_termination()
   // method of the terminator parameter returns true.  If terminator is
   // NULL, then it is ignored.
-  bool offer_termination(TerminatorTerminator* terminator);
+  virtual bool offer_termination(TerminatorTerminator* terminator);
 
   // Reset the terminator, so that it may be reused again.
   // The caller is responsible for ensuring that this is done
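tasks() gives a cheap global estimate of outstanding work across all queues, and the private-to-protected plus virtual offer_termination() changes open ParallelTaskTerminator for subclassing. Together they let a GC plug in its own termination policy. A hedged sketch of that pattern (illustrative subclass, not the actual Shenandoah terminator):

    // Illustrative only: shows why offer_termination() became virtual and the
    // members protected. Compiles against the declarations in taskqueue.hpp.
    class SketchTerminator : public ParallelTaskTerminator {
    public:
      SketchTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
        ParallelTaskTerminator(n_threads, queue_set) {}

      virtual bool offer_termination(TerminatorTerminator* terminator) {
        // If any queue still holds tasks, keep this worker in the protocol
        // rather than letting it spin toward termination.
        if (_queue_set->tasks() > 0) {
          return false;  // more work is available: do not terminate yet
        }
        return ParallelTaskTerminator::offer_termination(terminator);
      }
    };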
--- old/src/hotspot/share/gc/shared/vmStructs_gc.hpp	2020-01-17 17:08:36.443135641 +0100
+++ new/src/hotspot/share/gc/shared/vmStructs_gc.hpp	2020-01-17 17:08:36.342135646 +0100
@@ -50,6 +50,9 @@
 #include "gc/serial/defNewGeneration.hpp"
 #include "gc/serial/vmStructs_serial.hpp"
 #endif
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/vmStructs_shenandoah.hpp"
+#endif
 #if INCLUDE_ZGC
 #include "gc/z/vmStructs_z.hpp"
 #endif
@@ -73,6 +76,9 @@
   SERIALGC_ONLY(VM_STRUCTS_SERIALGC(nonstatic_field,             \
                                     volatile_nonstatic_field,    \
                                     static_field))               \
+  SHENANDOAHGC_ONLY(VM_STRUCTS_SHENANDOAH(nonstatic_field,       \
+                                          volatile_nonstatic_field, \
+                                          static_field))         \
   ZGC_ONLY(VM_STRUCTS_ZGC(nonstatic_field,                       \
                           volatile_nonstatic_field,              \
                           static_field))                         \
@@ -178,6 +184,9 @@
   SERIALGC_ONLY(VM_TYPES_SERIALGC(declare_type,                  \
                                   declare_toplevel_type,         \
                                   declare_integer_type))         \
+  SHENANDOAHGC_ONLY(VM_TYPES_SHENANDOAH(declare_type,            \
+                                        declare_toplevel_type,   \
+                                        declare_integer_type))   \
   ZGC_ONLY(VM_TYPES_ZGC(declare_type,                            \
                         declare_toplevel_type,                   \
                         declare_integer_type))                   \
@@ -253,6 +262,8 @@
                                             declare_constant_with_value)) \
   SERIALGC_ONLY(VM_INT_CONSTANTS_SERIALGC(declare_constant,               \
                                           declare_constant_with_value))   \
+  SHENANDOAHGC_ONLY(VM_INT_CONSTANTS_SHENANDOAH(declare_constant,         \
+                                                declare_constant_with_value)) \
   ZGC_ONLY(VM_INT_CONSTANTS_ZGC(declare_constant,                         \
                                 declare_constant_with_value))             \
                                                                           \
--- old/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp	2020-01-17 17:08:37.031135608 +0100
+++ new/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp	2020-01-17 17:08:36.932135614 +0100
@@ -129,7 +129,7 @@
   ZBarrierSetC2State* s = bs->state();
   if (s->load_barrier_count() >= 2) {
     Compile::TracePhase tp("idealLoop", &C->timers[Phase::_t_idealLoop]);
-    PhaseIdealLoop ideal_loop(igvn, true, false, true);
+    PhaseIdealLoop ideal_loop(igvn, LoopOptsZgcLastRound);
     if (C->major_progress()) C->print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
   }
 }
--- old/src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp	2020-01-17 17:08:37.637135575 +0100
+++ new/src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp	2020-01-17 17:08:37.538135580 +0100
@@ -53,6 +53,11 @@
     return false;
   }
 
+  if (UseShenandoahGC) {
+    log_warning(jfr)("LeakProfiler is currently not supported in combination with Shenandoah GC");
+    return false;
+  }
+
   assert(!is_running(), "invariant");
   assert(sample_count > 0, "invariant");
--- old/src/hotspot/share/jfr/metadata/metadata.xml	2020-01-17 17:08:38.217135543 +0100
+++ new/src/hotspot/share/jfr/metadata/metadata.xml	2020-01-17 17:08:38.120135548 +0100
@@ -922,6 +922,27 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
--- old/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp	2020-01-17 17:08:38.838135509 +0100
+++ new/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp	2020-01-17 17:08:38.738135514 +0100
@@ -63,6 +63,9 @@
 #if INCLUDE_G1GC
 #include "gc/g1/g1HeapRegionEventSender.hpp"
 #endif
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/shenandoahJfrSupport.hpp"
+#endif
 
 /**
  *  JfrPeriodic class
@@ -574,3 +577,14 @@
   event.set_flushingEnabled(UseCodeCacheFlushing);
   event.commit();
 }
+
+
+TRACE_REQUEST_FUNC(ShenandoahHeapRegionInformation) {
+#if INCLUDE_SHENANDOAHGC
+  if (UseShenandoahGC) {
+    VM_ShenandoahSendHeapRegionInfoEvents op;
+    VMThread::execute(&op);
+  }
+#endif
+}
+
--- old/src/hotspot/share/memory/metaspace.hpp	2020-01-17 17:08:39.437135476 +0100
+++ new/src/hotspot/share/memory/metaspace.hpp	2020-01-17 17:08:39.337135481 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -235,6 +235,7 @@
 class ClassLoaderMetaspace : public CHeapObj<mtClass> {
   friend class CollectedHeap; // For expand_and_allocate()
   friend class ZCollectedHeap; // For expand_and_allocate()
+  friend class ShenandoahHeap; // For expand_and_allocate()
   friend class Metaspace;
   friend class MetaspaceUtils;
   friend class metaspace::PrintCLDMetaspaceInfoClosure;
--- old/src/hotspot/share/opto/arraycopynode.cpp	2020-01-17 17:08:40.016135444 +0100
+++ new/src/hotspot/share/opto/arraycopynode.cpp	2020-01-17 17:08:39.919135449 +0100
@@ -30,6 +30,9 @@
 #include "opto/graphKit.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "utilities/macros.hpp"
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+#endif
 
 ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled, bool has_negative_length_guard)
   : CallNode(arraycopy_type(), NULL, TypeRawPtr::BOTTOM),
@@ -205,6 +208,11 @@
       Node* v = LoadNode::make(*phase, ctl, mem->memory_at(fieldidx), next_src, adr_type, type, bt, MemNode::unordered);
       v = phase->transform(v);
+#if INCLUDE_SHENANDOAHGC
+      if (UseShenandoahGC && bt == T_OBJECT) {
+        v = ShenandoahBarrierSetC2::bsc2()->arraycopy_load_reference_barrier(phase, v);
+      }
+#endif
       Node* s = StoreNode::make(*phase, ctl, mem->memory_at(fieldidx), next_dest, adr_type, v, bt, MemNode::unordered);
       s = phase->transform(s);
       mem->set_memory_at(fieldidx, s);
@@ -373,6 +381,11 @@
     if (count > 0) {
       Node* v = LoadNode::make(*phase, forward_ctl, start_mem_src, adr_src, atp_src, value_type, copy_type, MemNode::unordered);
       v = phase->transform(v);
+#if INCLUDE_SHENANDOAHGC
+      if (UseShenandoahGC && copy_type == T_OBJECT) {
+        v = ShenandoahBarrierSetC2::bsc2()->arraycopy_load_reference_barrier(phase, v);
+      }
+#endif
       mem = StoreNode::make(*phase, forward_ctl, mem, adr_dest, atp_dest, v, copy_type, MemNode::unordered);
       mem = phase->transform(mem);
       for (int i = 1; i < count; i++) {
@@ -381,6 +394,11 @@
         Node* next_dest = phase->transform(new AddPNode(base_dest,adr_dest,off));
         v = LoadNode::make(*phase, forward_ctl, same_alias ? mem : start_mem_src, next_src, atp_src, value_type, copy_type, MemNode::unordered);
         v = phase->transform(v);
+#if INCLUDE_SHENANDOAHGC
+        if (UseShenandoahGC && copy_type == T_OBJECT) {
+          v = ShenandoahBarrierSetC2::bsc2()->arraycopy_load_reference_barrier(phase, v);
+        }
+#endif
        mem = StoreNode::make(*phase, forward_ctl,mem,next_dest,atp_dest,v, copy_type, MemNode::unordered);
         mem = phase->transform(mem);
       }
@@ -422,11 +440,21 @@
         Node* next_dest = phase->transform(new AddPNode(base_dest,adr_dest,off));
         Node* v = LoadNode::make(*phase, backward_ctl, same_alias ? mem : start_mem_src, next_src, atp_src, value_type, copy_type, MemNode::unordered);
         v = phase->transform(v);
+#if INCLUDE_SHENANDOAHGC
+        if (UseShenandoahGC && copy_type == T_OBJECT) {
+          v = ShenandoahBarrierSetC2::bsc2()->arraycopy_load_reference_barrier(phase, v);
+        }
+#endif
         mem = StoreNode::make(*phase, backward_ctl,mem,next_dest,atp_dest,v, copy_type, MemNode::unordered);
         mem = phase->transform(mem);
       }
       Node* v = LoadNode::make(*phase, backward_ctl, same_alias ? mem : start_mem_src, adr_src, atp_src, value_type, copy_type, MemNode::unordered);
       v = phase->transform(v);
+#if INCLUDE_SHENANDOAHGC
+      if (UseShenandoahGC && copy_type == T_OBJECT) {
+        v = ShenandoahBarrierSetC2::bsc2()->arraycopy_load_reference_barrier(phase, v);
+      }
+#endif
       mem = StoreNode::make(*phase, backward_ctl, mem, adr_dest, atp_dest, v, copy_type, MemNode::unordered);
       mem = phase->transform(mem);
     } else if(can_reshape) {
@@ -485,7 +513,8 @@
   } else {
     if (in(TypeFunc::Control) != ctl) {
       // we can't return new memory and control from Ideal at parse time
-      assert(!is_clonebasic(), "added control for clone?");
+      assert(!is_clonebasic() || UseShenandoahGC, "added control for clone?");
+      phase->record_for_igvn(this);
       return false;
     }
   }
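Every oop load the expanded arraycopy emits is routed through arraycopy_load_reference_barrier because the source element may point into the collection set while evacuation is running; the barrier hands back the to-space copy before the value is stored. A conceptual sketch of the invariant, with illustrative names (the stub stands in for the real forwarding-aware barrier):

    // Conceptual sketch: why Shenandoah intercepts oop loads in an
    // element-wise arraycopy. Not HotSpot code.
    struct oopDesc {};
    using oop = oopDesc*;

    // Assumed behavior: returns the to-space copy of v (NULL-safe).
    // Identity stub here so the sketch is self-contained.
    static oop load_reference_barrier(oop v) { return v; }

    static void copy_oop_elements(oop* src, oop* dst, int len) {
      for (int i = 0; i < len; i++) {
        oop v = src[i];
        // Without this step the destination could be published holding
        // from-space pointers, breaking the to-space invariant for readers.
        dst[i] = load_reference_barrier(v);
      }
    }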
--- old/src/hotspot/share/opto/cfgnode.hpp	2020-01-17 17:08:40.601135411 +0100
+++ new/src/hotspot/share/opto/cfgnode.hpp	2020-01-17 17:08:40.502135417 +0100
@@ -303,7 +303,6 @@
 protected:
   ProjNode* range_check_trap_proj(int& flip, Node*& l, Node*& r);
   Node* Ideal_common(PhaseGVN *phase, bool can_reshape);
-  Node* dominated_by(Node* prev_dom, PhaseIterGVN* igvn);
   Node* search_identical(int dist);
 
 public:
@@ -391,6 +390,7 @@
   virtual const RegMask &out_RegMask() const;
   Node* fold_compares(PhaseIterGVN* phase);
   static Node* up_one_dom(Node* curr, bool linear_only = false);
+  Node* dominated_by(Node* prev_dom, PhaseIterGVN* igvn);
 
   // Takes the type of val and filters it through the test represented
   // by if_proj and returns a more refined type if one is produced.
--- old/src/hotspot/share/opto/classes.cpp	2020-01-17 17:08:41.174135380 +0100
+++ new/src/hotspot/share/opto/classes.cpp	2020-01-17 17:08:41.079135385 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,6 +51,9 @@
 #if INCLUDE_ZGC
 #include "gc/z/c2/zBarrierSetC2.hpp"
 #endif
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+#endif
 
 // ----------------------------------------------------------------------------
 // Build a table of virtual functions to map from Nodes to dense integer
--- old/src/hotspot/share/opto/classes.hpp	2020-01-17 17:08:41.733135349 +0100
+++ new/src/hotspot/share/opto/classes.hpp	2020-01-17 17:08:41.638135354 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -265,6 +265,19 @@
 macro(RoundFloat)
 macro(SafePoint)
 macro(SafePointScalarObject)
+#if INCLUDE_SHENANDOAHGC
+#define shmacro(x) macro(x)
+#else
+#define shmacro(x) optionalmacro(x)
+#endif
+shmacro(ShenandoahCompareAndExchangeP)
+shmacro(ShenandoahCompareAndExchangeN)
+shmacro(ShenandoahCompareAndSwapN)
+shmacro(ShenandoahCompareAndSwapP)
+shmacro(ShenandoahWeakCompareAndSwapN)
+shmacro(ShenandoahWeakCompareAndSwapP)
+shmacro(ShenandoahEnqueueBarrier)
+shmacro(ShenandoahLoadReferenceBarrier)
 macro(SCMemProj)
 macro(SqrtD)
 macro(SqrtF)
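classes.hpp is an X-macro table: each macro(x) line declares an ideal node class and reserves its dense Op_ constant. Routing the Shenandoah nodes through shmacro keeps the opcode numbering stable across builds: without the feature they fall back to optionalmacro, which still reserves the constant but generates no per-node code. A minimal, self-contained sketch of that pattern (the table and names here are illustrative, not the real node list):

    #include <cstdio>

    // Stand-in for the real table in opto/classes.hpp.
    #define NODE_TABLE(macro, optionalmacro) \
      macro(AddI)                            \
      macro(SafePoint)                       \
      optionalmacro(ShenandoahLoadReferenceBarrier)

    // Pass 1: build the dense opcode enum. Both macro kinds reserve a slot,
    // so numbering does not shift between builds with and without the feature.
    #define ENUM_ENTRY(x) Op_##x,
    enum Opcodes { Op_None = 0, NODE_TABLE(ENUM_ENTRY, ENUM_ENTRY) Op_Last };
    #undef ENUM_ENTRY

    // Pass 2: generate per-node code only for nodes that really exist.
    #define NAME_ENTRY(x) printf("node %s = %d\n", #x, Op_##x);
    #define SKIP_ENTRY(x)
    int main() {
      NODE_TABLE(NAME_ENTRY, SKIP_ENTRY)  // skips the optional node's code
      return 0;
    }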
--- old/src/hotspot/share/opto/compile.cpp	2020-01-17 17:08:42.304135318 +0100
+++ new/src/hotspot/share/opto/compile.cpp	2020-01-17 17:08:42.204135323 +0100
@@ -82,6 +82,9 @@
 #if INCLUDE_ZGC
 #include "gc/z/c2/zBarrierSetC2.hpp"
 #endif
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+#endif
 
 
 // -------------------- Compile::mach_constant_base_node -----------------------
@@ -2104,7 +2107,7 @@
         // PhaseIdealLoop is expensive so we only try it once we are
         // out of live nodes and we only try it again if the previous
         // helped got the number of nodes down significantly
-        PhaseIdealLoop ideal_loop( igvn, false, true );
+        PhaseIdealLoop ideal_loop(igvn, LoopOptsNone);
         if (failing())  return;
         low_live_nodes = live_nodes();
         _major_progress = true;
@@ -2155,6 +2158,21 @@
 }
 
 
+bool Compile::optimize_loops(int& loop_opts_cnt, PhaseIterGVN& igvn, LoopOptsMode mode) {
+  if(loop_opts_cnt > 0) {
+    debug_only( int cnt = 0; );
+    while(major_progress() && (loop_opts_cnt > 0)) {
+      TracePhase tp("idealLoop", &timers[_t_idealLoop]);
+      assert( cnt++ < 40, "infinite cycle in loop optimization" );
+      PhaseIdealLoop ideal_loop(igvn, mode);
+      loop_opts_cnt--;
+      if (failing())  return false;
+      if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
+    }
+  }
+  return true;
+}
+
 //------------------------------Optimize---------------------------------------
 // Given a graph, optimize it.
 void Compile::Optimize() {
@@ -2193,10 +2211,10 @@
     igvn.optimize();
   }
 
-  print_method(PHASE_ITER_GVN1, 2);
-
   if (failing())  return;
 
+  print_method(PHASE_ITER_GVN1, 2);
+
   inline_incrementally(igvn);
 
   print_method(PHASE_INCREMENTAL_INLINE, 2);
@@ -2245,7 +2263,7 @@
   if (has_loops()) {
     // Cleanup graph (remove dead nodes).
     TracePhase tp("idealLoop", &timers[_t_idealLoop]);
-    PhaseIdealLoop ideal_loop( igvn, false, true );
+    PhaseIdealLoop ideal_loop(igvn, LoopOptsNone);
     if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
     if (failing())  return;
   }
@@ -2280,7 +2298,7 @@
   if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
     {
       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
-      PhaseIdealLoop ideal_loop( igvn, true );
+      PhaseIdealLoop ideal_loop(igvn, LoopOptsDefault);
       loop_opts_cnt--;
       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2);
       if (failing())  return;
@@ -2288,7 +2306,7 @@
     // Loop opts pass if partial peeling occurred in previous pass
     if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) {
       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
-      PhaseIdealLoop ideal_loop( igvn, false );
+      PhaseIdealLoop ideal_loop(igvn, LoopOptsSkipSplitIf);
       loop_opts_cnt--;
       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2);
       if (failing())  return;
@@ -2296,7 +2314,7 @@
     // Loop opts pass for loop-unrolling before CCP
     if(major_progress() && (loop_opts_cnt > 0)) {
       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
-      PhaseIdealLoop ideal_loop( igvn, false );
+      PhaseIdealLoop ideal_loop(igvn, LoopOptsSkipSplitIf);
       loop_opts_cnt--;
       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP3, 2);
     }
@@ -2332,16 +2350,8 @@
 
   // Loop transforms on the ideal graph.  Range Check Elimination,
   // peeling, unrolling, etc.
-  if(loop_opts_cnt > 0) {
-    debug_only( int cnt = 0; );
-    while(major_progress() && (loop_opts_cnt > 0)) {
-      TracePhase tp("idealLoop", &timers[_t_idealLoop]);
-      assert( cnt++ < 40, "infinite cycle in loop optimization" );
-      PhaseIdealLoop ideal_loop( igvn, true);
-      loop_opts_cnt--;
-      if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
-      if (failing())  return;
-    }
+  if (!optimize_loops(loop_opts_cnt, igvn, LoopOptsDefault)) {
+    return;
   }
 
 #if INCLUDE_ZGC
@@ -2383,6 +2393,15 @@
     }
   }
 
+  print_method(PHASE_BEFORE_BARRIER_EXPAND, 2);
+
+#if INCLUDE_SHENANDOAHGC
+  if (UseShenandoahGC && ((ShenandoahBarrierSetC2*)BarrierSet::barrier_set()->barrier_set_c2())->expand_barriers(this, igvn)) {
+    assert(failing(), "must bail out w/ explicit message");
+    return;
+  }
+#endif
+
   if (opaque4_count() > 0) {
     C->remove_opaque4_nodes(igvn);
     igvn.optimize();
@@ -2823,6 +2842,17 @@
   case Op_CallLeafNoFP: {
     assert (n->is_Call(), "");
     CallNode *call = n->as_Call();
+#if INCLUDE_SHENANDOAHGC
+    if (UseShenandoahGC && ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(call)) {
+      uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt();
+      if (call->req() > cnt) {
+        assert(call->req() == cnt+1, "only one extra input");
+        Node* addp = call->in(cnt);
+        assert(!ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(addp), "useless address computation?");
+        call->del_req(cnt);
+      }
+    }
+#endif
     // Count call sites where the FP mode bit would have to be flipped.
     // Do not count uncommon runtime calls:
     // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
@@ -3387,6 +3417,28 @@
     }
     break;
   }
+#if INCLUDE_SHENANDOAHGC
+  case Op_ShenandoahCompareAndSwapP:
+  case Op_ShenandoahCompareAndSwapN:
+  case Op_ShenandoahWeakCompareAndSwapN:
+  case Op_ShenandoahWeakCompareAndSwapP:
+  case Op_ShenandoahCompareAndExchangeP:
+  case Op_ShenandoahCompareAndExchangeN:
+#ifdef ASSERT
+    if( VerifyOptoOopOffsets ) {
+      MemNode* mem  = n->as_Mem();
+      // Check to see if address types have grounded out somehow.
+      const TypeInstPtr *tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
+      ciInstanceKlass *k = tp->klass()->as_instance_klass();
+      bool oop_offset_is_sane = k->contains_field_offset(tp->offset());
+      assert( !tp || oop_offset_is_sane, "" );
+    }
+#endif
+    break;
+  case Op_ShenandoahLoadReferenceBarrier:
+    assert(false, "should have been expanded already");
+    break;
+#endif
   case Op_RangeCheck: {
     RangeCheckNode* rc = n->as_RangeCheck();
     Node* iff = new IfNode(rc->in(0), rc->in(1), rc->_prob, rc->_fcnt);
@@ -3823,10 +3875,18 @@
 // Currently supported:
 // - G1 pre-barriers (see GraphKit::g1_write_barrier_pre())
 void Compile::verify_barriers() {
-#if INCLUDE_G1GC
-  if (UseG1GC) {
+#if INCLUDE_G1GC || INCLUDE_SHENANDOAHGC
+  if (UseG1GC || UseShenandoahGC) {
     // Verify G1 pre-barriers
+
+#if INCLUDE_G1GC && INCLUDE_SHENANDOAHGC
+    const int marking_offset = in_bytes(UseG1GC ? G1ThreadLocalData::satb_mark_queue_active_offset()
+                                                : ShenandoahThreadLocalData::satb_mark_queue_active_offset());
+#elif INCLUDE_G1GC
     const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
+#else
+    const int marking_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset());
+#endif
 
     ResourceArea *area = Thread::current()->resource_area();
     Unique_Node_List visited(area);
--- old/src/hotspot/share/opto/compile.hpp	2020-01-17 17:08:42.944135282 +0100
+++ new/src/hotspot/share/opto/compile.hpp	2020-01-17 17:08:42.842135288 +0100
@@ -90,6 +90,16 @@
 class Node_Stack;
 struct Final_Reshape_Counts;
 
+enum LoopOptsMode {
+  LoopOptsDefault = 0,
+  LoopOptsNone = 1,
+  LoopOptsSkipSplitIf = 2,
+  LoopOptsShenandoahExpand = 3,
+  LoopOptsShenandoahPostExpand = 4,
+  LoopOptsVerify = 5,
+  LoopOptsZgcLastRound = 6
+};
+
 typedef unsigned int node_idx_t;
 class NodeCloneInfo {
  private:
@@ -1084,6 +1094,7 @@
   void inline_incrementally(PhaseIterGVN& igvn);
   void inline_string_calls(bool parse_time);
   void inline_boxing_calls(PhaseIterGVN& igvn);
+  bool optimize_loops(int& loop_opts_cnt, PhaseIterGVN& igvn, LoopOptsMode mode);
 
   // Matching, CFG layout, allocation, code generation
   PhaseCFG*         cfg()                       { return _cfg; }
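With the old boolean triple replaced by LoopOptsMode, each PhaseIdealLoop invocation names its intent, and Shenandoah gains two dedicated passes for expanding its barriers after the regular loop optimizations. A sketch of how a caller drives the modes, using the constructor shown in this patch (the surrounding driver function is illustrative, not HotSpot code):

    // Illustrative driver only: shows the LoopOptsMode-based invocation style.
    void expand_shenandoah_barriers_sketch(Compile* C, PhaseIterGVN& igvn) {
      {
        // First pass: expand ShenandoahLoadReferenceBarrier nodes into control
        // flow; build_and_optimize() disables split-if and counted-loop
        // discovery for the Shenandoah modes.
        PhaseIdealLoop ideal_loop(igvn, LoopOptsShenandoahExpand);
        if (C->failing()) return;
      }
      {
        // Second pass: clean up the shapes the expansion left behind.
        PhaseIdealLoop ideal_loop(igvn, LoopOptsShenandoahPostExpand);
      }
    }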
--- old/src/hotspot/share/opto/escape.cpp	2020-01-17 17:08:43.528135250 +0100
+++ new/src/hotspot/share/opto/escape.cpp	2020-01-17 17:08:43.433135255 +0100
@@ -45,6 +45,9 @@
 #if INCLUDE_ZGC
 #include "gc/z/c2/zBarrierSetC2.hpp"
 #endif
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+#endif
 
 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
   _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
@@ -512,6 +515,10 @@
     }
     case Op_CompareAndExchangeP:
     case Op_CompareAndExchangeN:
+#if INCLUDE_SHENANDOAHGC
+    case Op_ShenandoahCompareAndExchangeP:
+    case Op_ShenandoahCompareAndExchangeN:
+#endif
     case Op_GetAndSetP:
     case Op_GetAndSetN: {
       add_objload_to_connection_graph(n, delayed_worklist);
@@ -521,6 +528,12 @@
     case Op_StoreN:
     case Op_StoreNKlass:
     case Op_StorePConditional:
+#if INCLUDE_SHENANDOAHGC
+    case Op_ShenandoahWeakCompareAndSwapP:
+    case Op_ShenandoahWeakCompareAndSwapN:
+    case Op_ShenandoahCompareAndSwapP:
+    case Op_ShenandoahCompareAndSwapN:
+#endif
     case Op_WeakCompareAndSwapP:
     case Op_WeakCompareAndSwapN:
     case Op_CompareAndSwapP:
@@ -554,8 +567,8 @@
       // Pointer stores in G1 barriers looks like unsafe access.
       // Ignore such stores to be able scalar replace non-escaping
      // allocations.
-#if INCLUDE_G1GC
-      if (UseG1GC && adr->is_AddP()) {
+#if INCLUDE_G1GC || INCLUDE_SHENANDOAHGC
+      if ((UseG1GC || UseShenandoahGC) && adr->is_AddP()) {
         Node* base = get_addp_base(adr);
         if (base->Opcode() == Op_LoadP &&
             base->in(MemNode::Address)->is_AddP()) {
@@ -563,7 +576,15 @@
           Node* tls = get_addp_base(adr);
           if (tls->Opcode() == Op_ThreadLocal) {
             int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
-            if (offs == in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset())) {
+#if INCLUDE_G1GC && INCLUDE_SHENANDOAHGC
+            const int buf_offset = in_bytes(UseG1GC ? G1ThreadLocalData::satb_mark_queue_buffer_offset()
+                                                    : ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
+#elif INCLUDE_G1GC
+            const int buf_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
+#else
+            const int buf_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
+#endif
+            if (offs == buf_offset) {
               break; // G1 pre barrier previous oop value store.
             }
             if (offs == in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset())) {
@@ -600,6 +621,13 @@
       add_java_object(n, PointsToNode::ArgEscape);
       break;
     }
+#if INCLUDE_SHENANDOAHGC
+    case Op_ShenandoahEnqueueBarrier:
+      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
+      break;
+    case Op_ShenandoahLoadReferenceBarrier:
+      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), delayed_worklist);
+#endif
     default:
       ; // Do nothing for nodes not related to EA.
   }
@@ -740,6 +768,14 @@
     case Op_CompareAndSwapN:
     case Op_WeakCompareAndSwapP:
     case Op_WeakCompareAndSwapN:
+#if INCLUDE_SHENANDOAHGC
+    case Op_ShenandoahCompareAndExchangeP:
+    case Op_ShenandoahCompareAndExchangeN:
+    case Op_ShenandoahCompareAndSwapP:
+    case Op_ShenandoahCompareAndSwapN:
+    case Op_ShenandoahWeakCompareAndSwapP:
+    case Op_ShenandoahWeakCompareAndSwapN:
+#endif
     case Op_GetAndSetP:
     case Op_GetAndSetN: {
       Node* adr = n->in(MemNode::Address);
@@ -753,6 +789,9 @@
       }
 #endif
       if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||
+#if INCLUDE_SHENANDOAHGC
+          opcode == Op_ShenandoahCompareAndExchangeN || opcode == Op_ShenandoahCompareAndExchangeP ||
+#endif
           opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) {
         add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
       }
@@ -815,6 +854,14 @@
       }
       break;
     }
+#if INCLUDE_SHENANDOAHGC
+    case Op_ShenandoahEnqueueBarrier:
+      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL);
+      break;
+    case Op_ShenandoahLoadReferenceBarrier:
+      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), NULL);
+      break;
+#endif
     default: {
       // This method should be called only for EA specific nodes which may
       // miss some edges when they were created.
@@ -2113,6 +2160,10 @@
   // Check for unsafe oop field access
   if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
       n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
+#if INCLUDE_SHENANDOAHGC
+      n->has_out_with(Op_ShenandoahCompareAndExchangeP) || n->has_out_with(Op_ShenandoahCompareAndExchangeN) ||
+      n->has_out_with(Op_ShenandoahCompareAndSwapP, Op_ShenandoahCompareAndSwapN, Op_ShenandoahWeakCompareAndSwapP, Op_ShenandoahWeakCompareAndSwapN) ||
+#endif
      n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN)) {
     bt = T_OBJECT;
     (*unsafe) = true;
@@ -2378,7 +2429,8 @@
       assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
              opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
              (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
-             (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
+             (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()) ||
+             uncast_base->Opcode() == Op_ShenandoahLoadReferenceBarrier, "sanity");
     }
   }
   return base;
--- old/src/hotspot/share/opto/graphKit.cpp	2020-01-17 17:08:44.153135216 +0100
+++ new/src/hotspot/share/opto/graphKit.cpp	2020-01-17 17:08:44.056135221 +0100
@@ -43,6 +43,10 @@
 #include "opto/runtime.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/sharedRuntime.hpp"
+#include "utilities/macros.hpp"
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+#endif
 
 //----------------------------GraphKit-----------------------------------------
 // Main utility constructor.
@@ -3738,6 +3742,14 @@
   if (ptr == NULL) {     // reduce dumb test in callers
     return NULL;
   }
+
+#if INCLUDE_SHENANDOAHGC
+  if (UseShenandoahGC) {
+    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
+    ptr = bs->step_over_gc_barrier(ptr);
+  }
+#endif
+
   if (ptr->is_CheckCastPP()) {  // strip only one raw-to-oop cast
     ptr = ptr->in(1);
     if (ptr == NULL) return NULL;
--- old/src/hotspot/share/opto/loopTransform.cpp	2020-01-17 17:08:44.808135180 +0100
+++ new/src/hotspot/share/opto/loopTransform.cpp	2020-01-17 17:08:44.710135185 +0100
@@ -2852,6 +2852,14 @@
           ((bol->in(1)->Opcode() == Op_StorePConditional ) ||
            (bol->in(1)->Opcode() == Op_StoreIConditional ) ||
            (bol->in(1)->Opcode() == Op_StoreLConditional ) ||
+#if INCLUDE_SHENANDOAHGC
+           (bol->in(1)->Opcode() == Op_ShenandoahCompareAndExchangeP ) ||
+           (bol->in(1)->Opcode() == Op_ShenandoahCompareAndExchangeN ) ||
+           (bol->in(1)->Opcode() == Op_ShenandoahWeakCompareAndSwapP ) ||
+           (bol->in(1)->Opcode() == Op_ShenandoahWeakCompareAndSwapN ) ||
+           (bol->in(1)->Opcode() == Op_ShenandoahCompareAndSwapP ) ||
+           (bol->in(1)->Opcode() == Op_ShenandoahCompareAndSwapN ) ||
+#endif
            (bol->in(1)->Opcode() == Op_CompareAndExchangeB ) ||
            (bol->in(1)->Opcode() == Op_CompareAndExchangeS ) ||
            (bol->in(1)->Opcode() == Op_CompareAndExchangeI ) ||
--- old/src/hotspot/share/opto/loopnode.cpp	2020-01-17 17:08:45.434135145 +0100
+++ new/src/hotspot/share/opto/loopnode.cpp	2020-01-17 17:08:45.335135151 +0100
@@ -40,6 +40,10 @@
 #include "opto/mulnode.hpp"
 #include "opto/rootnode.hpp"
 #include "opto/superword.hpp"
+#include "utilities/macros.hpp"
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+#endif
 
 //=============================================================================
 //------------------------------is_loop_iv-------------------------------------
@@ -2713,7 +2717,12 @@
 //----------------------------build_and_optimize-------------------------------
 // Create a PhaseLoop.  Build the ideal Loop tree.  Map each Ideal Node to
 // its corresponding LoopNode.  If 'optimize' is true, do some loop cleanups.
-void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool skip_loop_opts, bool last_round) {
+void PhaseIdealLoop::build_and_optimize(LoopOptsMode mode) {
+  bool do_split_ifs = (mode == LoopOptsDefault || mode == LoopOptsZgcLastRound);
+  bool skip_loop_opts = (mode == LoopOptsNone);
+  bool shenandoah_opts = (mode == LoopOptsShenandoahExpand ||
+                          mode == LoopOptsShenandoahPostExpand);
+
   ResourceMark rm;
 
   int old_progress = C->major_progress();
@@ -2777,7 +2786,7 @@
   }
 
   // Nothing to do, so get out
-  bool stop_early = !C->has_loops() && !skip_loop_opts && !do_split_ifs && !_verify_me && !_verify_only;
+  bool stop_early = !C->has_loops() && !skip_loop_opts && !do_split_ifs && !_verify_me && !_verify_only && !shenandoah_opts;
   bool do_expensive_nodes = C->should_optimize_expensive_nodes(_igvn);
   if (stop_early && !do_expensive_nodes) {
     _igvn.optimize();           // Cleanup NeverBranches
@@ -2856,8 +2865,9 @@
 
   // Given early legal placement, try finding counted loops.  This placement
   // is good enough to discover most loop invariants.
-  if( !_verify_me && !_verify_only )
-    _ltree_root->counted_loop( this );
+  if (!_verify_me && !_verify_only && !shenandoah_opts) {
+    _ltree_root->counted_loop(this);
+  }
 
   // Find latest loop placement.  Find ideal loop placement.
   visited.Clear();
@@ -2928,6 +2938,16 @@
     return;
   }
 
+#if INCLUDE_SHENANDOAHGC
+  if (UseShenandoahGC && ((ShenandoahBarrierSetC2*) BarrierSet::barrier_set()->barrier_set_c2())->optimize_loops(this, mode, visited, nstack, worklist)) {
+    _igvn.optimize();
+    if (C->log() != NULL) {
+      log_loop_tree(_ltree_root, _ltree_root, C->log());
+    }
+    return;
+  }
+#endif
+
   if (ReassociateInvariants) {
     // Reassociate invariants and prep for split_thru_phi
     for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
@@ -2955,9 +2975,9 @@
   // that require basic-block info (like cloning through Phi's)
   if( SplitIfBlocks && do_split_ifs ) {
     visited.Clear();
-    split_if_with_blocks( visited, nstack, last_round );
+    split_if_with_blocks( visited, nstack, mode == LoopOptsZgcLastRound );
     NOT_PRODUCT( if( VerifyLoopOptimizations ) verify(); );
-    if (last_round) {
+    if (mode == LoopOptsZgcLastRound) {
       C->set_major_progress();
     }
   }
@@ -3958,7 +3978,8 @@
   }
   while(worklist.size() != 0 && LCA != early) {
     Node* s = worklist.pop();
-    if (s->is_Load() || s->Opcode() == Op_SafePoint) {
+    if (s->is_Load() || s->is_ShenandoahBarrier() || s->Opcode() == Op_SafePoint ||
+        (UseShenandoahGC && s->is_CallStaticJava() && s->as_CallStaticJava()->uncommon_trap_request() != 0)) {
       continue;
     } else if (s->is_MergeMem()) {
       for (DUIterator_Fast imax, i = s->fast_outs(imax); i < imax; i++) {
@@ -4146,7 +4167,9 @@
   }
   IdealLoopTree* loop = get_loop(least);
   Node* head = loop->_head;
-  if (head->is_OuterStripMinedLoop()) {
+  if (head->is_OuterStripMinedLoop() &&
+      // Verification can't be applied to fully built strip mined loops
+      head->as_Loop()->outer_loop_end()->in(1)->find_int_con(-1) == 0) {
     Node* sfpt = head->as_Loop()->outer_safepoint();
     ResourceMark rm;
     Unique_Node_List wq;
@@ -4226,6 +4249,9 @@
     case Op_HasNegatives:
       pinned = false;
   }
+  if (UseShenandoahGC && n->is_CMove()) {
+    pinned = false;
+  }
   if( pinned ) {
     IdealLoopTree *chosen_loop = get_loop(n->is_CFG() ? n : get_ctrl(n));
     if( !chosen_loop->_child )   // Inner loop?
@@ -4490,6 +4516,7 @@
     }
   }
 }
+#endif
 
 // Collect a R-P-O for the whole CFG.
 // Result list is in post-order (scan backwards for RPO)
@@ -4512,7 +4539,6 @@
     }
   }
 }
-#endif
 
 //=============================================================================
--- old/src/hotspot/share/opto/loopnode.hpp	2020-01-17 17:08:46.070135110 +0100
+++ new/src/hotspot/share/opto/loopnode.hpp	2020-01-17 17:08:45.967135116 +0100
@@ -925,11 +925,11 @@
       _dom_lca_tags(arena()), // Thread::resource_area
       _verify_me(NULL),
       _verify_only(true) {
-    build_and_optimize(false, false);
+    build_and_optimize(LoopOptsVerify);
   }
 
   // build the loop tree and perform any requested optimizations
-  void build_and_optimize(bool do_split_if, bool skip_loop_opts, bool last_round = false);
+  void build_and_optimize(LoopOptsMode mode);
 
   // Dominators for the sea of nodes
   void Dominators();
@@ -939,13 +939,13 @@
   Node *dom_lca_internal( Node *n1, Node *n2 ) const;
 
   // Compute the Ideal Node to Loop mapping
-  PhaseIdealLoop( PhaseIterGVN &igvn, bool do_split_ifs, bool skip_loop_opts = false, bool last_round = false) :
+  PhaseIdealLoop( PhaseIterGVN &igvn, LoopOptsMode mode) :
     PhaseTransform(Ideal_Loop),
     _igvn(igvn),
     _dom_lca_tags(arena()), // Thread::resource_area
     _verify_me(NULL),
     _verify_only(false) {
-    build_and_optimize(do_split_ifs, skip_loop_opts, last_round);
+    build_and_optimize(mode);
   }
 
   // Verify that verify_me made the same decisions as a fresh run.
@@ -955,7 +955,7 @@
     _dom_lca_tags(arena()), // Thread::resource_area
     _verify_me(verify_me),
     _verify_only(false) {
-    build_and_optimize(false, false);
+    build_and_optimize(LoopOptsVerify);
  }
 
   // Build and verify the loop tree without modifying the graph.  This
@@ -1291,8 +1291,10 @@
   Node *place_near_use( Node *useblock ) const;
   Node* try_move_store_before_loop(Node* n, Node *n_ctrl);
   void try_move_store_after_loop(Node* n);
+public:
   bool identical_backtoback_ifs(Node *n);
   bool can_split_if(Node *n_ctrl);
+private:
   bool _created_loop_node;
 public:
@@ -1307,7 +1309,6 @@
 #ifndef PRODUCT
   void dump( ) const;
   void dump( IdealLoopTree *loop, uint rpo_idx, Node_List &rpo_list ) const;
-  void rpo( Node *start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list ) const;
   void verify() const;          // Major slow  :-)
   void verify_compare( Node *n, const PhaseIdealLoop *loop_verify, VectorSet &visited ) const;
   IdealLoopTree *get_loop_idx(Node* n) const {
@@ -1319,6 +1320,9 @@
   static int _loop_invokes;     // Count of PhaseIdealLoop invokes
   static int _loop_work;        // Sum of PhaseIdealLoop x _unique
 #endif
+  void rpo( Node *start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list ) const;
+
+  PhaseIterGVN& igvn() { return _igvn; }
 };
 
 // This kit may be used for making of a reserved copy of a loop before this loop
values.at_put(j, n); } else if(val->is_Proj() && val->in(0) == alloc) { values.at_put(j, _igvn.zerocon(ft)); } else if (val->is_Phi()) { @@ -569,7 +580,14 @@ // hit a sentinel, return appropriate 0 value return _igvn.zerocon(ft); } else if (mem->is_Store()) { - return mem->in(MemNode::ValueIn); + Node* n = mem->in(MemNode::ValueIn); +#if INCLUDE_SHENANDOAHGC + if (UseShenandoahGC) { + BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); + n = bs->step_over_gc_barrier(n); + } +#endif + return n; } else if (mem->is_Phi()) { // attempt to produce a Phi reflecting the values on the input paths of the Phi Node_Stack value_phis(a, 8); @@ -646,6 +664,7 @@ k < kmax && can_eliminate; k++) { Node* n = use->fast_out(k); if (!n->is_Store() && n->Opcode() != Op_CastP2X && + SHENANDOAHGC_ONLY((!UseShenandoahGC || !ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(n)) &&) !(n->is_ArrayCopy() && n->as_ArrayCopy()->is_clonebasic() && n->in(ArrayCopyNode::Dest) == use)) { --- old/src/hotspot/share/opto/macroArrayCopy.cpp 2020-01-17 17:08:47.292135043 +0100 +++ new/src/hotspot/share/opto/macroArrayCopy.cpp 2020-01-17 17:08:47.191135048 +0100 @@ -31,6 +31,10 @@ #include "opto/macro.hpp" #include "opto/runtime.hpp" #include "utilities/align.hpp" +#if INCLUDE_SHENANDOAHGC +#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp" +#include "gc/shenandoah/shenandoahRuntime.hpp" +#endif void PhaseMacroExpand::insert_mem_bar(Node** ctrl, Node** mem, int opcode, Node* precedent) { @@ -552,7 +556,7 @@ // At this point we know we do not need type checks on oop stores. BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); - if (alloc != NULL && !bs->array_copy_requires_gc_barriers(copy_type)) { + if (alloc != NULL && !bs->array_copy_requires_gc_barriers(copy_type) && !UseShenandoahGC) { // If we do not need gc barriers, copy using the jint or jlong stub. copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT); assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type), --- old/src/hotspot/share/opto/matcher.cpp 2020-01-17 17:08:47.887135010 +0100 +++ new/src/hotspot/share/opto/matcher.cpp 2020-01-17 17:08:47.787135015 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -2254,6 +2254,14 @@ case Op_StorePConditional: case Op_StoreIConditional: case Op_StoreLConditional: +#if INCLUDE_SHENANDOAHGC + case Op_ShenandoahCompareAndExchangeP: + case Op_ShenandoahCompareAndExchangeN: + case Op_ShenandoahWeakCompareAndSwapP: + case Op_ShenandoahWeakCompareAndSwapN: + case Op_ShenandoahCompareAndSwapP: + case Op_ShenandoahCompareAndSwapN: +#endif case Op_CompareAndExchangeB: case Op_CompareAndExchangeS: case Op_CompareAndExchangeI: @@ -2497,6 +2505,14 @@ // that a monitor exit operation contains a serializing instruction. 
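Note: the hunk that continues below is in the matcher's check for store-load barriers that follow atomic operations: a CAS already serializes, so a MemBarVolatile immediately after one of these opcodes can be elided. Shenandoah's CAS nodes are distinct opcodes (roughly, they retry the compare with forwarding resolved), so each list that recognizes the plain CAS opcodes has to learn the Shenandoah variants too, or volatile accesses would keep a redundant fence. A simplified sketch of the predicate shape, assuming the opcodes named in the hunk; the real code spells the opcodes out in the if-chain that follows rather than in a helper:

  // Sketch only: does this opcode serialize like a CAS, making a
  // following StoreLoad membar redundant?
  static bool is_serializing_atomic(int xop) {
    switch (xop) {
  #if INCLUDE_SHENANDOAHGC
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
  #endif
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
      // ... remaining CompareAndExchange/CompareAndSwap opcodes ...
      return true;
    default:
      return false;
    }
  }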
if (xop == Op_MemBarVolatile || +#if INCLUDE_SHENANDOAHGC + xop == Op_ShenandoahCompareAndExchangeP || + xop == Op_ShenandoahCompareAndExchangeN || + xop == Op_ShenandoahWeakCompareAndSwapP || + xop == Op_ShenandoahWeakCompareAndSwapN || + xop == Op_ShenandoahCompareAndSwapN || + xop == Op_ShenandoahCompareAndSwapP || +#endif xop == Op_CompareAndExchangeB || xop == Op_CompareAndExchangeS || xop == Op_CompareAndExchangeI || --- old/src/hotspot/share/opto/memnode.cpp 2020-01-17 17:08:48.506134976 +0100 +++ new/src/hotspot/share/opto/memnode.cpp 2020-01-17 17:08:48.403134981 +0100 @@ -52,6 +52,9 @@ #if INCLUDE_ZGC #include "gc/z/c2/zBarrierSetC2.hpp" #endif +#if INCLUDE_SHENANDOAHGC +#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp" +#endif // Portions of code courtesy of Clifford Click @@ -1112,6 +1115,11 @@ (tp != NULL) && tp->is_ptr_to_boxed_value()) { intptr_t ignore = 0; Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore); +#if INCLUDE_SHENANDOAHGC + if (UseShenandoahGC) { + base = ((ShenandoahBarrierSetC2*) BarrierSet::barrier_set()->barrier_set_c2())->step_over_gc_barrier(base); + } +#endif if (base != NULL && base->is_Proj() && base->as_Proj()->_con == TypeFunc::Parms && base->in(0)->is_CallStaticJava() && --- old/src/hotspot/share/opto/mulnode.cpp 2020-01-17 17:08:49.151134940 +0100 +++ new/src/hotspot/share/opto/mulnode.cpp 2020-01-17 17:08:49.048134946 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,6 +31,10 @@ #include "opto/mulnode.hpp" #include "opto/phaseX.hpp" #include "opto/subnode.hpp" +#include "utilities/macros.hpp" +#if INCLUDE_SHENANDOAHGC +#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp" +#endif // Portions of code courtesy of Clifford Click @@ -474,6 +478,15 @@ Node *load = in(1); uint lop = load->Opcode(); +#if INCLUDE_SHENANDOAHGC + if (UseShenandoahGC && ShenandoahBarrierC2Support::is_gc_state_load(load)) { + // Do not touch the load+mask, we would match the whole sequence exactly. + // Converting the load to LoadUB/LoadUS would mismatch and waste a register + // on the barrier fastpath. + return NULL; + } +#endif + // Masking bits off of a Character? Hi bits are already zero. if( lop == Op_LoadUS && (mask & 0xFFFF0000) ) // Can we make a smaller mask? 
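Note on the is_gc_state_load guard added above: the Shenandoah barrier fast path tests one byte of per-thread GC state, and the AD rules are written to match the load and the mask as a single test instruction. A sketch of the ideal-graph shape being protected (the constant and offset names are assumptions for illustration):

  // gc_state fast-path test as emitted by the barriers:
  //
  //   LoadB  (thread + gc_state_offset)   // must stay a plain byte load
  //   AndI   (load, HAS_FORWARDED)        // matched together with the load
  //   CmpI / Bool / If                    // branch to the barrier slow path
  //
  // Narrowing the LoadB to LoadUB/LoadUS here would leave AndI with an
  // input shape the combined rule no longer matches, costing an extra
  // instruction and an extra register on the fast path.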
--- old/src/hotspot/share/opto/node.hpp 2020-01-17 17:08:49.753134907 +0100 +++ new/src/hotspot/share/opto/node.hpp 2020-01-17 17:08:49.652134913 +0100 @@ -143,6 +143,7 @@ class RootNode; class SafePointNode; class SafePointScalarObjectNode; +class ShenandoahBarrierNode; class StartNode; class State; class StoreNode; @@ -678,6 +679,7 @@ DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6) DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0) DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1) + DEFINE_CLASS_ID(ShenandoahBarrier, Type, 7) DEFINE_CLASS_ID(Proj, Node, 3) DEFINE_CLASS_ID(CatchProj, Proj, 0) @@ -877,6 +879,7 @@ DEFINE_CLASS_QUERY(Root) DEFINE_CLASS_QUERY(SafePoint) DEFINE_CLASS_QUERY(SafePointScalarObject) + DEFINE_CLASS_QUERY(ShenandoahBarrier) DEFINE_CLASS_QUERY(Start) DEFINE_CLASS_QUERY(Store) DEFINE_CLASS_QUERY(Sub) --- old/src/hotspot/share/opto/phaseX.cpp 2020-01-17 17:08:50.361134874 +0100 +++ new/src/hotspot/share/opto/phaseX.cpp 2020-01-17 17:08:50.256134879 +0100 @@ -2106,6 +2106,10 @@ default: break; } + if (UseShenandoahGC) { + // TODO: Should we call this for ZGC as well? + BarrierSet::barrier_set()->barrier_set_c2()->enqueue_useful_gc_barrier(igvn->_worklist, old); + } } } --- old/src/hotspot/share/opto/phasetype.hpp 2020-01-17 17:08:50.967134840 +0100 +++ new/src/hotspot/share/opto/phasetype.hpp 2020-01-17 17:08:50.867134846 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -52,6 +52,7 @@ PHASE_MATCHING, PHASE_INCREMENTAL_INLINE, PHASE_INCREMENTAL_BOXING_INLINE, + PHASE_BEFORE_BARRIER_EXPAND, PHASE_BEFORE_MACRO_EXPANSION, PHASE_END, PHASE_FAILURE, @@ -89,6 +90,7 @@ case PHASE_MATCHING: return "After matching"; case PHASE_INCREMENTAL_INLINE: return "Incremental Inline"; case PHASE_INCREMENTAL_BOXING_INLINE: return "Incremental Boxing Inline"; + case PHASE_BEFORE_BARRIER_EXPAND: return "Before Barrier Expand"; case PHASE_BEFORE_MACRO_EXPANSION: return "Before macro expansion"; case PHASE_END: return "End"; case PHASE_FAILURE: return "Failure"; --- old/src/hotspot/share/opto/type.hpp 2020-01-17 17:08:51.553134808 +0100 +++ new/src/hotspot/share/opto/type.hpp 2020-01-17 17:08:51.452134813 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -1770,6 +1770,8 @@ // UseOptoBiasInlining #define XorXNode XorLNode #define StoreXConditionalNode StoreLConditionalNode +#define LoadXNode LoadLNode +#define StoreXNode StoreLNode // Opcodes #define Op_LShiftX Op_LShiftL #define Op_AndX Op_AndL @@ -1815,6 +1817,8 @@ // UseOptoBiasInlining #define XorXNode XorINode #define StoreXConditionalNode StoreIConditionalNode +#define LoadXNode LoadINode +#define StoreXNode StoreINode // Opcodes #define Op_LShiftX Op_LShiftI #define Op_AndX Op_AndI --- old/src/hotspot/share/runtime/mutexLocker.cpp 2020-01-17 17:08:52.163134774 +0100 +++ new/src/hotspot/share/runtime/mutexLocker.cpp 2020-01-17 17:08:52.057134780 +0100 @@ -216,6 +216,14 @@ def(MarkStackFreeList_lock , PaddedMutex , leaf , true, Monitor::_safepoint_check_never); def(MarkStackChunkList_lock , PaddedMutex , leaf , true, Monitor::_safepoint_check_never); } + if (UseShenandoahGC) { + def(SATB_Q_FL_lock , PaddedMutex , access, true, Monitor::_safepoint_check_never); + def(SATB_Q_CBL_mon , PaddedMonitor, access, true, Monitor::_safepoint_check_never); + def(Shared_SATB_Q_lock , PaddedMutex , access + 1, true, Monitor::_safepoint_check_never); + + def(StringDedupQueue_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_never); + def(StringDedupTable_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never); + } def(ParGCRareEvent_lock , PaddedMutex , leaf , true, Monitor::_safepoint_check_sometimes); def(DerivedPointerTableGC_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never); def(CGCPhaseManager_lock , PaddedMonitor, leaf, false, Monitor::_safepoint_check_sometimes); --- old/src/hotspot/share/runtime/sharedRuntime.cpp 2020-01-17 17:08:52.757134742 +0100 +++ new/src/hotspot/share/runtime/sharedRuntime.cpp 2020-01-17 17:08:52.655134747 +0100 @@ -2880,6 +2880,24 @@ GCLocker::unlock_critical(thread); JRT_END +JRT_LEAF(oopDesc*, SharedRuntime::pin_object(JavaThread* thread, oopDesc* obj)) + assert(Universe::heap()->supports_object_pinning(), "Why we here?"); + assert(UseShenandoahGC, "only supported in Shenandoah for now"); + assert(obj != NULL, "Should not be null"); + oop o(obj); + o = Universe::heap()->pin_object(thread, o); + assert(o != NULL, "Should not be null"); + return o; +JRT_END + +JRT_LEAF(void, SharedRuntime::unpin_object(JavaThread* thread, oopDesc* obj)) + assert(Universe::heap()->supports_object_pinning(), "Why we here?"); + assert(UseShenandoahGC, "only supported in Shenandoah for now"); + assert(obj != NULL, "Should not be null"); + oop o(obj); + Universe::heap()->unpin_object(thread, o); +JRT_END + // ------------------------------------------------------------------------- // Java-Java calling convention // (what you use when Java calls Java) --- old/src/hotspot/share/runtime/sharedRuntime.hpp 2020-01-17 17:08:53.384134707 +0100 +++ new/src/hotspot/share/runtime/sharedRuntime.hpp 2020-01-17 17:08:53.278134713 +0100 @@ -487,6 +487,10 @@ // Block before entering a JNI critical method static void block_for_jni_critical(JavaThread* thread); + // Pin/Unpin object + static oopDesc* pin_object(JavaThread* thread, oopDesc* obj); + static void unpin_object(JavaThread* thread, oopDesc* obj); + // A compiled caller has just called the interpreter, but compiled code // exists. Patch the caller so he no longer calls into the interpreter. 
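Note on the SharedRuntime::pin_object/unpin_object entries above: Shenandoah can pin individual heap regions, so JNI critical natives can pin the objects they access instead of blocking collection through GCLocker. A sketch of the call pair as a compiled native wrapper would use it conceptually; the real calls are emitted as assembly by SharedRuntime::generate_native_wrapper, so this C++ is illustrative only:

  // Illustrative only; 'thread' and 'array' stand for the wrapper's
  // incoming JavaThread* and the critical array argument (an oopDesc*).
  oopDesc* pinned = SharedRuntime::pin_object(thread, array);
  // ... invoke the critical native with a pointer to pinned's elements ...
  SharedRuntime::unpin_object(thread, pinned);

Note that pin_object returns an oopDesc*: the wrapper must keep using the returned reference, presumably so the heap can hand back a relocated copy if the object had been forwarded.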
static void fixup_callers_callsite(Method* moop, address ret_pc); --- old/src/hotspot/share/runtime/stackValue.cpp 2020-01-17 17:08:53.982134674 +0100 +++ new/src/hotspot/share/runtime/stackValue.cpp 2020-01-17 17:08:53.878134680 +0100 @@ -32,6 +32,9 @@ #if INCLUDE_ZGC #include "gc/z/zBarrier.inline.hpp" #endif +#if INCLUDE_SHENANDOAHGC +#include "gc/shenandoah/shenandoahBarrierSet.hpp" +#endif StackValue* StackValue::create_stack_value(const frame* fr, const RegisterMap* reg_map, ScopeValue* sv) { if (sv->is_location()) { @@ -106,8 +109,15 @@ } else { value.noop = *(narrowOop*) value_addr; } - // Decode narrowoop and wrap a handle around the oop - Handle h(Thread::current(), CompressedOops::decode(value.noop)); + // Decode narrowoop + oop val = CompressedOops::decode(value.noop); + // Deoptimization must make sure all oops have passed load barriers +#if INCLUDE_SHENANDOAHGC + if (UseShenandoahGC) { + val = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(val); + } +#endif + Handle h(Thread::current(), val); // Wrap a handle around the oop return new StackValue(h); } #endif @@ -122,13 +132,17 @@ val = (oop)NULL; } #endif + // Deoptimization must make sure all oops have passed load barriers #if INCLUDE_ZGC - // Deoptimization must make sure all oop have passed load barrier if (UseZGC) { val = ZBarrier::load_barrier_on_oop_field_preloaded((oop*)value_addr, val); } #endif - +#if INCLUDE_SHENANDOAHGC + if (UseShenandoahGC) { + val = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(val); + } +#endif Handle h(Thread::current(), val); // Wrap a handle around the oop return new StackValue(h); } --- old/src/hotspot/share/runtime/vmOperations.hpp 2020-01-17 17:08:54.570134642 +0100 +++ new/src/hotspot/share/runtime/vmOperations.hpp 2020-01-17 17:08:54.467134647 +0100 @@ -99,6 +99,15 @@ template(HeapIterateOperation) \ template(ReportJavaOutOfMemory) \ template(JFRCheckpoint) \ + template(ShenandoahFullGC) \ + template(ShenandoahInitMark) \ + template(ShenandoahFinalMarkStartEvac) \ + template(ShenandoahFinalEvac) \ + template(ShenandoahInitTraversalGC) \ + template(ShenandoahFinalTraversalGC) \ + template(ShenandoahInitUpdateRefs) \ + template(ShenandoahFinalUpdateRefs) \ + template(ShenandoahDegeneratedGC) \ template(Exit) \ template(LinuxDllLoad) \ template(RotateGCLog) \ --- old/src/hotspot/share/utilities/globalDefinitions.hpp 2020-01-17 17:08:55.163134609 +0100 +++ new/src/hotspot/share/utilities/globalDefinitions.hpp 2020-01-17 17:08:55.060134615 +0100 @@ -69,6 +69,7 @@ #define UINT64_FORMAT_X "%" PRIx64 #define INT64_FORMAT_W(width) "%" #width PRId64 #define UINT64_FORMAT_W(width) "%" #width PRIu64 +#define UINT64_FORMAT_X_W(width) "%" #width PRIx64 #define PTR64_FORMAT "0x%016" PRIx64 --- old/src/hotspot/share/utilities/macros.hpp 2020-01-17 17:08:55.763134576 +0100 +++ new/src/hotspot/share/utilities/macros.hpp 2020-01-17 17:08:55.665134581 +0100 @@ -221,6 +221,24 @@ #define NOT_SERIALGC_RETURN_(code) { return code; } #endif // INCLUDE_SERIALGC +#ifndef INCLUDE_SHENANDOAHGC +#define INCLUDE_SHENANDOAHGC 1 +#endif // INCLUDE_SHENANDOAHGC + +#if INCLUDE_SHENANDOAHGC +#define SHENANDOAHGC_ONLY(x) x +#define SHENANDOAHGC_ONLY_ARG(arg) arg, +#define NOT_SHENANDOAHGC(x) +#define NOT_SHENANDOAHGC_RETURN /* next token must be ; */ +#define NOT_SHENANDOAHGC_RETURN_(code) /* next token must be ; */ +#else +#define SHENANDOAHGC_ONLY(x) +#define SHENANDOAHGC_ONLY_ARG(arg) +#define NOT_SHENANDOAHGC(x) x +#define NOT_SHENANDOAHGC_RETURN {} +#define NOT_SHENANDOAHGC_RETURN_(code) { 
return code; } +#endif // INCLUDE_SHENANDOAHGC + #ifndef INCLUDE_ZGC #define INCLUDE_ZGC 1 #endif // INCLUDE_ZGC --- old/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HSDB.java 2020-01-17 17:08:56.359134543 +0100 +++ new/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HSDB.java 2020-01-17 17:08:56.256134549 +0100 @@ -36,6 +36,7 @@ import sun.jvm.hotspot.gc.epsilon.*; import sun.jvm.hotspot.gc.parallel.*; import sun.jvm.hotspot.gc.shared.*; +import sun.jvm.hotspot.gc.shenandoah.*; import sun.jvm.hotspot.gc.g1.*; import sun.jvm.hotspot.gc.z.*; import sun.jvm.hotspot.interpreter.*; @@ -1113,6 +1114,10 @@ } else if (collHeap instanceof EpsilonHeap) { anno = "Epsilon "; bad = false; + } else if (collHeap instanceof ShenandoahHeap) { + ShenandoahHeap heap = (ShenandoahHeap) collHeap; + anno = "ShenandoahHeap "; + bad = false; } else if (collHeap instanceof ZCollectedHeap) { ZCollectedHeap heap = (ZCollectedHeap) collHeap; anno = "ZHeap "; --- old/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/CollectedHeapName.java 2020-01-17 17:08:56.986134508 +0100 +++ new/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/CollectedHeapName.java 2020-01-17 17:08:56.885134514 +0100 @@ -37,6 +37,7 @@ public static final CollectedHeapName G1 = new CollectedHeapName("G1"); public static final CollectedHeapName EPSILON = new CollectedHeapName("Epsilon"); public static final CollectedHeapName Z = new CollectedHeapName("Z"); + public static final CollectedHeapName SHENANDOAH = new CollectedHeapName("Shenandoah"); public String toString() { return name; --- old/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCCause.java 2020-01-17 17:08:57.582134476 +0100 +++ new/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCCause.java 2020-01-17 17:08:57.475134482 +0100 @@ -67,6 +67,12 @@ _z_allocation_stall ("Allocation Stall"), _z_proactive ("Proactive"), + _shenandoah_allocation_failure_evac ("Allocation Failure During Evacuation"), + _shenandoah_stop_vm ("Stopping VM"), + _shenandoah_concurrent_gc ("Concurrent GC"), + _shenandoah_traversal_gc ("Traversal GC"), + _shenandoah_upgrade_to_full_gc ("Upgrade To Full GC"), + _last_gc_cause ("ILLEGAL VALUE - last gc cause - ILLEGAL VALUE"); private final String value; --- old/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCName.java 2020-01-17 17:08:58.168134443 +0100 +++ new/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCName.java 2020-01-17 17:08:58.066134449 +0100 @@ -38,6 +38,7 @@ G1Old ("G1Old"), G1Full ("G1Full"), Z ("Z"), + Shenandoah ("Shenandoah"), NA ("N/A"), GCNameEndSentinel ("GCNameEndSentinel"); --- old/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java 2020-01-17 17:08:58.751134411 +0100 +++ new/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java 2020-01-17 17:08:58.651134417 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,7 @@ import sun.jvm.hotspot.gc.parallel.ParallelScavengeHeap; import sun.jvm.hotspot.gc.serial.SerialHeap; import sun.jvm.hotspot.gc.shared.CollectedHeap; +import sun.jvm.hotspot.gc.shenandoah.ShenandoahHeap; import sun.jvm.hotspot.gc.z.ZCollectedHeap; import sun.jvm.hotspot.oops.Oop; import sun.jvm.hotspot.runtime.BasicType; @@ -110,6 +111,7 @@ addHeapTypeIfInDB(db, G1CollectedHeap.class); addHeapTypeIfInDB(db, EpsilonHeap.class); addHeapTypeIfInDB(db, ZCollectedHeap.class); + addHeapTypeIfInDB(db, ShenandoahHeap.class); mainThreadGroupField = type.getOopField("_main_thread_group"); systemThreadGroupField = type.getOopField("_system_thread_group"); --- old/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java 2020-01-17 17:08:59.353134378 +0100 +++ new/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java 2020-01-17 17:08:59.251134384 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,7 @@ import sun.jvm.hotspot.gc.shared.*; import sun.jvm.hotspot.gc.epsilon.*; import sun.jvm.hotspot.gc.g1.*; +import sun.jvm.hotspot.gc.shenandoah.*; import sun.jvm.hotspot.gc.parallel.*; import sun.jvm.hotspot.memory.*; import sun.jvm.hotspot.runtime.*; @@ -439,6 +440,10 @@ } else if (heap instanceof G1CollectedHeap) { G1CollectedHeap g1h = (G1CollectedHeap) heap; g1h.heapRegionIterate(lrc); + } else if (heap instanceof ShenandoahHeap) { + // Operation (currently) not supported with Shenandoah GC. Print + // a warning and leave the list of live regions empty. 
+ System.err.println("Warning: Operation not supported with Shenandoah GC"); } else if (heap instanceof EpsilonHeap) { EpsilonHeap eh = (EpsilonHeap) heap; liveRegions.add(eh.space().top()); --- old/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VMOps.java 2020-01-17 17:08:59.948134345 +0100 +++ new/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VMOps.java 2020-01-17 17:08:59.847134351 +0100 @@ -55,6 +55,7 @@ CMS_Final_Remark, G1CollectFull, ZOperation, + ShenandoahOperation, G1CollectForAllocation, G1IncCollectionPause, EnableBiasedLocking, --- old/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java 2020-01-17 17:09:00.546134312 +0100 +++ new/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java 2020-01-17 17:09:00.445134318 +0100 @@ -29,6 +29,7 @@ import sun.jvm.hotspot.gc.g1.*; import sun.jvm.hotspot.gc.parallel.*; import sun.jvm.hotspot.gc.serial.*; +import sun.jvm.hotspot.gc.shenandoah.*; import sun.jvm.hotspot.gc.shared.*; import sun.jvm.hotspot.gc.z.*; import sun.jvm.hotspot.debugger.JVMDebugger; @@ -83,7 +84,11 @@ printValMB("MetaspaceSize = ", getFlagValue("MetaspaceSize", flagMap)); printValMB("CompressedClassSpaceSize = ", getFlagValue("CompressedClassSpaceSize", flagMap)); printValMB("MaxMetaspaceSize = ", getFlagValue("MaxMetaspaceSize", flagMap)); - printValMB("G1HeapRegionSize = ", HeapRegion.grainBytes()); + if (heap instanceof ShenandoahHeap) { + printValMB("ShenandoahRegionSize = ", ShenandoahHeapRegion.regionSizeBytes()); + } else { + printValMB("G1HeapRegionSize = ", HeapRegion.grainBytes()); + } System.out.println(); System.out.println("Heap Usage:"); @@ -126,6 +131,14 @@ printValMB("used = ", oldGen.used()); printValMB("free = ", oldFree); System.out.println(alignment + (double)oldGen.used() * 100.0 / oldGen.capacity() + "% used"); + } else if (heap instanceof ShenandoahHeap) { + ShenandoahHeap sh = (ShenandoahHeap) heap; + long num_regions = sh.numOfRegions(); + System.out.println("Shenandoah Heap:"); + System.out.println(" regions = " + num_regions); + printValMB("capacity = ", num_regions * ShenandoahHeapRegion.regionSizeBytes()); + printValMB("used = ", sh.used()); + printValMB("committed = ", sh.committed()); } else if (heap instanceof EpsilonHeap) { EpsilonHeap eh = (EpsilonHeap) heap; printSpace(eh.space()); @@ -181,6 +194,14 @@ l = getFlagValue("ParallelGCThreads", flagMap); System.out.println("with " + l + " thread(s)"); return; + } + + l = getFlagValue("UseShenandoahGC", flagMap); + if (l == 1L) { + System.out.print("Shenandoah GC "); + l = getFlagValue("ParallelGCThreads", flagMap); + System.out.println("with " + l + " thread(s)"); + return; } System.out.println("Mark Sweep Compact GC"); --- old/src/jdk.jfr/share/conf/jfr/default.jfc 2020-01-17 17:09:01.145134279 +0100 +++ new/src/jdk.jfr/share/conf/jfr/default.jfc 2020-01-17 17:09:01.046134285 +0100 @@ -420,6 +420,15 @@ false + <event name="jdk.ShenandoahHeapRegionInformation"> + <setting name="enabled">false</setting> + <setting name="period">everyChunk</setting> + </event> + + <event name="jdk.ShenandoahHeapRegionStateChange"> + <setting name="enabled">false</setting> + </event> + true false --- old/src/jdk.jfr/share/conf/jfr/profile.jfc 2020-01-17 17:09:01.744134246 +0100 +++ new/src/jdk.jfr/share/conf/jfr/profile.jfc 2020-01-17 17:09:01.643134252 +0100 @@ -420,6 +420,15 @@ false + <event name="jdk.ShenandoahHeapRegionInformation"> + <setting name="enabled">false</setting> + <setting name="period">everyChunk</setting> + </event> + + <event name="jdk.ShenandoahHeapRegionStateChange"> + <setting name="enabled">false</setting> + </event> + true true --- old/test/hotspot/jtreg/TEST.ROOT 2020-01-17 17:09:02.340134213 +0100 +++ new/test/hotspot/jtreg/TEST.ROOT 2020-01-17 17:09:02.242134219 +0100 @@ -48,6 +48,7 @@ vm.gc.Serial \ vm.gc.Parallel \ vm.gc.ConcMarkSweep \ + vm.gc.Shenandoah \ vm.gc.Epsilon \ vm.gc.Z \ vm.jvmci \ --- old/test/hotspot/jtreg/TEST.groups 2020-01-17 
17:09:02.922134181 +0100 +++ new/test/hotspot/jtreg/TEST.groups 2020-01-17 17:09:02.820134187 +0100 @@ -175,7 +175,8 @@ :tier1_gc_1 \ :tier1_gc_2 \ :tier1_gc_gcold \ - :tier1_gc_gcbasher + :tier1_gc_gcbasher \ + :tier1_gc_shenandoah hotspot_not_fast_gc = \ :hotspot_gc \ @@ -195,7 +196,8 @@ -gc/stress \ -gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java \ -gc/cms/TestMBeanCMS.java \ - -gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java + -gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java \ + -gc/shenandoah tier1_gc_gcold = \ gc/stress/gcold/TestGCOldWithG1.java \ @@ -208,6 +210,50 @@ gc/stress/gcbasher/TestGCBasherWithCMS.java \ gc/stress/gcbasher/TestGCBasherWithSerial.java \ gc/stress/gcbasher/TestGCBasherWithParallel.java + +tier1_gc_shenandoah = \ + gc/shenandoah/options/ \ + gc/shenandoah/compiler/ \ + gc/shenandoah/mxbeans/ \ + gc/shenandoah/TestSmallHeap.java \ + gc/shenandoah/oom/ + +tier2_gc_shenandoah = \ + runtime/MemberName/MemberNameLeak.java \ + runtime/CompressedOops/UseCompressedOops.java \ + gc/TestHumongousReferenceObject.java \ + gc/TestSystemGC.java \ + gc/arguments/TestDisableDefaultGC.java \ + gc/arguments/TestUseCompressedOopsErgo.java \ + gc/arguments/TestAlignmentToUseLargePages.java \ + gc/class_unloading/TestClassUnloadingDisabled.java \ + gc/ergonomics/TestInitialGCThreadLogging.java \ + gc/ergonomics/TestDynamicNumberOfGCThreads.java \ + gc/logging/TestGCId.java \ + gc/metaspace/TestMetaspacePerfCounters.java \ + gc/startup_warnings/TestShenandoah.java \ + gc/TestFullGCALot.java \ + gc/logging/TestUnifiedLoggingSwitchStress.java \ + runtime/Metaspace/DefineClass.java \ + gc/shenandoah/ \ + serviceability/sa/TestHeapDumpForInvokeDynamic.java \ + -gc/shenandoah/TestStringDedupStress.java \ + -gc/shenandoah/jni/CriticalNativeStress.java \ + -:tier1_gc_shenandoah + +tier3_gc_shenandoah = \ + gc/stress/gcold/TestGCOldWithShenandoah.java \ + gc/stress/gcbasher/TestGCBasherWithShenandoah.java \ + gc/stress/gclocker/TestGCLockerWithShenandoah.java \ + gc/stress/systemgc/TestSystemGCWithShenandoah.java \ + gc/shenandoah/TestStringDedupStress.java \ + gc/shenandoah/jni/CriticalNativeStress.java \ + -:tier2_gc_shenandoah + +hotspot_gc_shenandoah = \ + :tier1_gc_shenandoah \ + :tier2_gc_shenandoah \ + :tier3_gc_shenandoah tier1_runtime = \ runtime/ \ --- old/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatiles.java 2020-01-17 17:09:03.511134149 +0100 +++ new/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatiles.java 2020-01-17 17:09:03.407134155 +0100 @@ -39,7 +39,9 @@ * CMS, * CMSCondMark, * Serial, - * Parallel} + * Parallel, + * Shenandoah, + * ShenandoahTraversal} */ @@ -100,6 +102,18 @@ procArgs[argcount - 3] = "-XX:+UseConcMarkSweepGC"; procArgs[argcount - 2] = "-XX:+UseCondCardMark"; break; + case "Shenandoah": + argcount = 8; + procArgs = new String[argcount]; + procArgs[argcount - 2] = "-XX:+UseShenandoahGC"; + break; + case "ShenandoahTraversal": + argcount = 10; + procArgs = new String[argcount]; + procArgs[argcount - 4] = "-XX:+UseShenandoahGC"; + procArgs[argcount - 3] = "-XX:+UnlockExperimentalVMOptions"; + procArgs[argcount - 2] = "-XX:ShenandoahGCMode=traversal"; + break; default: throw new RuntimeException("unexpected test type " + testType); } @@ -355,6 +369,17 @@ "ret" }; break; + case "Shenandoah": + case "ShenandoahTraversal": + // Shenandoah generates normal object graphs for + // volatile stores + matches = new String[] { + "membar_release (elided)", + "stlrw", 
"membar_volatile (elided)", + "ret" + }; + break; } } else { switch (testType) { @@ -418,6 +443,20 @@ "ret" }; break; + + case "Shenandoah": + case "ShenandoahTraversal": + // Shenandoah generates normal object graphs for + // volatile stores + matches = new String[] { + "membar_release", + "dmb ish", + "strw", + "membar_volatile", + "dmb ish", + "ret" + }; + break; } } @@ -520,6 +559,17 @@ "ret" }; break; + case "Shenandoah": + case "ShenandoahTraversal": + // For volatile CAS, Shenanodoah generates normal + // graphs with a shenandoah-specific cmpxchg + matches = new String[] { + "membar_release (elided)", + "cmpxchgw_acq_shenandoah", + "membar_acquire (elided)", + "ret" + }; + break; } } else { switch (testType) { @@ -762,6 +812,19 @@ "membar_acquire", "dmb ish", "ret" + }; + break; + case "Shenandoah": + case "ShenandoahTraversal": + // For volatile CAS, Shenanodoah generates normal + // graphs with a shenandoah-specific cmpxchg + matches = new String[] { + "membar_release", + "dmb ish", + "cmpxchgw_shenandoah", + "membar_acquire", + "dmb ish", + "ret" }; break; } --- old/test/hotspot/jtreg/gc/TestFullGCCount.java 2020-01-17 17:09:04.114134116 +0100 +++ new/test/hotspot/jtreg/gc/TestFullGCCount.java 2020-01-17 17:09:04.012134121 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,9 @@ * @test TestFullGCCount.java * @bug 7072527 * @summary CMS: JMM GC counters overcount in some cases - * @requires !(vm.gc.ConcMarkSweep & vm.opt.ExplicitGCInvokesConcurrent == true) + * @requires !(vm.gc == "ConcMarkSweep" & vm.opt.ExplicitGCInvokesConcurrent == true) + * @comment Shenandoah has "ExplicitGCInvokesConcurrent" on by default + * @requires !(vm.gc == "Shenandoah" & vm.opt.ExplicitGCInvokesConcurrent != false) * @modules java.management * @run main/othervm -Xlog:gc TestFullGCCount */ --- old/test/hotspot/jtreg/gc/TestHumongousReferenceObject.java 2020-01-17 17:09:04.694134084 +0100 +++ new/test/hotspot/jtreg/gc/TestHumongousReferenceObject.java 2020-01-17 17:09:04.595134089 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,6 +35,16 @@ * @run main/othervm -XX:+EnableContended -XX:-RestrictContended -Xmx128m -XX:+UseG1GC -XX:G1HeapRegionSize=4M -XX:ContendedPaddingWidth=8192 TestHumongousReferenceObject * @run main/othervm -XX:+EnableContended -XX:-RestrictContended -Xmx128m -XX:+UseG1GC -XX:G1HeapRegionSize=8M -XX:ContendedPaddingWidth=8192 TestHumongousReferenceObject */ + +/* + * @test TestHumongousReferenceObjectShenandoah + * @summary Test that verifies that iteration over large, plain Java objects, that potentially cross region boundaries, with references in them works. 
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @bug 8151499 8153734 + * @modules java.base/jdk.internal.vm.annotation + * @run main/othervm -XX:+EnableContended -XX:-RestrictContended -Xms128m -Xmx128m -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahHeapRegionSize=8M -XX:ContendedPaddingWidth=8192 TestHumongousReferenceObject + * @run main/othervm -XX:+EnableContended -XX:-RestrictContended -Xms128m -Xmx128m -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahHeapRegionSize=8M -XX:ContendedPaddingWidth=8192 -XX:+UnlockDiagnosticVMOptions -XX:+ShenandoahVerify TestHumongousReferenceObject + */ public class TestHumongousReferenceObject { /* --- old/test/hotspot/jtreg/gc/TestSystemGC.java 2020-01-17 17:09:05.281134051 +0100 +++ new/test/hotspot/jtreg/gc/TestSystemGC.java 2020-01-17 17:09:05.180134057 +0100 @@ -45,6 +45,14 @@ * @run main/othervm -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent TestSystemGC */ +/* + * @test TestSystemGCShenandoah + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @summary Runs System.gc() with different flags. + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC TestSystemGC + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ExplicitGCInvokesConcurrent TestSystemGC + */ public class TestSystemGC { public static void main(String args[]) throws Exception { System.gc(); --- old/test/hotspot/jtreg/gc/arguments/TestAlignmentToUseLargePages.java 2020-01-17 17:09:05.865134019 +0100 +++ new/test/hotspot/jtreg/gc/arguments/TestAlignmentToUseLargePages.java 2020-01-17 17:09:05.763134025 +0100 @@ -48,6 +48,16 @@ * @run main/othervm -Xms71M -Xmx91M -XX:+UseConcMarkSweepGC -XX:-UseLargePages TestAlignmentToUseLargePages */ +/** + * @test TestAlignmentToUseLargePagesShenandoah + * @key gc + * @bug 8024396 + * @comment Graal does not support Shenandoah + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @run main/othervm -Xms71M -Xmx91M -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UseLargePages TestAlignmentToUseLargePages + * @run main/othervm -Xms71M -Xmx91M -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:-UseLargePages TestAlignmentToUseLargePages + */ + public class TestAlignmentToUseLargePages { public static void main(String args[]) throws Exception { // nothing to do --- old/test/hotspot/jtreg/gc/arguments/TestDisableDefaultGC.java 2020-01-17 17:09:06.462133986 +0100 +++ new/test/hotspot/jtreg/gc/arguments/TestDisableDefaultGC.java 2020-01-17 17:09:06.360133992 +0100 @@ -44,6 +44,7 @@ "-XX:-UseG1GC", "-XX:-UseConcMarkSweepGC", "-XX:+UnlockExperimentalVMOptions", + "-XX:-UseShenandoahGC", "-XX:-UseZGC", "-version"); OutputAnalyzer output = new OutputAnalyzer(pb.start()); --- old/test/hotspot/jtreg/gc/arguments/TestMaxMinHeapFreeRatioFlags.java 2020-01-17 17:09:07.048133954 +0100 +++ new/test/hotspot/jtreg/gc/arguments/TestMaxMinHeapFreeRatioFlags.java 2020-01-17 17:09:06.946133960 +0100 @@ -25,7 +25,7 @@ * @test TestMaxMinHeapFreeRatioFlags * @key gc * @summary Verify that heap size changes according to max and min heap free ratios. 
- * @requires vm.gc != "Z" + * @requires vm.gc != "Z" & vm.gc != "Shenandoah" * @library /test/lib * @modules java.base/jdk.internal.misc * java.management --- old/test/hotspot/jtreg/gc/arguments/TestNewRatioFlag.java 2020-01-17 17:09:07.640133921 +0100 +++ new/test/hotspot/jtreg/gc/arguments/TestNewRatioFlag.java 2020-01-17 17:09:07.539133927 +0100 @@ -26,7 +26,7 @@ * @key gc * @bug 8025166 * @summary Verify that heap devided among generations according to NewRatio - * @requires vm.gc != "Z" + * @requires vm.gc != "Z" & vm.gc != "Shenandoah" * @library /test/lib * @modules java.base/jdk.internal.misc * java.management --- old/test/hotspot/jtreg/gc/arguments/TestNewSizeFlags.java 2020-01-17 17:09:08.204133890 +0100 +++ new/test/hotspot/jtreg/gc/arguments/TestNewSizeFlags.java 2020-01-17 17:09:08.109133896 +0100 @@ -26,7 +26,7 @@ * @key gc * @bug 8025166 * @summary Verify that young gen size conforms values specified by NewSize, MaxNewSize and Xmn options - * @requires vm.gc != "Z" + * @requires vm.gc != "Z" & vm.gc != "Shenandoah" * @library /test/lib * @modules java.base/jdk.internal.misc * java.management --- old/test/hotspot/jtreg/gc/arguments/TestShrinkHeapInSteps.java 2020-01-17 17:09:08.782133858 +0100 +++ new/test/hotspot/jtreg/gc/arguments/TestShrinkHeapInSteps.java 2020-01-17 17:09:08.679133864 +0100 @@ -25,7 +25,7 @@ * @test TestShrinkHeapInSteps * @key gc * @summary Verify that -XX:-ShrinkHeapInSteps works properly. - * @requires vm.gc != "Z" + * @requires vm.gc != "Z" & vm.gc != "Shenandoah" * @library /test/lib * @modules java.base/jdk.internal.misc * java.management --- old/test/hotspot/jtreg/gc/arguments/TestSurvivorRatioFlag.java 2020-01-17 17:09:09.368133826 +0100 +++ new/test/hotspot/jtreg/gc/arguments/TestSurvivorRatioFlag.java 2020-01-17 17:09:09.266133832 +0100 @@ -25,7 +25,7 @@ * @test TestSurvivorRatioFlag * @key gc * @summary Verify that actual survivor ratio is equal to specified SurvivorRatio value - * @requires vm.gc != "Z" + * @requires vm.gc != "Z" & vm.gc != "Shenandoah" * @library /test/lib * @modules java.base/jdk.internal.misc * java.management --- old/test/hotspot/jtreg/gc/arguments/TestTargetSurvivorRatioFlag.java 2020-01-17 17:09:09.947133794 +0100 +++ new/test/hotspot/jtreg/gc/arguments/TestTargetSurvivorRatioFlag.java 2020-01-17 17:09:09.845133800 +0100 @@ -27,7 +27,7 @@ * @summary Verify that option TargetSurvivorRatio affects survivor space occupancy after minor GC. 
* @requires (vm.opt.ExplicitGCInvokesConcurrent == null) | (vm.opt.ExplicitGCInvokesConcurrent == false) * @requires (vm.opt.UseJVMCICompiler == null) | (vm.opt.UseJVMCICompiler == false) - * @requires vm.gc != "Z" + * @requires vm.gc != "Z" & vm.gc != "Shenandoah" * @library /test/lib * @modules java.base/jdk.internal.misc * java.management --- old/test/hotspot/jtreg/gc/arguments/TestUseCompressedOopsErgo.java 2020-01-17 17:09:10.529133762 +0100 +++ new/test/hotspot/jtreg/gc/arguments/TestUseCompressedOopsErgo.java 2020-01-17 17:09:10.426133768 +0100 @@ -54,6 +54,21 @@ * @run main/othervm TestUseCompressedOopsErgo -XX:+UseConcMarkSweepGC */ +/* + * @test TestUseCompressedOopsErgoShenandoah + * @key gc + * @bug 8010722 + * @comment Graal does not support Shenandoah + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.management/sun.management + * @build sun.hotspot.WhiteBox + * @run driver ClassFileInstaller sun.hotspot.WhiteBox + * sun.hotspot.WhiteBox$WhiteBoxPermission + * @run main/othervm TestUseCompressedOopsErgo -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC + */ + public class TestUseCompressedOopsErgo { public static void main(String args[]) throws Exception { --- old/test/hotspot/jtreg/gc/arguments/TestVerifyBeforeAndAfterGCFlags.java 2020-01-17 17:09:11.112133730 +0100 +++ new/test/hotspot/jtreg/gc/arguments/TestVerifyBeforeAndAfterGCFlags.java 2020-01-17 17:09:11.009133736 +0100 @@ -28,7 +28,7 @@ * @summary Runs an simple application (GarbageProducer) with various combinations of -XX:{+|-}Verify{After|Before}GC flags and checks that output contain or doesn't contain expected patterns - * @requires vm.gc != "Z" + * @requires vm.gc != "Z" & vm.gc != "Shenandoah" * @modules java.base/jdk.internal.misc * @modules java.management * @library /test/lib --- old/test/hotspot/jtreg/gc/class_unloading/TestClassUnloadingDisabled.java 2020-01-17 17:09:11.692133698 +0100 +++ new/test/hotspot/jtreg/gc/class_unloading/TestClassUnloadingDisabled.java 2020-01-17 17:09:11.591133704 +0100 @@ -64,6 +64,24 @@ * -XX:-ClassUnloading -XX:+UseConcMarkSweepGC TestClassUnloadingDisabled */ +/* + * @test TestClassUnloadingDisabledShenandoah + * @key gc + * @bug 8114823 + * @comment Graal does not support Shenandoah + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @requires vm.opt.ExplicitGCInvokesConcurrent != true + * @requires vm.opt.ClassUnloading != true + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.management + * @build sun.hotspot.WhiteBox + * @run driver ClassFileInstaller sun.hotspot.WhiteBox + * sun.hotspot.WhiteBox$WhiteBoxPermission + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI + * -XX:-ClassUnloading -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC TestClassUnloadingDisabled + */ + import java.io.File; import java.io.IOException; import java.nio.file.Files; --- old/test/hotspot/jtreg/gc/ergonomics/TestDynamicNumberOfGCThreads.java 2020-01-17 17:09:12.282133666 +0100 +++ new/test/hotspot/jtreg/gc/ergonomics/TestDynamicNumberOfGCThreads.java 2020-01-17 17:09:12.181133671 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -29,10 +29,14 @@ * @key gc * @modules java.base/jdk.internal.misc * @library /test/lib + * @build sun.hotspot.WhiteBox + * @run driver ClassFileInstaller sun.hotspot.WhiteBox sun.hotspot.WhiteBox$WhiteBoxPermission + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI TestDynamicNumberOfGCThreads */ import jdk.test.lib.process.OutputAnalyzer; import jdk.test.lib.process.ProcessTools; +import sun.hotspot.gc.GC; public class TestDynamicNumberOfGCThreads { public static void main(String[] args) throws Exception { @@ -42,6 +46,10 @@ testDynamicNumberOfGCThreads("UseG1GC"); testDynamicNumberOfGCThreads("UseParallelGC"); + + if (GC.Shenandoah.isSupported()) { + testDynamicNumberOfGCThreads("UseShenandoahGC"); + } } private static void verifyDynamicNumberOfGCThreads(OutputAnalyzer output) { @@ -51,7 +59,7 @@ private static void testDynamicNumberOfGCThreads(String gcFlag) throws Exception { // UseDynamicNumberOfGCThreads and TraceDynamicGCThreads enabled - String[] baseArgs = {"-XX:+" + gcFlag, "-Xmx10M", "-XX:+UseDynamicNumberOfGCThreads", "-Xlog:gc+task=trace", GCTest.class.getName()}; + String[] baseArgs = {"-XX:+UnlockExperimentalVMOptions", "-XX:+" + gcFlag, "-Xmx10M", "-XX:+UseDynamicNumberOfGCThreads", "-Xlog:gc+task=trace", GCTest.class.getName()}; // Base test with gc and +UseDynamicNumberOfGCThreads: ProcessBuilder pb_enabled = ProcessTools.createJavaProcessBuilder(baseArgs); --- old/test/hotspot/jtreg/gc/ergonomics/TestInitialGCThreadLogging.java 2020-01-17 17:09:12.872133633 +0100 +++ new/test/hotspot/jtreg/gc/ergonomics/TestInitialGCThreadLogging.java 2020-01-17 17:09:12.772133639 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,10 +29,14 @@ * @key gc * @modules java.base/jdk.internal.misc * @library /test/lib + * @build sun.hotspot.WhiteBox + * @run driver ClassFileInstaller sun.hotspot.WhiteBox sun.hotspot.WhiteBox$WhiteBoxPermission + * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI TestInitialGCThreadLogging */ import jdk.test.lib.process.ProcessTools; import jdk.test.lib.process.OutputAnalyzer; +import sun.hotspot.gc.GC; public class TestInitialGCThreadLogging { public static void main(String[] args) throws Exception { @@ -42,6 +46,10 @@ testInitialGCThreadLogging("UseG1GC", "GC Thread"); testInitialGCThreadLogging("UseParallelGC", "ParGC Thread"); + + if (GC.Shenandoah.isSupported()) { + testInitialGCThreadLogging("UseShenandoahGC", "Shenandoah GC Thread"); + } } private static void verifyDynamicNumberOfGCThreads(OutputAnalyzer output, String threadName) { @@ -51,7 +59,7 @@ private static void testInitialGCThreadLogging(String gcFlag, String threadName) throws Exception { // UseDynamicNumberOfGCThreads and TraceDynamicGCThreads enabled - String[] baseArgs = {"-XX:+" + gcFlag, "-Xmx10M", "-XX:+UseDynamicNumberOfGCThreads", "-Xlog:gc+task=trace", "-version"}; + String[] baseArgs = {"-XX:+UnlockExperimentalVMOptions", "-XX:+" + gcFlag, "-Xmx10M", "-XX:+UseDynamicNumberOfGCThreads", "-Xlog:gc+task=trace", "-version"}; // Base test with gc and +UseDynamicNumberOfGCThreads: ProcessBuilder pb_enabled = ProcessTools.createJavaProcessBuilder(baseArgs); --- old/test/hotspot/jtreg/gc/logging/TestGCId.java 2020-01-17 17:09:13.445133602 +0100 +++ new/test/hotspot/jtreg/gc/logging/TestGCId.java 2020-01-17 17:09:13.348133607 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,10 +30,14 @@ * @library /test/lib * @modules java.base/jdk.internal.misc * java.management + * @build sun.hotspot.WhiteBox + * @run driver ClassFileInstaller sun.hotspot.WhiteBox sun.hotspot.WhiteBox$WhiteBoxPermission + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI TestGCId */ import jdk.test.lib.process.OutputAnalyzer; import jdk.test.lib.process.ProcessTools; +import sun.hotspot.gc.GC; public class TestGCId { public static void main(String[] args) throws Exception { @@ -41,6 +45,9 @@ testGCId("UseG1GC"); testGCId("UseConcMarkSweepGC"); testGCId("UseSerialGC"); + if (GC.Shenandoah.isSupported()) { + testGCId("UseShenandoahGC"); + } } private static void verifyContainsGCIDs(OutputAnalyzer output) { @@ -51,7 +58,7 @@ private static void testGCId(String gcFlag) throws Exception { ProcessBuilder pb_default = - ProcessTools.createJavaProcessBuilder("-XX:+" + gcFlag, "-Xlog:gc", "-Xmx10M", GCTest.class.getName()); + ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", "-XX:+" + gcFlag, "-Xlog:gc", "-Xmx10M", GCTest.class.getName()); verifyContainsGCIDs(new OutputAnalyzer(pb_default.start())); } --- old/test/hotspot/jtreg/gc/metaspace/TestMetaspacePerfCounters.java 2020-01-17 17:09:14.044133569 +0100 +++ new/test/hotspot/jtreg/gc/metaspace/TestMetaspacePerfCounters.java 2020-01-17 17:09:13.941133574 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -53,6 +53,20 @@ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters */ + +/* @test TestMetaspacePerfCountersShenandoah + * @bug 8014659 + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @library /test/lib / + * @summary Tests that performance counters for metaspace and compressed class + * space exists and works. + * @modules java.base/jdk.internal.misc + * java.compiler + * java.management/sun.management + * jdk.internal.jvmstat/sun.jvmstat.monitor + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC TestMetaspacePerfCounters + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC TestMetaspacePerfCounters + */ public class TestMetaspacePerfCounters { public static Class fooClass = null; private static final String[] counterNames = {"minCapacity", "maxCapacity", "capacity", "used"}; --- old/test/hotspot/jtreg/gc/survivorAlignment/TestAllocationInEden.java 2020-01-17 17:09:14.635133536 +0100 +++ new/test/hotspot/jtreg/gc/survivorAlignment/TestAllocationInEden.java 2020-01-17 17:09:14.534133542 +0100 @@ -26,7 +26,7 @@ * @bug 8031323 * @summary Verify that object's alignment in eden space is not affected by * SurvivorAlignmentInBytes option. - * @requires vm.gc != "Z" + * @requires vm.gc != "Z" & vm.gc != "Shenandoah" * @library /test/lib * @modules java.base/jdk.internal.misc * java.management --- old/test/hotspot/jtreg/gc/survivorAlignment/TestPromotionFromEdenToTenured.java 2020-01-17 17:09:15.218133504 +0100 +++ new/test/hotspot/jtreg/gc/survivorAlignment/TestPromotionFromEdenToTenured.java 2020-01-17 17:09:15.117133509 +0100 @@ -26,7 +26,7 @@ * @bug 8031323 * @summary Verify that objects promoted from eden space to tenured space during * full GC are not aligned to SurvivorAlignmentInBytes value. - * @requires vm.gc != "Z" + * @requires vm.gc != "Z" & vm.gc != "Shenandoah" * @library /test/lib * @modules java.base/jdk.internal.misc * java.management --- old/test/hotspot/jtreg/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterFullGC.java 2020-01-17 17:09:15.802133472 +0100 +++ new/test/hotspot/jtreg/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterFullGC.java 2020-01-17 17:09:15.701133477 +0100 @@ -26,7 +26,7 @@ * @bug 8031323 * @summary Verify that objects promoted from survivor space to tenured space * during full GC are not aligned to SurvivorAlignmentInBytes value. 
- * @requires vm.gc != "Z" + * @requires vm.gc != "Z" & vm.gc != "Shenandoah" * @library /test/lib * @modules java.base/jdk.internal.misc * java.management --- old/test/hotspot/jtreg/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java 2020-01-17 17:09:16.387133439 +0100 +++ new/test/hotspot/jtreg/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java 2020-01-17 17:09:16.285133445 +0100 @@ -27,7 +27,7 @@ * @summary Verify that objects promoted from survivor space to tenured space * when their age exceeded tenuring threshold are not aligned to * SurvivorAlignmentInBytes value. - * @requires vm.gc != "Z" + * @requires vm.gc != "Z" & vm.gc != "Shenandoah" * @library /test/lib * @modules java.base/jdk.internal.misc * java.management --- old/test/hotspot/jtreg/gc/survivorAlignment/TestPromotionToSurvivor.java 2020-01-17 17:09:16.964133408 +0100 +++ new/test/hotspot/jtreg/gc/survivorAlignment/TestPromotionToSurvivor.java 2020-01-17 17:09:16.866133413 +0100 @@ -26,7 +26,7 @@ * @bug 8031323 * @summary Verify that objects promoted from eden space to survivor space after * minor GC are aligned to SurvivorAlignmentInBytes. - * @requires vm.gc != "Z" + * @requires vm.gc != "Z" & vm.gc != "Shenandoah" * @library /test/lib * @modules java.base/jdk.internal.misc * java.management --- old/test/hotspot/jtreg/gc/whitebox/TestWBGC.java 2020-01-17 17:09:17.548133375 +0100 +++ new/test/hotspot/jtreg/gc/whitebox/TestWBGC.java 2020-01-17 17:09:17.446133381 +0100 @@ -25,7 +25,7 @@ * @test TestWBGC * @bug 8055098 * @summary Test verify that WB methods isObjectInOldGen and youngGC works correctly. - * @requires vm.gc != "Z" + * @requires vm.gc != "Z" & vm.gc != "Shenandoah" * @library /test/lib * @modules java.base/jdk.internal.misc * java.management --- old/test/hotspot/jtreg/resourcehogs/serviceability/sa/TestHeapDumpForLargeArray.java 2020-01-17 17:09:18.151133342 +0100 +++ new/test/hotspot/jtreg/resourcehogs/serviceability/sa/TestHeapDumpForLargeArray.java 2020-01-17 17:09:18.041133348 +0100 @@ -47,6 +47,7 @@ * @library /test/lib * @bug 8171084 * @requires vm.hasSAandCanAttach & (vm.bits == "64" & os.maxMemory > 8g) + * @requires vm.gc != "Shenandoah" * @modules java.base/jdk.internal.misc * jdk.hotspot.agent/sun.jvm.hotspot * jdk.hotspot.agent/sun.jvm.hotspot.utilities --- old/test/hotspot/jtreg/runtime/CompressedOops/UseCompressedOops.java 2020-01-17 17:09:18.735133310 +0100 +++ new/test/hotspot/jtreg/runtime/CompressedOops/UseCompressedOops.java 2020-01-17 17:09:18.638133315 +0100 @@ -37,6 +37,7 @@ import jdk.test.lib.Platform; import jdk.test.lib.process.ProcessTools; import jdk.test.lib.process.OutputAnalyzer; +import sun.hotspot.gc.GC; import sun.hotspot.code.Compiler; @@ -61,6 +62,9 @@ testCompressedOopsModes(args, "-XX:+UseSerialGC"); testCompressedOopsModes(args, "-XX:+UseParallelGC"); testCompressedOopsModes(args, "-XX:+UseParallelOldGC"); + if (GC.Shenandoah.isSupported()) { + testCompressedOopsModes(args, "-XX:+UnlockExperimentalVMOptions", "-XX:+UseShenandoahGC"); + } } public static void testCompressedOopsModes(ArrayList flags1, String... 
flags2) throws Exception { --- old/test/hotspot/jtreg/runtime/MemberName/MemberNameLeak.java 2020-01-17 17:09:19.331133277 +0100 +++ new/test/hotspot/jtreg/runtime/MemberName/MemberNameLeak.java 2020-01-17 17:09:19.228133283 +0100 @@ -36,6 +36,7 @@ import jdk.test.lib.process.ProcessTools; import sun.hotspot.code.Compiler; +import sun.hotspot.gc.GC; public class MemberNameLeak { static class Leak { @@ -61,6 +62,7 @@ // Run this Leak class with logging ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( "-Xlog:membername+table=trace", + "-XX:+UnlockExperimentalVMOptions", gc, Leak.class.getName()); OutputAnalyzer output = new OutputAnalyzer(pb.start()); output.shouldContain("ResolvedMethod entry added for MemberNameLeak$Leak.callMe()V"); @@ -73,8 +75,11 @@ test("-XX:+UseG1GC"); test("-XX:+UseParallelGC"); test("-XX:+UseSerialGC"); - if (!Compiler.isGraalEnabled()) { // Graal does not support CMS + if (!Compiler.isGraalEnabled()) { // Graal does not support CMS and Shenandoah test("-XX:+UseConcMarkSweepGC"); + if (GC.Shenandoah.isSupported()) { + test("-XX:+UseShenandoahGC"); + } } } } --- old/test/hotspot/jtreg/serviceability/sa/ClhsdbJhisto.java 2020-01-17 17:09:19.920133245 +0100 +++ new/test/hotspot/jtreg/serviceability/sa/ClhsdbJhisto.java 2020-01-17 17:09:19.819133250 +0100 @@ -34,6 +34,7 @@ * @bug 8191658 * @summary Test clhsdb jhisto command * @requires vm.hasSA + * @requires vm.gc != "Shenandoah" * @library /test/lib * @run main/othervm ClhsdbJhisto */ --- old/test/hotspot/jtreg/serviceability/tmtools/jstat/GcCapacityTest.java 2020-01-17 17:09:20.513133212 +0100 +++ new/test/hotspot/jtreg/serviceability/tmtools/jstat/GcCapacityTest.java 2020-01-17 17:09:20.409133218 +0100 @@ -27,7 +27,7 @@ * @test * @summary Test checks the consistency of the output * displayed with jstat -gccapacity. - * @requires vm.gc != "Z" + * @requires vm.gc != "Z" & vm.gc != "Shenandoah" * @modules java.base/jdk.internal.misc * @library /test/lib * @library ../share --- old/test/hotspot/jtreg/serviceability/tmtools/jstat/GcCauseTest01.java 2020-01-17 17:09:21.103133180 +0100 +++ new/test/hotspot/jtreg/serviceability/tmtools/jstat/GcCauseTest01.java 2020-01-17 17:09:21.000133185 +0100 @@ -32,7 +32,7 @@ * @library /test/lib * @library ../share * @requires vm.opt.ExplicitGCInvokesConcurrent != true - * @requires vm.gc != "Z" + * @requires vm.gc != "Z" & vm.gc != "Shenandoah" * @run main/othervm -XX:+UsePerfData -Xmx128M GcCauseTest01 */ import utils.*; --- old/test/hotspot/jtreg/serviceability/tmtools/jstat/GcCauseTest02.java 2020-01-17 17:09:21.681133148 +0100 +++ new/test/hotspot/jtreg/serviceability/tmtools/jstat/GcCauseTest02.java 2020-01-17 17:09:21.579133153 +0100 @@ -28,7 +28,7 @@ * test forces debuggee application eat ~70% of heap and runs jstat. * jstat should show actual usage of old gen (OC/OU ~= old gen usage). * @requires vm.opt.ExplicitGCInvokesConcurrent != true - * @requires vm.gc != "Z" + * @requires vm.gc != "Z" & vm.gc != "Shenandoah" * @modules java.base/jdk.internal.misc * @library /test/lib * @library ../share --- old/test/hotspot/jtreg/serviceability/tmtools/jstat/GcCauseTest03.java 2020-01-17 17:09:22.264133116 +0100 +++ new/test/hotspot/jtreg/serviceability/tmtools/jstat/GcCauseTest03.java 2020-01-17 17:09:22.163133121 +0100 @@ -27,7 +27,7 @@ * Test scenario: * test forces debuggee application call System.gc(), runs jstat and checks that * cause of last garbage collection displayed by jstat (LGCC) is 'System.gc()'. 
- * @requires vm.gc != "Z" + * @requires vm.gc != "Z" & vm.gc != "Shenandoah" * @modules java.base/jdk.internal.misc * @library /test/lib * @library ../share --- old/test/hotspot/jtreg/serviceability/tmtools/jstat/GcNewTest.java 2020-01-17 17:09:22.834133084 +0100 +++ new/test/hotspot/jtreg/serviceability/tmtools/jstat/GcNewTest.java 2020-01-17 17:09:22.736133090 +0100 @@ -29,7 +29,7 @@ * test several times provokes garbage collection in the debuggee application and after each garbage * collection runs jstat. jstat should show that after garbage collection number of GC events and garbage * collection time increase. - * @requires vm.gc != "Z" + * @requires vm.gc != "Z" & vm.gc != "Shenandoah" * @modules java.base/jdk.internal.misc * @library /test/lib * @library ../share --- old/test/hotspot/jtreg/serviceability/tmtools/jstat/GcTest01.java 2020-01-17 17:09:23.412133052 +0100 +++ new/test/hotspot/jtreg/serviceability/tmtools/jstat/GcTest01.java 2020-01-17 17:09:23.313133058 +0100 @@ -35,7 +35,7 @@ * @library /test/lib * @library ../share * @requires vm.opt.ExplicitGCInvokesConcurrent != true - * @requires vm.gc != "Z" + * @requires vm.gc != "Z" & vm.gc != "Shenandoah" * @run main/othervm -XX:+UsePerfData -Xmx128M GcTest01 */ import utils.*; --- old/test/hotspot/jtreg/serviceability/tmtools/jstat/GcTest02.java 2020-01-17 17:09:24.003133020 +0100 +++ new/test/hotspot/jtreg/serviceability/tmtools/jstat/GcTest02.java 2020-01-17 17:09:23.901133025 +0100 @@ -28,7 +28,7 @@ * test forces debuggee application eat ~70% of heap and runs jstat. * jstat should show actual usage of old gen (OC/OU ~= old gen usage). * @requires vm.opt.ExplicitGCInvokesConcurrent != true - * @requires vm.gc != "Z" + * @requires vm.gc != "Z" & vm.gc != "Shenandoah" * @modules java.base/jdk.internal.misc * @library /test/lib * @library ../share --- old/test/hotspot/jtreg/vmTestbase/nsk/jvmti/scenarios/general_functions/GF08/gf08t001/TestDriver.java 2020-01-17 17:09:24.588132988 +0100 +++ new/test/hotspot/jtreg/vmTestbase/nsk/jvmti/scenarios/general_functions/GF08/gf08t001/TestDriver.java 2020-01-17 17:09:24.488132993 +0100 @@ -72,15 +72,16 @@ Boolean isUseG1GCon = wb.getBooleanVMFlag("UseG1GC"); Boolean isUseConcMarkSweepGCon = wb.getBooleanVMFlag("UseConcMarkSweepGC"); Boolean isUseZGCon = wb.getBooleanVMFlag("UseZGC"); + Boolean isShenandoahGCon = wb.getBooleanVMFlag("UseShenandoahGC"); Boolean isUseEpsilonGCon = wb.getBooleanVMFlag("UseEpsilonGC"); if (Compiler.isGraalEnabled() && - (isUseConcMarkSweepGCon || isUseZGCon || isUseEpsilonGCon)) { + (isUseConcMarkSweepGCon || isUseZGCon || isUseEpsilonGCon || isShenandoahGCon)) { return; // Graal does not support these GCs } String keyPhrase; - if ((isExplicitGCInvokesConcurrentOn && (isUseG1GCon || isUseConcMarkSweepGCon)) || isUseZGCon) { + if ((isExplicitGCInvokesConcurrentOn && (isUseG1GCon || isUseConcMarkSweepGCon)) || isUseZGCon || isShenandoahGCon) { keyPhrase = "GC"; } else { keyPhrase = "Pause Full"; --- old/test/lib/jdk/test/lib/jfr/EventNames.java 2020-01-17 17:09:25.169132955 +0100 +++ new/test/lib/jdk/test/lib/jfr/EventNames.java 2020-01-17 17:09:25.071132961 +0100 @@ -98,6 +98,8 @@ public final static String G1HeapSummary = PREFIX + "G1HeapSummary"; public final static String G1HeapRegionInformation = PREFIX + "G1HeapRegionInformation"; public final static String G1HeapRegionTypeChange = PREFIX + "G1HeapRegionTypeChange"; + public final static String ShenandoahHeapRegionInformation = PREFIX + "ShenandoahHeapRegionInformation"; + public final static String 
ShenandoahHeapRegionStateChange = PREFIX + "ShenandoahHeapRegionStateChange"; public final static String TenuringDistribution = PREFIX + "TenuringDistribution"; public final static String GarbageCollection = PREFIX + "GarbageCollection"; public final static String ParallelOldCollection = PREFIX + "ParallelOldGarbageCollection"; --- old/test/lib/jdk/test/lib/jfr/GCHelper.java 2020-01-17 17:09:25.761132923 +0100 +++ new/test/lib/jdk/test/lib/jfr/GCHelper.java 2020-01-17 17:09:25.659132928 +0100 @@ -80,6 +80,7 @@ public static final String pauseLevelEvent = "GCPhasePauseLevel"; private static final List<String> g1HeapRegionTypes; + private static final List<String> shenandoahHeapRegionStates; private static PrintStream defaultErrorLog = null; public static int getGcId(RecordedEvent event) { @@ -207,6 +208,21 @@ }; g1HeapRegionTypes = Collections.unmodifiableList(Arrays.asList(g1HeapRegionTypeLiterals)); + + String[] shenandoahHeapRegionStateLiterals = new String[] { + "Empty Uncommitted", + "Empty Committed", + "Regular", + "Humongous Start", + "Humongous Continuation", + "Humongous Start, Pinned", + "Collection Set", + "Pinned", + "Collection Set, Pinned", + "Trash" + }; + + shenandoahHeapRegionStates = Collections.unmodifiableList(Arrays.asList(shenandoahHeapRegionStateLiterals)); } /** @@ -443,6 +459,13 @@ return g1HeapRegionTypes.contains(type); } + public static boolean assertIsValidShenandoahHeapRegionState(final String state) { + if (!shenandoahHeapRegionStates.contains(state)) { + throw new AssertionError("Unknown state '" + state + "', valid heap region states are " + shenandoahHeapRegionStates); + } + return true; + } + /** * Helper function to align heap size up. * --- old/test/lib/sun/hotspot/gc/GC.java 2020-01-17 17:09:26.346132891 +0100 +++ new/test/lib/sun/hotspot/gc/GC.java 2020-01-17 17:09:26.244132896 +0100 @@ -38,7 +38,8 @@ ConcMarkSweep(3), G1(4), Epsilon(5), - Z(6); + Z(6), + Shenandoah(7); private static final WhiteBox WB = WhiteBox.getWhiteBox(); --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/cpu/aarch64/gc/shenandoah/c1/shenandoahBarrierSetC1_aarch64.cpp 2020-01-17 17:09:26.820132865 +0100 @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions.
+ * + */ + +#include "precompiled.hpp" +#include "c1/c1_LIRAssembler.hpp" +#include "c1/c1_MacroAssembler.hpp" +#include "gc/shenandoah/shenandoahBarrierSet.hpp" +#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp" +#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp" + +#define __ masm->masm()-> + +void LIR_OpShenandoahCompareAndSwap::emit_code(LIR_Assembler* masm) { + Register addr = _addr->as_register_lo(); + Register newval = _new_value->as_register(); + Register cmpval = _cmp_value->as_register(); + Register tmp1 = _tmp1->as_register(); + Register tmp2 = _tmp2->as_register(); + Register result = result_opr()->as_register(); + + ShenandoahBarrierSet::assembler()->storeval_barrier(masm->masm(), newval, rscratch2); + + if (UseCompressedOops) { + __ encode_heap_oop(tmp1, cmpval); + cmpval = tmp1; + __ encode_heap_oop(tmp2, newval); + newval = tmp2; + } + + ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm->masm(), addr, cmpval, newval, /*acquire*/ false, /*release*/ true, /*weak*/ false, /*is_cae*/ false, result); +} + +#undef __ + +#ifdef ASSERT +#define __ gen->lir(__FILE__, __LINE__)-> +#else +#define __ gen->lir()-> +#endif + +LIR_Opr ShenandoahBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) { + BasicType bt = access.type(); + if (access.is_oop()) { + LIRGenerator *gen = access.gen(); + if (ShenandoahSATBBarrier) { + pre_barrier(gen, access.access_emit_info(), access.decorators(), access.resolved_addr(), + LIR_OprFact::illegalOpr /* pre_val */); + } + if (ShenandoahCASBarrier) { + cmp_value.load_item(); + new_value.load_item(); + + LIR_Opr t1 = gen->new_register(T_OBJECT); + LIR_Opr t2 = gen->new_register(T_OBJECT); + LIR_Opr addr = access.resolved_addr()->as_address_ptr()->base(); + LIR_Opr result = gen->new_register(T_INT); + + __ append(new LIR_OpShenandoahCompareAndSwap(addr, cmp_value.result(), new_value.result(), t1, t2, result)); + return result; + } + } + return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value); +} + +LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) { + LIRGenerator* gen = access.gen(); + BasicType type = access.type(); + + LIR_Opr result = gen->new_register(type); + value.load_item(); + LIR_Opr value_opr = value.result(); + + if (access.is_oop()) { + value_opr = storeval_barrier(access.gen(), value_opr, access.access_emit_info(), access.decorators()); + } + + assert(type == T_INT || type == T_OBJECT || type == T_ARRAY LP64_ONLY( || type == T_LONG ), "unexpected type"); + LIR_Opr tmp = gen->new_register(T_INT); + __ xchg(access.resolved_addr(), value_opr, result, tmp); + + if (access.is_oop()) { + result = load_reference_barrier(access.gen(), result, LIR_OprFact::addressConst(0)); + LIR_Opr tmp = gen->new_register(type); + __ move(result, tmp); + result = tmp; + if (ShenandoahSATBBarrier) { + pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr, + result /* pre_val */); + } + } + + return result; +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp 2020-01-17 17:09:27.426132831 +0100 @@ -0,0 +1,679 @@ +/* + * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp" +#include "gc/shenandoah/shenandoahForwarding.hpp" +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.hpp" +#include "gc/shenandoah/shenandoahHeuristics.hpp" +#include "gc/shenandoah/shenandoahRuntime.hpp" +#include "gc/shenandoah/shenandoahThreadLocalData.hpp" +#include "interpreter/interpreter.hpp" +#include "interpreter/interp_masm.hpp" +#include "runtime/sharedRuntime.hpp" +#include "runtime/thread.hpp" +#ifdef COMPILER1 +#include "c1/c1_LIRAssembler.hpp" +#include "c1/c1_MacroAssembler.hpp" +#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp" +#endif + +#define __ masm-> + +address ShenandoahBarrierSetAssembler::_shenandoah_lrb = NULL; + +void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, + Register src, Register dst, Register count, RegSet saved_regs) { + if (is_oop) { + bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0; + if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) { + + Label done; + + // Avoid calling runtime if count == 0 + __ cbz(count, done); + + // Is marking active? 
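+ // The thread-local gc_state byte mirrors the heap's global GC phase bits, so this check stays a cheap load-and-test on the fast path.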
+ Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset())); + __ ldrb(rscratch1, gc_state); + if (dest_uninitialized) { + __ tbz(rscratch1, ShenandoahHeap::HAS_FORWARDED_BITPOS, done); + } else { + __ mov(rscratch2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING); + __ tst(rscratch1, rscratch2); + __ br(Assembler::EQ, done); + } + + __ push(saved_regs, sp); + if (UseCompressedOops) { + if (dest_uninitialized) { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_duinit_narrow_oop_entry), src, dst, count); + } else { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_narrow_oop_entry), src, dst, count); + } + } else { + if (dest_uninitialized) { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_duinit_oop_entry), src, dst, count); + } else { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_oop_entry), src, dst, count); + } + } + __ pop(saved_regs, sp); + __ bind(done); + } + } +} + +void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm, + Register obj, + Register pre_val, + Register thread, + Register tmp, + bool tosca_live, + bool expand_call) { + if (ShenandoahSATBBarrier) { + satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, tosca_live, expand_call); + } +} + +void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm, + Register obj, + Register pre_val, + Register thread, + Register tmp, + bool tosca_live, + bool expand_call) { + // If expand_call is true then we expand the call_VM_leaf macro + // directly to skip generating the check by + // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp. + + assert(thread == rthread, "must be"); + + Label done; + Label runtime; + + assert_different_registers(obj, pre_val, tmp, rscratch1); + assert(pre_val != noreg && tmp != noreg, "expecting a register"); + + Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset())); + Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset())); + Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())); + + // Is marking active? + if (in_bytes(ShenandoahSATBMarkQueue::byte_width_of_active()) == 4) { + __ ldrw(tmp, in_progress); + } else { + assert(in_bytes(ShenandoahSATBMarkQueue::byte_width_of_active()) == 1, "Assumption"); + __ ldrb(tmp, in_progress); + } + __ cbzw(tmp, done); + + // Do we need to load the previous value? + if (obj != noreg) { + __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW); + } + + // Is the previous value null? + __ cbz(pre_val, done); + + // Can we store original value in the thread's buffer? + // Is index == 0? + // (The index field is typed as size_t.) + + __ ldr(tmp, index); // tmp := *index_adr + __ cbz(tmp, runtime); // tmp == 0? 
+ // If yes, goto runtime + + __ sub(tmp, tmp, wordSize); // tmp := tmp - wordSize + __ str(tmp, index); // *index_adr := tmp + __ ldr(rscratch1, buffer); + __ add(tmp, tmp, rscratch1); // tmp := tmp + *buffer_adr + + // Record the previous value + __ str(pre_val, Address(tmp, 0)); + __ b(done); + + __ bind(runtime); + // save the live input values + RegSet saved = RegSet::of(pre_val); + if (tosca_live) saved += RegSet::of(r0); + if (obj != noreg) saved += RegSet::of(obj); + + __ push(saved, sp); + + // Calling the runtime using the regular call_VM_leaf mechanism generates + // code (generated by InterpreterMacroAssembler::call_VM_leaf_base) + // that checks that the *(rfp+frame::interpreter_frame_last_sp) == NULL. + // + // If we are generating the pre-barrier without a frame (e.g. in the + // intrinsified Reference.get() routine) then rfp might be pointing to + // the caller frame and so this check will most likely fail at runtime. + // + // Expanding the call directly bypasses the generation of the check. + // So when we do not have a full interpreter frame on the stack + // expand_call should be passed true. + + if (expand_call) { + assert(pre_val != c_rarg1, "smashed arg"); + __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread); + } else { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread); + } + + __ pop(saved, sp); + + __ bind(done); +} + +void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp) { + assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled"); + Label is_null; + __ cbz(dst, is_null); + resolve_forward_pointer_not_null(masm, dst, tmp); + __ bind(is_null); +} + +// IMPORTANT: This must preserve all registers, even rscratch1 and rscratch2, except those explicitly +// passed in. +void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp) { + assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled"); + // The code below loads the mark word, checks if the lowest two bits are + // set, and if so, clears the lowest two bits and copies the result + // to dst. Otherwise it leaves dst alone. + // Implementing this is surprisingly awkward. I do it here by: + // - Inverting the mark word + // - Testing whether the lowest two bits == 0 + // - If so, setting the lowest two bits + // - Inverting the result back, and copying to dst + + bool borrow_reg = (tmp == noreg); + if (borrow_reg) { + // No free registers available. Make one useful.
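+ // Borrow a scratch register (avoiding dst) and spill it around the use below.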
+ tmp = rscratch1; + if (tmp == dst) { + tmp = rscratch2; + } + __ push(RegSet::of(tmp), sp); + } + + assert_different_registers(tmp, dst); + + Label done; + __ ldr(tmp, Address(dst, oopDesc::mark_offset_in_bytes())); + __ eon(tmp, tmp, zr); + __ ands(zr, tmp, markOopDesc::lock_mask_in_place); + __ br(Assembler::NE, done); + __ orr(tmp, tmp, markOopDesc::marked_value); + __ eon(dst, tmp, zr); + __ bind(done); + + if (borrow_reg) { + __ pop(RegSet::of(tmp), sp); + } +} + +void ShenandoahBarrierSetAssembler::load_reference_barrier_not_null(MacroAssembler* masm, Register dst, Address load_addr) { + assert(ShenandoahLoadRefBarrier, "Should be enabled"); + assert(dst != rscratch2, "need rscratch2"); + assert_different_registers(load_addr.base(), load_addr.index(), rscratch1, rscratch2); + + Label done; + __ enter(); + Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset())); + __ ldrb(rscratch2, gc_state); + + // Check for heap stability + __ tbz(rscratch2, ShenandoahHeap::HAS_FORWARDED_BITPOS, done); + + // use r1 for load address + Register result_dst = dst; + if (dst == r1) { + __ mov(rscratch1, dst); + dst = rscratch1; + } + + // Save r0 and r1, unless it is an output register + RegSet to_save = RegSet::of(r0, r1) - result_dst; + __ push(to_save, sp); + __ lea(r1, load_addr); + __ mov(r0, dst); + + __ far_call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_lrb()))); + + __ mov(result_dst, r0); + __ pop(to_save, sp); + + __ bind(done); + __ leave(); +} + +void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) { + if (ShenandoahStoreValEnqueueBarrier) { + // Save possibly live regs. + RegSet live_regs = RegSet::range(r0, r4) - dst; + __ push(live_regs, sp); + __ strd(v0, __ pre(sp, 2 * -wordSize)); + + satb_write_barrier_pre(masm, noreg, dst, rthread, tmp, true, false); + + // Restore possibly live regs. 
+ __ ldrd(v0, __ post(sp, 2 * wordSize)); + __ pop(live_regs, sp); + } +} + +void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address load_addr) { + if (ShenandoahLoadRefBarrier) { + Label is_null; + __ cbz(dst, is_null); + load_reference_barrier_not_null(masm, dst, load_addr); + __ bind(is_null); + } +} + +void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register dst, Address src, Register tmp1, Register tmp_thread) { + // 1: non-reference load, no additional barrier is needed + if (!is_reference_type(type)) { + BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread); + return; + } + + // 2: load a reference from src location and apply LRB if needed + if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) { + Register result_dst = dst; + + // Preserve src location for LRB + if (dst == src.base() || dst == src.index()) { + dst = rscratch1; + } + assert_different_registers(dst, src.base(), src.index()); + + BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread); + + load_reference_barrier(masm, dst, src); + + if (dst != result_dst) { + __ mov(result_dst, dst); + dst = result_dst; + } + } else { + BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread); + } + + // 3: apply keep-alive barrier if needed + if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) { + __ enter(); + satb_write_barrier_pre(masm /* masm */, + noreg /* obj */, + dst /* pre_val */, + rthread /* thread */, + tmp1 /* tmp */, + true /* tosca_live */, + true /* expand_call */); + __ leave(); + } +} + +void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Address dst, Register val, Register tmp1, Register tmp2) { + bool on_oop = type == T_OBJECT || type == T_ARRAY; + if (!on_oop) { + BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2); + return; + } + + // flatten object address if needed + if (dst.index() == noreg && dst.offset() == 0) { + if (dst.base() != r3) { + __ mov(r3, dst.base()); + } + } else { + __ lea(r3, dst); + } + + shenandoah_write_barrier_pre(masm, + r3 /* obj */, + tmp2 /* pre_val */, + rthread /* thread */, + tmp1 /* tmp */, + val != noreg /* tosca_live */, + false /* expand_call */); + + if (val == noreg) { + BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), noreg, noreg, noreg); + } else { + storeval_barrier(masm, val, tmp1); + // G1 barrier needs uncompressed oop for region cross check. + Register new_val = val; + if (UseCompressedOops) { + new_val = rscratch2; + __ mov(new_val, val); + } + BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), val, noreg, noreg); + } + +} + +void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env, + Register obj, Register tmp, Label& slowpath) { + Label done; + // Resolve jobject + BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath); + + // Check for null. 
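+ // A null resolved jobject needs no evacuation check.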
+ __ cbz(obj, done); + + assert(obj != rscratch2, "need rscratch2"); + Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset()); + __ lea(rscratch2, gc_state); + __ ldrb(rscratch2, Address(rscratch2)); + + // Check for heap in evacuation phase + __ tbnz(rscratch2, ShenandoahHeap::EVACUATION_BITPOS, slowpath); + + __ bind(done); +} + + +void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val, + bool acquire, bool release, bool weak, bool is_cae, + Register result) { + Register tmp1 = rscratch1; + Register tmp2 = rscratch2; + bool is_narrow = UseCompressedOops; + Assembler::operand_size size = is_narrow ? Assembler::word : Assembler::xword; + + assert_different_registers(addr, expected, new_val, tmp1, tmp2); + + Label retry, done, fail; + + // CAS, using LL/SC pair. + __ bind(retry); + __ load_exclusive(tmp1, addr, size, acquire); + if (is_narrow) { + __ cmpw(tmp1, expected); + } else { + __ cmp(tmp1, expected); + } + __ br(Assembler::NE, fail); + __ store_exclusive(tmp2, new_val, addr, size, release); + if (weak) { + __ cmpw(tmp2, 0u); // If the store fails, return NE to our caller + } else { + __ cbnzw(tmp2, retry); + } + __ b(done); + + __ bind(fail); + // Check if rb(expected)==rb(tmp1) + // Shuffle registers so that we have memory value ready for next expected. + __ mov(tmp2, expected); + __ mov(expected, tmp1); + if (is_narrow) { + __ decode_heap_oop(tmp1, tmp1); + __ decode_heap_oop(tmp2, tmp2); + } + resolve_forward_pointer(masm, tmp1); + resolve_forward_pointer(masm, tmp2); + __ cmp(tmp1, tmp2); + // Retry with expected now being the value we just loaded from addr. + __ br(Assembler::EQ, retry); + if (is_cae && is_narrow) { + // For cmp-and-exchange and narrow oops, we need to restore + // the compressed old-value. We moved it to 'expected' a few lines up. + __ mov(tmp1, expected); + } + __ bind(done); + + if (is_cae) { + __ mov(result, tmp1); + } else { + __ cset(result, Assembler::EQ); + } +} + +#undef __ + +#ifdef COMPILER1 + +#define __ ce->masm()-> + +void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) { + ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1(); + // At this point we know that marking is in progress. + // If do_load() is true then we have to emit the + // load of the previous value; otherwise it has already + // been loaded into _pre_val. 
+ + __ bind(*stub->entry()); + + assert(stub->pre_val()->is_register(), "Precondition."); + + Register pre_val_reg = stub->pre_val()->as_register(); + + if (stub->do_load()) { + ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/); + } + __ cbz(pre_val_reg, *stub->continuation()); + ce->store_parameter(stub->pre_val()->as_register(), 0); + __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin())); + __ b(*stub->continuation()); +} + +void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) { + ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1(); + __ bind(*stub->entry()); + + Register obj = stub->obj()->as_register(); + Register res = stub->result()->as_register(); + Register addr = stub->addr()->as_pointer_register(); + Register tmp1 = stub->tmp1()->as_register(); + Register tmp2 = stub->tmp2()->as_register(); + + assert(res == r0, "result must arrive in r0"); + + if (res != obj) { + __ mov(res, obj); + } + + // Check for null. + __ cbz(res, *stub->continuation()); + + // Check for object in cset. + __ mov(tmp2, ShenandoahHeap::in_cset_fast_test_addr()); + __ lsr(tmp1, res, ShenandoahHeapRegion::region_size_bytes_shift_jint()); + __ ldrb(tmp2, Address(tmp2, tmp1)); + __ cbz(tmp2, *stub->continuation()); + + // Check if object is already forwarded. + Label slow_path; + __ ldr(tmp1, Address(res, oopDesc::mark_offset_in_bytes())); + __ eon(tmp1, tmp1, zr); + __ ands(zr, tmp1, markOopDesc::lock_mask_in_place); + __ br(Assembler::NE, slow_path); + + // Decode forwarded object. + __ orr(tmp1, tmp1, markOopDesc::marked_value); + __ eon(res, tmp1, zr); + __ b(*stub->continuation()); + + __ bind(slow_path); + ce->store_parameter(res, 0); + ce->store_parameter(addr, 1); + __ far_call(RuntimeAddress(bs->load_reference_barrier_rt_code_blob()->code_begin())); + + __ b(*stub->continuation()); +} + +#undef __ + +#define __ sasm-> + +void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) { + __ prologue("shenandoah_pre_barrier", false); + + // arg0 : previous value of memory + + BarrierSet* bs = BarrierSet::barrier_set(); + + const Register pre_val = r0; + const Register thread = rthread; + const Register tmp = rscratch1; + + Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset())); + Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())); + + Label done; + Label runtime; + + // Is marking still active? + Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset())); + __ ldrb(tmp, gc_state); + __ mov(rscratch2, ShenandoahHeap::MARKING | ShenandoahHeap::TRAVERSAL); + __ tst(tmp, rscratch2); + __ br(Assembler::EQ, done); + + // Can we store original value in the thread's buffer? 
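+ // A zero index means the thread's SATB buffer is full, so fall through to the runtime call below.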
+ __ ldr(tmp, queue_index); + __ cbz(tmp, runtime); + + __ sub(tmp, tmp, wordSize); + __ str(tmp, queue_index); + __ ldr(rscratch2, buffer); + __ add(tmp, tmp, rscratch2); + __ load_parameter(0, rscratch2); + __ str(rscratch2, Address(tmp, 0)); + __ b(done); + + __ bind(runtime); + __ push_call_clobbered_registers(); + __ load_parameter(0, pre_val); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread); + __ pop_call_clobbered_registers(); + __ bind(done); + + __ epilogue(); +} + +void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm) { + __ prologue("shenandoah_load_reference_barrier", false); + // arg0 : object to be resolved + + __ push_call_clobbered_registers(); + __ load_parameter(0, r0); + __ load_parameter(1, r1); + if (UseCompressedOops) { + __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow)); + } else { + __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier)); + } + __ blr(lr); + __ mov(rscratch1, r0); + __ pop_call_clobbered_registers(); + __ mov(r0, rscratch1); + + __ epilogue(); +} + +#undef __ + +#endif // COMPILER1 + +address ShenandoahBarrierSetAssembler::shenandoah_lrb() { + assert(_shenandoah_lrb != NULL, "need load reference barrier stub"); + return _shenandoah_lrb; +} + +#define __ cgen->assembler()-> + +// Shenandoah load reference barrier. +// +// Input: +// r0: OOP to evacuate. Not null. +// r1: load address +// +// Output: +// r0: Pointer to evacuated OOP. +// +// Trash rscratch1, rscratch2. Preserve everything else. +address ShenandoahBarrierSetAssembler::generate_shenandoah_lrb(StubCodeGenerator* cgen) { + + __ align(6); + StubCodeMark mark(cgen, "StubRoutines", "shenandoah_lrb"); + address start = __ pc(); + + Label work, done; + __ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr()); + __ lsr(rscratch1, r0, ShenandoahHeapRegion::region_size_bytes_shift_jint()); + __ ldrb(rscratch2, Address(rscratch2, rscratch1)); + __ tbnz(rscratch2, 0, work); + __ ret(lr); + __ bind(work); + + Label slow_path; + __ ldr(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes())); + __ eon(rscratch1, rscratch1, zr); + __ ands(zr, rscratch1, markOopDesc::lock_mask_in_place); + __ br(Assembler::NE, slow_path); + + // Decode forwarded object. 
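+ // rscratch1 still holds the inverted mark word; setting the lock bits and inverting back clears the tag bits, leaving the forwardee address in r0.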
+ __ orr(rscratch1, rscratch1, markOopDesc::marked_value); + __ eon(r0, rscratch1, zr); + __ ret(lr); + + __ bind(slow_path); + __ enter(); // required for proper stackwalking of RuntimeStub frame + + __ push_call_clobbered_registers(); + + if (UseCompressedOops) { + __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow)); + } else { + __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier)); + } + __ blr(lr); + __ mov(rscratch1, r0); + __ pop_call_clobbered_registers(); + __ mov(r0, rscratch1); + + __ leave(); // required for proper stackwalking of RuntimeStub frame + __ bind(done); + __ ret(lr); + + return start; +} + +#undef __ + +void ShenandoahBarrierSetAssembler::barrier_stubs_init() { + if (ShenandoahLoadRefBarrier) { + int stub_code_size = 2048; + ResourceMark rm; + BufferBlob* bb = BufferBlob::create("shenandoah_barrier_stubs", stub_code_size); + CodeBuffer buf(bb); + StubCodeGenerator cgen(&buf); + _shenandoah_lrb = generate_shenandoah_lrb(&cgen); + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp 2020-01-17 17:09:28.029132798 +0100 @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef CPU_AARCH64_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_AARCH64_HPP +#define CPU_AARCH64_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_AARCH64_HPP + +#include "asm/macroAssembler.hpp" +#include "gc/shared/barrierSetAssembler.hpp" +#ifdef COMPILER1 +class LIR_Assembler; +class ShenandoahPreBarrierStub; +class ShenandoahLoadReferenceBarrierStub; +class StubAssembler; +#endif +class StubCodeGenerator; + +class ShenandoahBarrierSetAssembler: public BarrierSetAssembler { +private: + + static address _shenandoah_lrb; + + void satb_write_barrier_pre(MacroAssembler* masm, + Register obj, + Register pre_val, + Register thread, + Register tmp, + bool tosca_live, + bool expand_call); + void shenandoah_write_barrier_pre(MacroAssembler* masm, + Register obj, + Register pre_val, + Register thread, + Register tmp, + bool tosca_live, + bool expand_call); + + void resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp = noreg); + void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp = noreg); + void load_reference_barrier(MacroAssembler* masm, Register dst, Address load_addr); + void load_reference_barrier_not_null(MacroAssembler* masm, Register dst, Address load_addr); + + address generate_shenandoah_lrb(StubCodeGenerator* cgen); + +public: + static address shenandoah_lrb(); + + void storeval_barrier(MacroAssembler* masm, Register dst, Register tmp); + +#ifdef COMPILER1 + void gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub); + void gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub); + void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm); + void generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm); +#endif + + virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, + Register src, Register dst, Register count, RegSet saved_regs); + virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register dst, Address src, Register tmp1, Register tmp_thread); + virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Address dst, Register val, Register tmp1, Register tmp2); + virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env, + Register obj, Register tmp, Label& slowpath); + virtual void cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val, + bool acquire, bool release, bool weak, bool is_cae, Register result); + + virtual void barrier_stubs_init(); +}; + +#endif // CPU_AARCH64_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_AARCH64_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoah_aarch64.ad 2020-01-17 17:09:28.638132764 +0100 @@ -0,0 +1,187 @@ +// +// Copyright (c) 2018, Red Hat, Inc. All rights reserved. +// +// This code is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License version 2 only, as +// published by the Free Software Foundation. +// +// This code is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// version 2 for more details (a copy is included in the LICENSE file that +// accompanied this code). 
+// +// You should have received a copy of the GNU General Public License version +// 2 along with this work; if not, write to the Free Software Foundation, +// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +// +// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +// or visit www.oracle.com if you need additional information or have any +// questions. +// +// + +source_hpp %{ +#include "gc/shenandoah/shenandoahBarrierSet.hpp" +#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp" +%} + +encode %{ + enc_class aarch64_enc_cmpxchg_oop_shenandoah(memory mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, iRegINoSp res) %{ + MacroAssembler _masm(&cbuf); + guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); + Register tmp = $tmp$$Register; + __ mov(tmp, $oldval$$Register); // Must not clobber oldval. + ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, + /*acquire*/ false, /*release*/ true, /*weak*/ false, /*is_cae*/ false, $res$$Register); + %} + + enc_class aarch64_enc_cmpxchg_acq_oop_shenandoah(memory mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, iRegINoSp res) %{ + MacroAssembler _masm(&cbuf); + guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); + Register tmp = $tmp$$Register; + __ mov(tmp, $oldval$$Register); // Must not clobber oldval. + ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, + /*acquire*/ true, /*release*/ true, /*weak*/ false, /*is_cae*/ false, $res$$Register); + %} +%} + +instruct compareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{ + + match(Set res (ShenandoahCompareAndSwapP mem (Binary oldval newval))); + ins_cost(2 * VOLATILE_REF_COST); + + effect(TEMP tmp, KILL cr); + + format %{ + "cmpxchg_shenandoah_oop $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp" + %} + + ins_encode(aarch64_enc_cmpxchg_oop_shenandoah(mem, oldval, newval, tmp, res)); + + ins_pipe(pipe_slow); +%} + +instruct compareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{ + + match(Set res (ShenandoahCompareAndSwapN mem (Binary oldval newval))); + ins_cost(2 * VOLATILE_REF_COST); + + effect(TEMP tmp, KILL cr); + + format %{ + "cmpxchgw_shenandoah_narrow_oop $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval with temp $tmp" + %} + + ins_encode %{ + Register tmp = $tmp$$Register; + __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, /*acquire*/ false, /*release*/ true, /*weak*/ false, /*is_cae*/ false, $res$$Register); + %} + + ins_pipe(pipe_slow); +%} + +instruct compareAndSwapPAcq_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{ + + predicate(needs_acquiring_load_exclusive(n)); + match(Set res (ShenandoahCompareAndSwapP mem (Binary oldval newval))); + ins_cost(VOLATILE_REF_COST); + + effect(TEMP tmp, KILL cr); + + format %{ + "cmpxchg_acq_shenandoah_oop $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp" + %} + + ins_encode(aarch64_enc_cmpxchg_acq_oop_shenandoah(mem, oldval, newval, tmp, res)); + + ins_pipe(pipe_slow); +%} + +instruct compareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{ + + predicate(needs_acquiring_load_exclusive(n)); + match(Set res (ShenandoahCompareAndSwapN mem (Binary oldval newval))); + ins_cost(VOLATILE_REF_COST); + + effect(TEMP tmp, KILL cr); + + format %{ + "cmpxchgw_acq_shenandoah_narrow_oop $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval with temp $tmp" + %} + + ins_encode %{ + Register tmp = $tmp$$Register; + __ mov(tmp, $oldval$$Register); // Must not clobber oldval. + ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, /*acquire*/ true, /*release*/ true, /*weak*/ false, /*is_cae*/ false, $res$$Register); + %} + + ins_pipe(pipe_slow); +%} + +instruct compareAndExchangeN_shenandoah(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{ + match(Set res (ShenandoahCompareAndExchangeN mem (Binary oldval newval))); + ins_cost(3 * VOLATILE_REF_COST); + effect(TEMP_DEF res, TEMP tmp, KILL cr); + format %{ + "cmpxchg_oop_shenandoah $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval" + %} + ins_encode %{ + Register tmp = $tmp$$Register; + __ mov(tmp, $oldval$$Register); // Must not clobber oldval. + ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, + /*acquire*/ false, /*release*/ true, /*weak*/ false, /*is_cae*/ true, $res$$Register); + %} + ins_pipe(pipe_slow); +%} + +instruct compareAndExchangeP_shenandoah(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{ + match(Set res (ShenandoahCompareAndExchangeP mem (Binary oldval newval))); + ins_cost(3 * VOLATILE_REF_COST); + effect(TEMP_DEF res, TEMP tmp, KILL cr); + format %{ + "cmpxchg_oop_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp" + %} + ins_encode %{ + Register tmp = $tmp$$Register; + __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, + /*acquire*/ false, /*release*/ true, /*weak*/ false, /*is_cae*/ true, $res$$Register); + %} + ins_pipe(pipe_slow); +%} + +instruct weakCompareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{ + match(Set res (ShenandoahWeakCompareAndSwapN mem (Binary oldval newval))); + ins_cost(3 * VOLATILE_REF_COST); + effect(TEMP tmp, KILL cr); + format %{ + "cmpxchg_oop_shenandoah $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval" + %} + ins_encode %{ + Register tmp = $tmp$$Register; + __ mov(tmp, $oldval$$Register); // Must not clobber oldval. + ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, + /*acquire*/ false, /*release*/ true, /*weak*/ true, /*is_cae*/ false, $res$$Register); + %} + ins_pipe(pipe_slow); +%} + +instruct weakCompareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{ + match(Set res (ShenandoahWeakCompareAndSwapP mem (Binary oldval newval))); + ins_cost(3 * VOLATILE_REF_COST); + effect(TEMP tmp, KILL cr); + format %{ + "cmpxchg_oop_shenandoah $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval" + %} + ins_encode %{ + Register tmp = $tmp$$Register; + __ mov(tmp, $oldval$$Register); // Must not clobber oldval. + ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, + /*acquire*/ false, /*release*/ true, /*weak*/ true, /*is_cae*/ false, $res$$Register); + %} + ins_pipe(pipe_slow); +%} + + --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/cpu/x86/gc/shenandoah/c1/shenandoahBarrierSetC1_x86.cpp 2020-01-17 17:09:29.249132731 +0100 @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "c1/c1_LIRAssembler.hpp" +#include "c1/c1_MacroAssembler.hpp" +#include "gc/shenandoah/shenandoahBarrierSet.hpp" +#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp" +#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp" + +#define __ masm->masm()-> + +void LIR_OpShenandoahCompareAndSwap::emit_code(LIR_Assembler* masm) { + NOT_LP64(assert(_addr->is_single_cpu(), "must be single");) + Register addr = _addr->is_single_cpu() ? 
_addr->as_register() : _addr->as_register_lo(); + Register newval = _new_value->as_register(); + Register cmpval = _cmp_value->as_register(); + Register tmp1 = _tmp1->as_register(); + Register tmp2 = _tmp2->as_register(); + Register result = result_opr()->as_register(); + assert(cmpval == rax, "wrong register"); + assert(newval != NULL, "new val must be register"); + assert(cmpval != newval, "cmp and new values must be in different registers"); + assert(cmpval != addr, "cmp and addr must be in different registers"); + assert(newval != addr, "new value and addr must be in different registers"); + + // Apply storeval barrier to newval. + ShenandoahBarrierSet::assembler()->storeval_barrier(masm->masm(), newval, tmp1); + +#ifdef _LP64 + if (UseCompressedOops) { + __ encode_heap_oop(cmpval); + __ mov(rscratch1, newval); + __ encode_heap_oop(rscratch1); + newval = rscratch1; + } +#endif + + ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm->masm(), result, Address(addr, 0), cmpval, newval, false, tmp1, tmp2); +} + +#undef __ + +#ifdef ASSERT +#define __ gen->lir(__FILE__, __LINE__)-> +#else +#define __ gen->lir()-> +#endif + +LIR_Opr ShenandoahBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) { + + if (access.is_oop()) { + LIRGenerator* gen = access.gen(); + if (ShenandoahSATBBarrier) { + pre_barrier(gen, access.access_emit_info(), access.decorators(), access.resolved_addr(), + LIR_OprFact::illegalOpr /* pre_val */); + } + if (ShenandoahCASBarrier) { + cmp_value.load_item_force(FrameMap::rax_oop_opr); + new_value.load_item(); + + LIR_Opr t1 = gen->new_register(T_OBJECT); + LIR_Opr t2 = gen->new_register(T_OBJECT); + LIR_Opr addr = access.resolved_addr()->as_address_ptr()->base(); + LIR_Opr result = gen->new_register(T_INT); + + __ append(new LIR_OpShenandoahCompareAndSwap(addr, cmp_value.result(), new_value.result(), t1, t2, result)); + return result; + } + } + return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value); +} + +LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) { + LIRGenerator* gen = access.gen(); + BasicType type = access.type(); + + LIR_Opr result = gen->new_register(type); + value.load_item(); + LIR_Opr value_opr = value.result(); + + if (access.is_oop()) { + value_opr = storeval_barrier(access.gen(), value_opr, access.access_emit_info(), access.decorators()); + } + + // Because we want a 2-arg form of xchg and xadd + __ move(value_opr, result); + + assert(type == T_INT || type == T_OBJECT || type == T_ARRAY LP64_ONLY( || type == T_LONG ), "unexpected type"); + __ xchg(access.resolved_addr(), result, result, LIR_OprFact::illegalOpr); + + if (access.is_oop()) { + result = load_reference_barrier(access.gen(), result, LIR_OprFact::addressConst(0)); + LIR_Opr tmp = gen->new_register(type); + __ move(result, tmp); + result = tmp; + if (ShenandoahSATBBarrier) { + pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr, + result /* pre_val */); + } + } + + return result; +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp 2020-01-17 17:09:29.849132698 +0100 @@ -0,0 +1,948 @@ +/* + * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp" +#include "gc/shenandoah/shenandoahForwarding.hpp" +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.hpp" +#include "gc/shenandoah/shenandoahHeuristics.hpp" +#include "gc/shenandoah/shenandoahRuntime.hpp" +#include "gc/shenandoah/shenandoahThreadLocalData.hpp" +#include "interpreter/interpreter.hpp" +#include "interpreter/interp_masm.hpp" +#include "runtime/sharedRuntime.hpp" +#include "runtime/thread.hpp" +#include "utilities/macros.hpp" +#ifdef COMPILER1 +#include "c1/c1_LIRAssembler.hpp" +#include "c1/c1_MacroAssembler.hpp" +#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp" +#endif + +#define __ masm-> + +address ShenandoahBarrierSetAssembler::_shenandoah_lrb = NULL; + +void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register src, Register dst, Register count) { + + bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0; + + if (type == T_OBJECT || type == T_ARRAY) { + + if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) { +#ifdef _LP64 + Register thread = r15_thread; +#else + Register thread = rax; + if (thread == src || thread == dst || thread == count) { + thread = rbx; + } + if (thread == src || thread == dst || thread == count) { + thread = rcx; + } + if (thread == src || thread == dst || thread == count) { + thread = rdx; + } + __ push(thread); + __ get_thread(thread); +#endif + assert_different_registers(src, dst, count, thread); + + Label done; + // Short-circuit if count == 0. + __ testptr(count, count); + __ jcc(Assembler::zero, done); + + // Avoid runtime call when not marking. 
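+ // As on aarch64, the per-thread gc_state byte is tested directly: pre-barrier and LRB work is only needed while marking is active or while forwarded objects may exist.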
+ Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset())); + int flags = ShenandoahHeap::HAS_FORWARDED; + if (!dest_uninitialized) { + flags |= ShenandoahHeap::MARKING; + } + __ testb(gc_state, flags); + __ jcc(Assembler::zero, done); + + __ pusha(); // push registers +#ifdef _LP64 + assert(src == rdi, "expected"); + assert(dst == rsi, "expected"); + assert(count == rdx, "expected"); + if (UseCompressedOops) { + if (dest_uninitialized) { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_duinit_narrow_oop_entry), src, dst, count); + } else { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_narrow_oop_entry), src, dst, count); + } + } else +#endif + { + if (dest_uninitialized) { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_duinit_oop_entry), src, dst, count); + } else { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_oop_entry), src, dst, count); + } + } + __ popa(); + __ bind(done); + NOT_LP64(__ pop(thread);) + } + } + +} + +void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm, + Register obj, + Register pre_val, + Register thread, + Register tmp, + bool tosca_live, + bool expand_call) { + + if (ShenandoahSATBBarrier) { + satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, tosca_live, expand_call); + } +} + +void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm, + Register obj, + Register pre_val, + Register thread, + Register tmp, + bool tosca_live, + bool expand_call) { + // If expand_call is true then we expand the call_VM_leaf macro + // directly to skip generating the check by + // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp. + +#ifdef _LP64 + assert(thread == r15_thread, "must be"); +#endif // _LP64 + + Label done; + Label runtime; + + assert(pre_val != noreg, "check this code"); + + if (obj != noreg) { + assert_different_registers(obj, pre_val, tmp); + assert(pre_val != rax, "check this code"); + } + + Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset())); + Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset())); + Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())); + + Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset())); + __ testb(gc_state, ShenandoahHeap::MARKING | ShenandoahHeap::TRAVERSAL); + __ jcc(Assembler::zero, done); + + // Do we need to load the previous value? + if (obj != noreg) { + __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW); + } + + // Is the previous value null? + __ cmpptr(pre_val, (int32_t) NULL_WORD); + __ jcc(Assembler::equal, done); + + // Can we store original value in the thread's buffer? + // Is index == 0? + // (The index field is typed as size_t.) + + __ movptr(tmp, index); // tmp := *index_adr + __ cmpptr(tmp, 0); // tmp == 0? 
+ __ jcc(Assembler::equal, runtime); // If yes, goto runtime + + __ subptr(tmp, wordSize); // tmp := tmp - wordSize + __ movptr(index, tmp); // *index_adr := tmp + __ addptr(tmp, buffer); // tmp := tmp + *buffer_adr + + // Record the previous value + __ movptr(Address(tmp, 0), pre_val); + __ jmp(done); + + __ bind(runtime); + // save the live input values + if (tosca_live) __ push(rax); + + if (obj != noreg && obj != rax) + __ push(obj); + + if (pre_val != rax) + __ push(pre_val); + + // Calling the runtime using the regular call_VM_leaf mechanism generates + // code (generated by InterpreterMacroAssembler::call_VM_leaf_base) + // that checks that the *(ebp+frame::interpreter_frame_last_sp) == NULL. + // + // If we are generating the pre-barrier without a frame (e.g. in the + // intrinsified Reference.get() routine) then ebp might be pointing to + // the caller frame and so this check will most likely fail at runtime. + // + // Expanding the call directly bypasses the generation of the check. + // So when we do not have a full interpreter frame on the stack + // expand_call should be passed true. + + NOT_LP64( __ push(thread); ) + +#ifdef _LP64 + // We move pre_val into c_rarg0 early, in order to avoid smashing it, should + // pre_val be c_rarg1 (where the call prologue would copy thread argument). + // Note: this should not accidentally smash thread, because thread is always r15. + assert(thread != c_rarg0, "smashed arg"); + if (c_rarg0 != pre_val) { + __ mov(c_rarg0, pre_val); + } +#endif + + if (expand_call) { + LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); ) +#ifdef _LP64 + if (c_rarg1 != thread) { + __ mov(c_rarg1, thread); + } + // Already moved pre_val into c_rarg0 above +#else + __ push(thread); + __ push(pre_val); +#endif + __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), 2); + } else { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), LP64_ONLY(c_rarg0) NOT_LP64(pre_val), thread); + } + + NOT_LP64( __ pop(thread); ) + + // restore the live input values + if (pre_val != rax) + __ pop(pre_val); + + if (obj != noreg && obj != rax) + __ pop(obj); + + if (tosca_live) __ pop(rax); + + __ bind(done); +} + +void ShenandoahBarrierSetAssembler::load_reference_barrier_not_null(MacroAssembler* masm, Register dst, Address src) { + assert(ShenandoahLoadRefBarrier, "Should be enabled"); + + Label done; + +#ifdef _LP64 + Register thread = r15_thread; +#else + Register thread = rcx; + if (thread == dst) { + thread = rbx; + } + __ push(thread); + __ get_thread(thread); +#endif + + Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset())); + __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED); + __ jccb(Assembler::zero, done); + + // Use rsi for src address + const Register src_addr = rsi; + // Set up the address parameter first, if it does not clobber the oop in dst + bool need_addr_setup = (src_addr != dst); + + if (need_addr_setup) { + __ push(src_addr); + __ lea(src_addr, src); + + if (dst != rax) { + // Move obj into rax and save rax + __ push(rax); + __ movptr(rax, dst); + } + } else { + // dst == rsi + __ push(rax); + __ movptr(rax, dst); + + // we can clobber it, since it is an outgoing register + __ lea(src_addr, src); + } + + __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_lrb()))); + + if (need_addr_setup) { + if (dst != rax) { + __ movptr(dst, rax); + __ pop(rax); + } + __ pop(src_addr); + } else { + __ movptr(dst, rax); + __
pop(rax); + } + + __ bind(done); + +#ifndef _LP64 + __ pop(thread); +#endif +} + +void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) { + if (ShenandoahStoreValEnqueueBarrier) { + storeval_barrier_impl(masm, dst, tmp); + } +} + +void ShenandoahBarrierSetAssembler::storeval_barrier_impl(MacroAssembler* masm, Register dst, Register tmp) { + assert(ShenandoahStoreValEnqueueBarrier, "should be enabled"); + + if (dst == noreg) return; + + if (ShenandoahStoreValEnqueueBarrier) { + // The set of registers to be saved+restored is the same as in the write-barrier above. + // Those are the commonly used registers in the interpreter. + __ pusha(); + // __ push_callee_saved_registers(); + __ subptr(rsp, 2 * Interpreter::stackElementSize); + __ movdbl(Address(rsp, 0), xmm0); + +#ifdef _LP64 + Register thread = r15_thread; +#else + Register thread = rcx; + if (thread == dst || thread == tmp) { + thread = rdi; + } + if (thread == dst || thread == tmp) { + thread = rbx; + } + __ get_thread(thread); +#endif + assert_different_registers(dst, tmp, thread); + + satb_write_barrier_pre(masm, noreg, dst, thread, tmp, true, false); + __ movdbl(xmm0, Address(rsp, 0)); + __ addptr(rsp, 2 * Interpreter::stackElementSize); + //__ pop_callee_saved_registers(); + __ popa(); + } +} + +void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address src) { + if (ShenandoahLoadRefBarrier) { + Label done; + __ testptr(dst, dst); + __ jcc(Assembler::zero, done); + load_reference_barrier_not_null(masm, dst, src); + __ bind(done); + } +} + +void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register dst, Address src, Register tmp1, Register tmp_thread) { + // 1: non-reference load, no additional barrier is needed + if (!is_reference_type(type)) { + BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread); + return; + } + + assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Not expected"); + + // 2: load a reference from src location and apply LRB if needed + if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) { + Register result_dst = dst; + bool use_tmp1_for_dst = false; + + // Preserve src location for LRB + if (dst == src.base() || dst == src.index()) { + // Use tmp1 for dst if possible, as it is not used in BarrierSetAssembler::load_at() + if (tmp1->is_valid() && tmp1 != src.base() && tmp1 != src.index()) { + dst = tmp1; + use_tmp1_for_dst = true; + } else { + dst = rdi; + __ push(dst); + } + assert_different_registers(dst, src.base(), src.index()); + } + + BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread); + + load_reference_barrier(masm, dst, src); + + // Move loaded oop to final destination + if (dst != result_dst) { + __ movptr(result_dst, dst); + + if (!use_tmp1_for_dst) { + __ pop(dst); + } + + dst = result_dst; + } + } else { + BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread); + } + + // 3: apply keep-alive barrier if needed + if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) { + __ push_IU_state(); + Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread); + assert_different_registers(dst, tmp1, tmp_thread); + if (!thread->is_valid()) { + thread = rdx; + } + NOT_LP64(__ get_thread(thread)); + // Generate the SATB pre-barrier code to log the value of + // the referent field in an SATB buffer.
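+ // This keep-alive barrier ensures a referent returned by Reference.get() stays visible to concurrent marking.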
+    shenandoah_write_barrier_pre(masm /* masm */,
+                                 noreg /* obj */,
+                                 dst /* pre_val */,
+                                 thread /* thread */,
+                                 tmp1 /* tmp */,
+                                 true /* tosca_live */,
+                                 true /* expand_call */);
+    __ pop_IU_state();
+  }
+}
+
+void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+                                             Address dst, Register val, Register tmp1, Register tmp2) {
+  bool on_oop = type == T_OBJECT || type == T_ARRAY;
+  bool in_heap = (decorators & IN_HEAP) != 0;
+  bool as_normal = (decorators & AS_NORMAL) != 0;
+  if (on_oop && in_heap) {
+    bool needs_pre_barrier = as_normal;
+
+    Register tmp3 = LP64_ONLY(r8) NOT_LP64(rsi);
+    Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
+    // flatten object address if needed
+    // We do it regardless of precise because we need the registers
+    if (dst.index() == noreg && dst.disp() == 0) {
+      if (dst.base() != tmp1) {
+        __ movptr(tmp1, dst.base());
+      }
+    } else {
+      __ lea(tmp1, dst);
+    }
+
+    assert_different_registers(val, tmp1, tmp2, tmp3, rthread);
+
+#ifndef _LP64
+    __ get_thread(rthread);
+    InterpreterMacroAssembler *imasm = static_cast<InterpreterMacroAssembler*>(masm);
+    imasm->save_bcp();
+#endif
+
+    if (needs_pre_barrier) {
+      shenandoah_write_barrier_pre(masm /* masm */,
+                                   tmp1 /* obj */,
+                                   tmp2 /* pre_val */,
+                                   rthread /* thread */,
+                                   tmp3 /* tmp */,
+                                   val != noreg /* tosca_live */,
+                                   false /* expand_call */);
+    }
+    if (val == noreg) {
+      BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg);
+    } else {
+      storeval_barrier(masm, val, tmp3);
+      BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg);
+    }
+    NOT_LP64(imasm->restore_bcp());
+  } else {
+    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2);
+  }
+}
+
+void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
+                                                                  Register obj, Register tmp, Label& slowpath) {
+  Label done;
+  // Resolve jobject
+  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);
+
+  // Check for null.
+  __ testptr(obj, obj);
+  __ jcc(Assembler::zero, done);
+
+  Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
+  __ testb(gc_state, ShenandoahHeap::EVACUATION);
+  __ jccb(Assembler::notZero, slowpath);
+  __ bind(done);
+}
+
+// Special Shenandoah CAS implementation that handles false negatives
+// due to concurrent evacuation.
+void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
+                                                Register res, Address addr, Register oldval, Register newval,
+                                                bool exchange, Register tmp1, Register tmp2) {
+  assert(ShenandoahCASBarrier, "Should only be used when CAS barrier is enabled");
+  assert(oldval == rax, "must be in rax for implicit use in cmpxchg");
+  assert_different_registers(oldval, newval, tmp1, tmp2);
+
+  Label L_success, L_failure;
+
+  // Remember oldval for retry logic below
+#ifdef _LP64
+  if (UseCompressedOops) {
+    __ movl(tmp1, oldval);
+  } else
+#endif
+  {
+    __ movptr(tmp1, oldval);
+  }
+
+  // Step 1. Fast-path.
+  //
+  // Try to CAS with given arguments. If successful, then we are done.
+
+  if (os::is_MP()) __ lock();
+#ifdef _LP64
+  if (UseCompressedOops) {
+    __ cmpxchgl(newval, addr);
+  } else
+#endif
+  {
+    __ cmpxchgptr(newval, addr);
+  }
+  __ jcc(Assembler::equal, L_success);
+
+  // Step 2. CAS had failed. This may be a false negative.
+  //
+  // The trouble comes when we compare the to-space pointer with the from-space
+  // pointer to the same object. To resolve this, it will suffice to resolve
+  // the value from memory -- this will give both to-space pointers.
+  // If they mismatch, then it was a legitimate failure.
+  //
+  // Before reaching for the resolve sequence, see if we can avoid the whole shebang
+  // with filters.
+
+  // Filter: when offending in-memory value is NULL, the failure is definitely legitimate
+  __ testptr(oldval, oldval);
+  __ jcc(Assembler::zero, L_failure);
+
+  // Filter: when heap is stable, the failure is definitely legitimate
+#ifdef _LP64
+  const Register thread = r15_thread;
+#else
+  const Register thread = tmp2;
+  __ get_thread(thread);
+#endif
+  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+  __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED);
+  __ jcc(Assembler::zero, L_failure);
+
+#ifdef _LP64
+  if (UseCompressedOops) {
+    __ movl(tmp2, oldval);
+    __ decode_heap_oop(tmp2);
+  } else
+#endif
+  {
+    __ movptr(tmp2, oldval);
+  }
+
+  // Decode offending in-memory value.
+  // Test if-forwarded
+  __ testb(Address(tmp2, oopDesc::mark_offset_in_bytes()), markOopDesc::marked_value);
+  __ jcc(Assembler::noParity, L_failure); // When odd number of bits, then not forwarded
+  __ jcc(Assembler::zero, L_failure);     // When it is 00, then also not forwarded
+
+  // Load and mask forwarding pointer
+  __ movptr(tmp2, Address(tmp2, oopDesc::mark_offset_in_bytes()));
+  __ shrptr(tmp2, 2);
+  __ shlptr(tmp2, 2);
+
+#ifdef _LP64
+  if (UseCompressedOops) {
+    __ decode_heap_oop(tmp1); // decode for comparison
+  }
+#endif
+
+  // Now we have the forwarded offender in tmp2.
+  // Compare and if they don't match, we have legitimate failure
+  __ cmpptr(tmp1, tmp2);
+  __ jcc(Assembler::notEqual, L_failure);
+
+  // Step 3. Need to fix the memory ptr before continuing.
+  //
+  // At this point, we have from-space oldval in the register, and its to-space
+  // address is in tmp2. Let's try to update it into memory. We don't care if it
+  // succeeds or not. If it does, then the retrying CAS would see it and succeed.
+  // If this fixup fails, this means somebody else beat us to it, and necessarily
+  // with to-space ptr store. We still have to do the retry, because the GC might
+  // have updated the reference for us.
+
+#ifdef _LP64
+  if (UseCompressedOops) {
+    __ encode_heap_oop(tmp2); // previously decoded at step 2.
+  }
+#endif
+
+  if (os::is_MP()) __ lock();
+#ifdef _LP64
+  if (UseCompressedOops) {
+    __ cmpxchgl(tmp2, addr);
+  } else
+#endif
+  {
+    __ cmpxchgptr(tmp2, addr);
+  }
+
+  // Step 4. Try to CAS again.
+  //
+  // This is guaranteed not to have false negatives, because oldval is definitely
+  // to-space, and memory pointer is to-space as well. Nothing is able to store
+  // from-space ptr into memory anymore. Make sure oldval is restored, after being
+  // garbled during retries.
+  //
+#ifdef _LP64
+  if (UseCompressedOops) {
+    __ movl(oldval, tmp2);
+  } else
+#endif
+  {
+    __ movptr(oldval, tmp2);
+  }
+
+  if (os::is_MP()) __ lock();
+#ifdef _LP64
+  if (UseCompressedOops) {
+    __ cmpxchgl(newval, addr);
+  } else
+#endif
+  {
+    __ cmpxchgptr(newval, addr);
+  }
+  if (!exchange) {
+    __ jccb(Assembler::equal, L_success); // fastpath, peeking into Step 5, no need to jump
+  }
+
+  // Step 5. If we need a boolean result out of CAS, set the flag appropriately,
+  // and promote the result. Note that we handle the flag from both the 1st and 2nd CAS.
+  // Otherwise, failure witness for CAE is in oldval on all paths, and we can return.
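+  //
+  // For reference, the whole sequence above roughly corresponds to this
+  // pseudo-code (an illustrative sketch only, not actual code; 'fwd' stands
+  // for the forwardee decoded from the mark word in Step 2, 'expected' for
+  // the incoming oldval):
+  //
+  //   if (CAS(addr, expected, newval)) return success;          // Step 1
+  //   witness = *addr;                          // failed CAS leaves it in rax
+  //   if (witness == NULL || heap_is_stable()) return failure;  // filters
+  //   if (!is_forwarded(witness) || fwd(witness) != expected) return failure;
+  //   CAS(addr, witness, expected);             // Step 3: heal the location
+  //   return CAS(addr, expected, newval);       // Step 4: cannot falsely fail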
+ + if (exchange) { + __ bind(L_failure); + __ bind(L_success); + } else { + assert(res != NULL, "need result register"); + + Label exit; + __ bind(L_failure); + __ xorptr(res, res); + __ jmpb(exit); + + __ bind(L_success); + __ movptr(res, 1); + __ bind(exit); + } +} + +#undef __ + +#ifdef COMPILER1 + +#define __ ce->masm()-> + +void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) { + ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1(); + // At this point we know that marking is in progress. + // If do_load() is true then we have to emit the + // load of the previous value; otherwise it has already + // been loaded into _pre_val. + + __ bind(*stub->entry()); + assert(stub->pre_val()->is_register(), "Precondition."); + + Register pre_val_reg = stub->pre_val()->as_register(); + + if (stub->do_load()) { + ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/); + } + + __ cmpptr(pre_val_reg, (int32_t)NULL_WORD); + __ jcc(Assembler::equal, *stub->continuation()); + ce->store_parameter(stub->pre_val()->as_register(), 0); + __ call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin())); + __ jmp(*stub->continuation()); + +} + +void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) { + ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1(); + __ bind(*stub->entry()); + + Register obj = stub->obj()->as_register(); + Register res = stub->result()->as_register(); + Register addr = stub->addr()->as_register(); + Register tmp1 = stub->tmp1()->as_register(); + Register tmp2 = stub->tmp2()->as_register(); + assert_different_registers(obj, res, addr, tmp1, tmp2); + + Label slow_path; + + assert(res == rax, "result must arrive in rax"); + + if (res != obj) { + __ mov(res, obj); + } + + // Check for null. + __ testptr(res, res); + __ jcc(Assembler::zero, *stub->continuation()); + + // Check for object being in the collection set. + __ mov(tmp1, res); + __ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint()); + __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr()); +#ifdef _LP64 + __ movbool(tmp2, Address(tmp2, tmp1, Address::times_1)); + __ testbool(tmp2); +#else + // On x86_32, C1 register allocator can give us the register without 8-bit support. + // Do the full-register access and test to avoid compilation failures. 
+ __ movptr(tmp2, Address(tmp2, tmp1, Address::times_1)); + __ testptr(tmp2, 0xFF); +#endif + __ jcc(Assembler::zero, *stub->continuation()); + + __ bind(slow_path); + ce->store_parameter(res, 0); + ce->store_parameter(addr, 1); + __ call(RuntimeAddress(bs->load_reference_barrier_rt_code_blob()->code_begin())); + + __ jmp(*stub->continuation()); +} + +#undef __ + +#define __ sasm-> + +void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) { + __ prologue("shenandoah_pre_barrier", false); + // arg0 : previous value of memory + + __ push(rax); + __ push(rdx); + + const Register pre_val = rax; + const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread); + const Register tmp = rdx; + + NOT_LP64(__ get_thread(thread);) + + Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset())); + Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())); + + Label done; + Label runtime; + + // Is SATB still active? + Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset())); + __ testb(gc_state, ShenandoahHeap::MARKING | ShenandoahHeap::TRAVERSAL); + __ jcc(Assembler::zero, done); + + // Can we store original value in the thread's buffer? + + __ movptr(tmp, queue_index); + __ testptr(tmp, tmp); + __ jcc(Assembler::zero, runtime); + __ subptr(tmp, wordSize); + __ movptr(queue_index, tmp); + __ addptr(tmp, buffer); + + // prev_val (rax) + __ load_parameter(0, pre_val); + __ movptr(Address(tmp, 0), pre_val); + __ jmp(done); + + __ bind(runtime); + + __ save_live_registers_no_oop_map(true); + + // load the pre-value + __ load_parameter(0, rcx); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), rcx, thread); + + __ restore_live_registers(true); + + __ bind(done); + + __ pop(rdx); + __ pop(rax); + + __ epilogue(); +} + +void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm) { + __ prologue("shenandoah_load_reference_barrier", false); + // arg0 : object to be resolved + + __ save_live_registers_no_oop_map(true); + +#ifdef _LP64 + __ load_parameter(0, c_rarg0); + __ load_parameter(1, c_rarg1); + if (UseCompressedOops) { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow), c_rarg0, c_rarg1); + } else { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier), c_rarg0, c_rarg1); + } +#else + __ load_parameter(0, rax); + __ load_parameter(1, rbx); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier), rax, rbx); +#endif + + __ restore_live_registers_except_rax(true); + + __ epilogue(); +} + +#undef __ + +#endif // COMPILER1 + +address ShenandoahBarrierSetAssembler::shenandoah_lrb() { + assert(_shenandoah_lrb != NULL, "need load reference barrier stub"); + return _shenandoah_lrb; +} + +#define __ cgen->assembler()-> + +/* + * Incoming parameters: + * rax: oop + * rsi: load address + */ +address ShenandoahBarrierSetAssembler::generate_shenandoah_lrb(StubCodeGenerator* cgen) { + __ align(CodeEntryAlignment); + StubCodeMark mark(cgen, "StubRoutines", "shenandoah_lrb"); + address start = __ pc(); + + Label resolve_oop, slow_path; + + // We use RDI, which also serves as argument register for slow call. + // RAX always holds the src object ptr, except after the slow call, + // then it holds the result. R8/RBX is used as temporary register. 
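+  //
+  // In outline, the stub computes (pseudo-code for illustration only):
+  //
+  //   if (!in_collection_set(rax)) return rax;        // fast path, most common
+  //   if (is_forwarded(rax))       return forwardee(rax);
+  //   return ShenandoahRuntime::load_reference_barrier(rax, rsi);  // may evacuate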
+ + Register tmp1 = rdi; + Register tmp2 = LP64_ONLY(r8) NOT_LP64(rbx); + + __ push(tmp1); + __ push(tmp2); + + // Check for object being in the collection set. + __ mov(tmp1, rax); + __ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint()); + __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr()); + __ movbool(tmp2, Address(tmp2, tmp1, Address::times_1)); + __ testbool(tmp2); + __ jccb(Assembler::notZero, resolve_oop); + __ pop(tmp2); + __ pop(tmp1); + __ ret(0); + + // Test if object is already resolved. + __ bind(resolve_oop); + __ movptr(tmp2, Address(rax, oopDesc::mark_offset_in_bytes())); + // Test if both lowest bits are set. We trick it by negating the bits + // then test for both bits clear. + __ notptr(tmp2); + __ testb(tmp2, markOopDesc::marked_value); + __ jccb(Assembler::notZero, slow_path); + // Clear both lower bits. It's still inverted, so set them, and then invert back. + __ orptr(tmp2, markOopDesc::marked_value); + __ notptr(tmp2); + // At this point, tmp2 contains the decoded forwarding pointer. + __ mov(rax, tmp2); + + __ pop(tmp2); + __ pop(tmp1); + __ ret(0); + + __ bind(slow_path); + + __ push(rcx); + __ push(rdx); + __ push(rdi); +#ifdef _LP64 + __ push(r8); + __ push(r9); + __ push(r10); + __ push(r11); + __ push(r12); + __ push(r13); + __ push(r14); + __ push(r15); +#endif + __ push(rbp); + __ movptr(rbp, rsp); + __ andptr(rsp, -StackAlignmentInBytes); + __ push_FPU_state(); + if (UseCompressedOops) { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow), rax, rsi); + } else { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier), rax, rsi); + } + __ pop_FPU_state(); + __ movptr(rsp, rbp); + __ pop(rbp); +#ifdef _LP64 + __ pop(r15); + __ pop(r14); + __ pop(r13); + __ pop(r12); + __ pop(r11); + __ pop(r10); + __ pop(r9); + __ pop(r8); +#endif + __ pop(rdi); + __ pop(rdx); + __ pop(rcx); + + __ pop(tmp2); + __ pop(tmp1); + __ ret(0); + + return start; +} + +#undef __ + +void ShenandoahBarrierSetAssembler::barrier_stubs_init() { + if (ShenandoahLoadRefBarrier) { + int stub_code_size = 4096; + ResourceMark rm; + BufferBlob* bb = BufferBlob::create("shenandoah_barrier_stubs", stub_code_size); + CodeBuffer buf(bb); + StubCodeGenerator cgen(&buf); + _shenandoah_lrb = generate_shenandoah_lrb(&cgen); + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp 2020-01-17 17:09:30.458132664 +0100 @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef CPU_X86_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_X86_HPP +#define CPU_X86_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_X86_HPP + +#include "asm/macroAssembler.hpp" +#include "gc/shared/barrierSetAssembler.hpp" +#ifdef COMPILER1 +class LIR_Assembler; +class ShenandoahPreBarrierStub; +class ShenandoahLoadReferenceBarrierStub; +class StubAssembler; +#endif +class StubCodeGenerator; + +class ShenandoahBarrierSetAssembler: public BarrierSetAssembler { +private: + + static address _shenandoah_lrb; + + void satb_write_barrier_pre(MacroAssembler* masm, + Register obj, + Register pre_val, + Register thread, + Register tmp, + bool tosca_live, + bool expand_call); + + void shenandoah_write_barrier_pre(MacroAssembler* masm, + Register obj, + Register pre_val, + Register thread, + Register tmp, + bool tosca_live, + bool expand_call); + + void load_reference_barrier_not_null(MacroAssembler* masm, Register dst, Address src); + + void storeval_barrier_impl(MacroAssembler* masm, Register dst, Register tmp); + + address generate_shenandoah_lrb(StubCodeGenerator* cgen); + +public: + static address shenandoah_lrb(); + + void storeval_barrier(MacroAssembler* masm, Register dst, Register tmp); +#ifdef COMPILER1 + void gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub); + void gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub); + void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm); + void generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm); +#endif + + void load_reference_barrier(MacroAssembler* masm, Register dst, Address src); + + virtual void cmpxchg_oop(MacroAssembler* masm, + Register res, Address addr, Register oldval, Register newval, + bool exchange, Register tmp1, Register tmp2); + virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register src, Register dst, Register count); + virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register dst, Address src, Register tmp1, Register tmp_thread); + virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Address dst, Register val, Register tmp1, Register tmp2); + virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env, + Register obj, Register tmp, Label& slowpath); + + virtual void barrier_stubs_init(); + +}; + +#endif // CPU_X86_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_X86_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/cpu/x86/gc/shenandoah/shenandoah_x86_32.ad 2020-01-17 17:09:31.063132631 +0100 @@ -0,0 +1,70 @@ +// +// Copyright (c) 2018, Red Hat, Inc. All rights reserved. +// +// This code is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License version 2 only, as +// published by the Free Software Foundation. +// +// This code is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// version 2 for more details (a copy is included in the LICENSE file that +// accompanied this code). +// +// You should have received a copy of the GNU General Public License version +// 2 along with this work; if not, write to the Free Software Foundation, +// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+// +// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +// or visit www.oracle.com if you need additional information or have any +// questions. +// +// + +source_hpp %{ +#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp" +#include "gc/shenandoah/c2/shenandoahSupport.hpp" +%} + +instruct compareAndSwapP_shenandoah(rRegI res, + memory mem_ptr, + eRegP tmp1, eRegP tmp2, + eAXRegP oldval, eRegP newval, + eFlagsReg cr) +%{ + match(Set res (ShenandoahCompareAndSwapP mem_ptr (Binary oldval newval))); + match(Set res (ShenandoahWeakCompareAndSwapP mem_ptr (Binary oldval newval))); + effect(TEMP tmp1, TEMP tmp2, KILL cr, KILL oldval); + + format %{ "shenandoah_cas_oop $mem_ptr,$newval" %} + + ins_encode %{ + ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, + $res$$Register, $mem_ptr$$Address, $oldval$$Register, $newval$$Register, + false, // swap + $tmp1$$Register, $tmp2$$Register + ); + %} + ins_pipe( pipe_cmpxchg ); +%} + +instruct compareAndExchangeP_shenandoah(memory mem_ptr, + eAXRegP oldval, eRegP newval, + eRegP tmp1, eRegP tmp2, + eFlagsReg cr) +%{ + match(Set oldval (ShenandoahCompareAndExchangeP mem_ptr (Binary oldval newval))); + effect(KILL cr, TEMP tmp1, TEMP tmp2); + ins_cost(1000); + + format %{ "shenandoah_cas_oop $mem_ptr,$newval" %} + + ins_encode %{ + ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, + NULL, $mem_ptr$$Address, $oldval$$Register, $newval$$Register, + true, // exchange + $tmp1$$Register, $tmp2$$Register + ); + %} + ins_pipe( pipe_cmpxchg ); +%} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/cpu/x86/gc/shenandoah/shenandoah_x86_64.ad 2020-01-17 17:09:31.673132597 +0100 @@ -0,0 +1,112 @@ +// +// Copyright (c) 2018, Red Hat, Inc. All rights reserved. +// +// This code is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License version 2 only, as +// published by the Free Software Foundation. +// +// This code is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// version 2 for more details (a copy is included in the LICENSE file that +// accompanied this code). +// +// You should have received a copy of the GNU General Public License version +// 2 along with this work; if not, write to the Free Software Foundation, +// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +// +// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +// or visit www.oracle.com if you need additional information or have any +// questions. 
+// +// + +source_hpp %{ +#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp" +#include "gc/shenandoah/c2/shenandoahSupport.hpp" +%} + +instruct compareAndSwapP_shenandoah(rRegI res, + memory mem_ptr, + rRegP tmp1, rRegP tmp2, + rax_RegP oldval, rRegP newval, + rFlagsReg cr) +%{ + predicate(VM_Version::supports_cx8()); + match(Set res (ShenandoahCompareAndSwapP mem_ptr (Binary oldval newval))); + match(Set res (ShenandoahWeakCompareAndSwapP mem_ptr (Binary oldval newval))); + effect(TEMP tmp1, TEMP tmp2, KILL cr, KILL oldval); + + format %{ "shenandoah_cas_oop $mem_ptr,$newval" %} + + ins_encode %{ + ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, + $res$$Register, $mem_ptr$$Address, $oldval$$Register, $newval$$Register, + false, // swap + $tmp1$$Register, $tmp2$$Register + ); + %} + ins_pipe( pipe_cmpxchg ); +%} + +instruct compareAndSwapN_shenandoah(rRegI res, + memory mem_ptr, + rRegP tmp1, rRegP tmp2, + rax_RegN oldval, rRegN newval, + rFlagsReg cr) %{ + match(Set res (ShenandoahCompareAndSwapN mem_ptr (Binary oldval newval))); + match(Set res (ShenandoahWeakCompareAndSwapN mem_ptr (Binary oldval newval))); + effect(TEMP tmp1, TEMP tmp2, KILL cr, KILL oldval); + + format %{ "shenandoah_cas_oop $mem_ptr,$newval" %} + + ins_encode %{ + ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, + $res$$Register, $mem_ptr$$Address, $oldval$$Register, $newval$$Register, + false, // swap + $tmp1$$Register, $tmp2$$Register + ); + %} + ins_pipe( pipe_cmpxchg ); +%} + +instruct compareAndExchangeN_shenandoah(memory mem_ptr, + rax_RegN oldval, rRegN newval, + rRegP tmp1, rRegP tmp2, + rFlagsReg cr) %{ + match(Set oldval (ShenandoahCompareAndExchangeN mem_ptr (Binary oldval newval))); + effect(TEMP tmp1, TEMP tmp2, KILL cr); + + format %{ "shenandoah_cas_oop $mem_ptr,$newval" %} + + ins_encode %{ + ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, + NULL, $mem_ptr$$Address, $oldval$$Register, $newval$$Register, + true, // exchange + $tmp1$$Register, $tmp2$$Register + ); + %} + ins_pipe( pipe_cmpxchg ); +%} + +instruct compareAndExchangeP_shenandoah(memory mem_ptr, + rax_RegP oldval, rRegP newval, + rRegP tmp1, rRegP tmp2, + rFlagsReg cr) +%{ + predicate(VM_Version::supports_cx8()); + match(Set oldval (ShenandoahCompareAndExchangeP mem_ptr (Binary oldval newval))); + effect(KILL cr, TEMP tmp1, TEMP tmp2); + ins_cost(1000); + + format %{ "shenandoah_cas_oop $mem_ptr,$newval" %} + + ins_encode %{ + ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, + NULL, $mem_ptr$$Address, $oldval$$Register, $newval$$Register, + true, // exchange + $tmp1$$Register, $tmp2$$Register + ); + %} + ins_pipe( pipe_cmpxchg ); +%} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shared/markBitMap.cpp 2020-01-17 17:09:32.278132564 +0100 @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +// Concurrent marking bit map wrapper + +#include "precompiled.hpp" +#include "gc/shared/markBitMap.inline.hpp" +#include "utilities/bitMap.inline.hpp" + +MarkBitMapRO::MarkBitMapRO(int shifter) : + _bm(), + _shifter(shifter) { + _bmStartWord = 0; + _bmWordSize = 0; +} + +#ifndef PRODUCT +bool MarkBitMapRO::covers(MemRegion heap_rs) const { + // assert(_bm.map() == _virtual_space.low(), "map inconsistency"); + assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize, + "size inconsistency"); + return _bmStartWord == (HeapWord*)(heap_rs.start()) && + _bmWordSize == heap_rs.word_size(); +} +#endif + +void MarkBitMapRO::print_on_error(outputStream* st, const char* prefix) const { + _bm.print_on_error(st, prefix); +} + +size_t MarkBitMap::compute_size(size_t heap_size) { + return ReservedSpace::allocation_align_size_up(heap_size / mark_distance()); +} + +size_t MarkBitMap::mark_distance() { + return MinObjAlignmentInBytes * BitsPerByte; +} + +void MarkBitMap::initialize(MemRegion heap, MemRegion bitmap) { + _bmStartWord = heap.start(); + _bmWordSize = heap.word_size(); + + _bm = BitMapView((BitMap::bm_word_t*) bitmap.start(), _bmWordSize >> _shifter); + _covered = heap; +} + +void MarkBitMap::do_clear(MemRegion mr, bool large) { + MemRegion intersection = mr.intersection(_covered); + assert(!intersection.is_empty(), + "Given range from " PTR_FORMAT " to " PTR_FORMAT " is completely outside the heap", + p2i(mr.start()), p2i(mr.end())); + // convert address range into offset range + size_t beg = heapWordToOffset(intersection.start()); + size_t end = heapWordToOffset(intersection.end()); + if (large) { + _bm.clear_large_range(beg, end); + } else { + _bm.clear_range(beg, end); + } +} + --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shared/markBitMap.hpp 2020-01-17 17:09:32.882132530 +0100 @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_CMBITMAP_HPP
+#define SHARE_VM_GC_SHARED_CMBITMAP_HPP
+
+#include "memory/memRegion.hpp"
+#include "utilities/bitMap.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// A generic CM bit map. This is essentially a wrapper around the BitMap
+// class, with one bit per (1<<_shifter) HeapWords.
+
+class MarkBitMapRO {
+ protected:
+  MemRegion _covered;      // The heap area covered by this bitmap.
+  HeapWord* _bmStartWord;  // base address of range covered by map
+  size_t    _bmWordSize;   // map size (in #HeapWords covered)
+  const int _shifter;      // map to char or bit
+  BitMapView _bm;          // the bit map itself
+
+ public:
+  // constructor
+  MarkBitMapRO(int shifter);
+
+  // inquiries
+  HeapWord* startWord() const { return _bmStartWord; }
+  // the following is one past the last word in space
+  HeapWord* endWord()   const { return _bmStartWord + _bmWordSize; }
+
+  // read marks
+
+  bool isMarked(HeapWord* addr) const {
+    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
+           "outside underlying space?");
+    return _bm.at(heapWordToOffset(addr));
+  }
+
+  // iteration
+  inline bool iterate(BitMapClosure* cl, MemRegion mr);
+
+  // Return the address corresponding to the next marked bit at or after
+  // "addr", and before "limit", if "limit" is non-NULL. If there is no
+  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
+  inline HeapWord* getNextMarkedWordAddress(const HeapWord* addr,
+                                            const HeapWord* limit = NULL) const;
+
+  // conversion utilities
+  HeapWord* offsetToHeapWord(size_t offset) const {
+    return _bmStartWord + (offset << _shifter);
+  }
+  size_t heapWordToOffset(const HeapWord* addr) const {
+    return pointer_delta(addr, _bmStartWord) >> _shifter;
+  }
+
+  // The argument addr should be the start address of a valid object
+  inline HeapWord* nextObject(HeapWord* addr);
+
+  void print_on_error(outputStream* st, const char* prefix) const;
+
+  // debugging
+  NOT_PRODUCT(bool covers(MemRegion rs) const;)
+};
+
+class MarkBitMap : public MarkBitMapRO {
+ private:
+  // Clear bitmap range
+  void do_clear(MemRegion mr, bool large);
+
+ public:
+  static size_t compute_size(size_t heap_size);
+  // Returns the amount of bytes on the heap between two marks in the bitmap.
+  static size_t mark_distance();
+  // Returns how many bytes (or bits) of the heap a single byte (or bit) of the
+  // mark bitmap corresponds to. This is the same as the mark distance above.
+  static size_t heap_map_factor() {
+    return mark_distance();
+  }
+
+  MarkBitMap() : MarkBitMapRO(LogMinObjAlignment) {}
+
+  // Initializes the underlying BitMap to cover the given area.
+  void initialize(MemRegion heap, MemRegion bitmap);
+
+  // Write marks.
+  inline void mark(HeapWord* addr);
+  inline void clear(HeapWord* addr);
+  inline bool parMark(HeapWord* addr);
+
+  // Clear range. For larger regions, use *_large.
+  void clear()                         { do_clear(_covered, true); }
+  void clear_range(MemRegion mr)       { do_clear(mr, false); }
+  void clear_range_large(MemRegion mr) { do_clear(mr, true); }
+};
+
+#endif // SHARE_VM_GC_SHARED_CMBITMAP_HPP
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shared/markBitMap.inline.hpp 2020-01-17 17:09:33.485132497 +0100
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHARED_CMBITMAP_INLINE_HPP +#define SHARE_VM_GC_SHARED_CMBITMAP_INLINE_HPP + +#include "gc/shared/markBitMap.hpp" +#include "oops/oop.inline.hpp" +#include "utilities/bitMap.inline.hpp" +#include "utilities/align.hpp" + +inline HeapWord* MarkBitMapRO::getNextMarkedWordAddress(const HeapWord* addr, + const HeapWord* limit) const { + // First we must round addr *up* to a possible object boundary. + addr = (HeapWord*)align_up((intptr_t)addr, + HeapWordSize << _shifter); + size_t addrOffset = heapWordToOffset(addr); + assert(limit != NULL, "limit must not be NULL"); + size_t limitOffset = heapWordToOffset(limit); + size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset); + HeapWord* nextAddr = offsetToHeapWord(nextOffset); + assert(nextAddr >= addr, "get_next_one postcondition"); + assert(nextAddr == limit || isMarked(nextAddr), + "get_next_one postcondition"); + return nextAddr; +} + +inline bool MarkBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) { + HeapWord* start_addr = MAX2(startWord(), mr.start()); + HeapWord* end_addr = MIN2(endWord(), mr.end()); + + if (end_addr > start_addr) { + // Right-open interval [start-offset, end-offset). 
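+    // Visit each marked bit in that interval; after each hit, resume the
+    // search just past the object that starts at the marked address, so no
+    // object is reported twice.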
+ BitMap::idx_t start_offset = heapWordToOffset(start_addr); + BitMap::idx_t end_offset = heapWordToOffset(end_addr); + + start_offset = _bm.get_next_one_offset(start_offset, end_offset); + while (start_offset < end_offset) { + if (!cl->do_bit(start_offset)) { + return false; + } + HeapWord* next_addr = MIN2(nextObject(offsetToHeapWord(start_offset)), end_addr); + BitMap::idx_t next_offset = heapWordToOffset(next_addr); + start_offset = _bm.get_next_one_offset(next_offset, end_offset); + } + } + return true; +} + +// The argument addr should be the start address of a valid object +HeapWord* MarkBitMapRO::nextObject(HeapWord* addr) { + oop obj = (oop) addr; + HeapWord* res = addr + obj->size(); + assert(offsetToHeapWord(heapWordToOffset(res)) == res, "sanity"); + return res; +} + +#define check_mark(addr) \ + assert(_bmStartWord <= (addr) && (addr) < (_bmStartWord + _bmWordSize), \ + "outside underlying space?"); \ + /* assert(G1CollectedHeap::heap()->is_in_exact(addr), \ + err_msg("Trying to access not available bitmap "PTR_FORMAT \ + " corresponding to "PTR_FORMAT" (%u)", \ + p2i(this), p2i(addr), G1CollectedHeap::heap()->addr_to_region(addr))); */ + +inline void MarkBitMap::mark(HeapWord* addr) { + check_mark(addr); + _bm.set_bit(heapWordToOffset(addr)); +} + +inline void MarkBitMap::clear(HeapWord* addr) { + check_mark(addr); + _bm.clear_bit(heapWordToOffset(addr)); +} + +inline bool MarkBitMap::parMark(HeapWord* addr) { + check_mark(addr); + return _bm.par_set_bit(heapWordToOffset(addr)); +} + +#undef check_mark + +#endif // SHARE_VM_GC_SHARED_CMBITMAP_INLINE_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shared/parallelCleaning.cpp 2020-01-17 17:09:34.084132464 +0100 @@ -0,0 +1,331 @@ +/* + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "classfile/symbolTable.hpp" +#include "classfile/stringTable.hpp" +#include "code/codeCache.hpp" +#include "gc/shared/parallelCleaning.hpp" +#include "memory/resourceArea.hpp" +#include "prims/resolvedMethodTable.hpp" +#include "logging/log.hpp" +#include "gc/shared/gcCause.hpp" +#include "gc/shared/gcTraceTime.hpp" +#include "gc/shared/gcTraceTime.inline.hpp" + +StringSymbolTableUnlinkTask::StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) : + AbstractGangTask("String/Symbol Unlinking"), + _is_alive(is_alive), + _par_state_string(StringTable::weak_storage()), + _process_strings(process_strings), _strings_processed(0), _strings_removed(0), + _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) { + + _initial_string_table_size = (int) StringTable::the_table()->table_size(); + _initial_symbol_table_size = SymbolTable::the_table()->table_size(); + if (process_symbols) { + SymbolTable::clear_parallel_claimed_index(); + } +} + +StringSymbolTableUnlinkTask::~StringSymbolTableUnlinkTask() { + guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size, + "claim value %d after unlink less than initial symbol table size %d", + SymbolTable::parallel_claimed_index(), _initial_symbol_table_size); + + log_info(gc, stringtable)( + "Cleaned string and symbol table, " + "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, " + "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed", + strings_processed(), strings_removed(), + symbols_processed(), symbols_removed()); +} + +void StringSymbolTableUnlinkTask::work(uint worker_id) { + int strings_processed = 0; + int strings_removed = 0; + int symbols_processed = 0; + int symbols_removed = 0; + if (_process_strings) { + StringTable::possibly_parallel_unlink(&_par_state_string, _is_alive, &strings_processed, &strings_removed); + Atomic::add(strings_processed, &_strings_processed); + Atomic::add(strings_removed, &_strings_removed); + } + if (_process_symbols) { + SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed); + Atomic::add(symbols_processed, &_symbols_processed); + Atomic::add(symbols_removed, &_symbols_removed); + } +} + +size_t StringSymbolTableUnlinkTask::strings_processed() const { return (size_t)_strings_processed; } +size_t StringSymbolTableUnlinkTask::strings_removed() const { return (size_t)_strings_removed; } + +size_t StringSymbolTableUnlinkTask::symbols_processed() const { return (size_t)_symbols_processed; } +size_t StringSymbolTableUnlinkTask::symbols_removed() const { return (size_t)_symbols_removed; } + + +Monitor* CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock", false, Monitor::_safepoint_check_never); + +CodeCacheUnloadingTask::CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) : + _is_alive(is_alive), + _unloading_occurred(unloading_occurred), + _num_workers(num_workers), + _first_nmethod(NULL), + _claimed_nmethod(NULL), + _postponed_list(NULL), + _num_entered_barrier(0) +{ + CompiledMethod::increase_unloading_clock(); + // Get first alive nmethod + CompiledMethodIterator iter = CompiledMethodIterator(); + if(iter.next_alive()) { + _first_nmethod = iter.method(); + } + _claimed_nmethod = (volatile CompiledMethod*)_first_nmethod; +} + +CodeCacheUnloadingTask::~CodeCacheUnloadingTask() { + CodeCache::verify_clean_inline_caches(); + + 
CodeCache::set_needs_cache_clean(false); + guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be"); + + CodeCache::verify_icholder_relocations(); +} + +void CodeCacheUnloadingTask::add_to_postponed_list(CompiledMethod* nm) { + CompiledMethod* old; + do { + old = (CompiledMethod*)_postponed_list; + nm->set_unloading_next(old); + } while ((CompiledMethod*)Atomic::cmpxchg(nm, &_postponed_list, old) != old); +} + +void CodeCacheUnloadingTask::clean_nmethod(CompiledMethod* nm) { + bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred); + + if (postponed) { + // This nmethod referred to an nmethod that has not been cleaned/unloaded yet. + add_to_postponed_list(nm); + } + + // Mark that this thread has been cleaned/unloaded. + // After this call, it will be safe to ask if this nmethod was unloaded or not. + nm->set_unloading_clock(CompiledMethod::global_unloading_clock()); +} + +void CodeCacheUnloadingTask::clean_nmethod_postponed(CompiledMethod* nm) { + nm->do_unloading_parallel_postponed(); +} + +void CodeCacheUnloadingTask::claim_nmethods(CompiledMethod** claimed_nmethods, int *num_claimed_nmethods) { + CompiledMethod* first; + CompiledMethodIterator last; + + do { + *num_claimed_nmethods = 0; + + first = (CompiledMethod*)_claimed_nmethod; + last = CompiledMethodIterator(first); + + if (first != NULL) { + + for (int i = 0; i < MaxClaimNmethods; i++) { + if (!last.next_alive()) { + break; + } + claimed_nmethods[i] = last.method(); + (*num_claimed_nmethods)++; + } + } + + } while ((CompiledMethod*)Atomic::cmpxchg(last.method(), &_claimed_nmethod, first) != first); +} + +CompiledMethod* CodeCacheUnloadingTask::claim_postponed_nmethod() { + CompiledMethod* claim; + CompiledMethod* next; + + do { + claim = (CompiledMethod*)_postponed_list; + if (claim == NULL) { + return NULL; + } + + next = claim->unloading_next(); + + } while ((CompiledMethod*)Atomic::cmpxchg(next, &_postponed_list, claim) != claim); + + return claim; +} + +// Mark that we're done with the first pass of nmethod cleaning. +void CodeCacheUnloadingTask::barrier_mark(uint worker_id) { + MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag); + _num_entered_barrier++; + if (_num_entered_barrier == _num_workers) { + ml.notify_all(); + } +} + +// See if we have to wait for the other workers to +// finish their first-pass nmethod cleaning work. +void CodeCacheUnloadingTask::barrier_wait(uint worker_id) { + if (_num_entered_barrier < _num_workers) { + MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag); + while (_num_entered_barrier < _num_workers) { + ml.wait(Mutex::_no_safepoint_check_flag, 0, false); + } + } +} + +// Cleaning and unloading of nmethods. Some work has to be postponed +// to the second pass, when we know which nmethods survive. +void CodeCacheUnloadingTask::work_first_pass(uint worker_id) { + // The first nmethods is claimed by the first worker. + if (worker_id == 0 && _first_nmethod != NULL) { + clean_nmethod(_first_nmethod); + _first_nmethod = NULL; + } + + int num_claimed_nmethods; + CompiledMethod* claimed_nmethods[MaxClaimNmethods]; + + while (true) { + claim_nmethods(claimed_nmethods, &num_claimed_nmethods); + + if (num_claimed_nmethods == 0) { + break; + } + + for (int i = 0; i < num_claimed_nmethods; i++) { + clean_nmethod(claimed_nmethods[i]); + } + } +} + +void CodeCacheUnloadingTask::work_second_pass(uint worker_id) { + CompiledMethod* nm; + // Take care of postponed nmethods. 
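+  // These are the nmethods that referred to other nmethods that had not yet
+  // been cleaned during the first pass; their cleaning was deferred until the
+  // liveness of the whole code cache was known.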
+  while ((nm = claim_postponed_nmethod()) != NULL) {
+    clean_nmethod_postponed(nm);
+  }
+}
+
+KlassCleaningTask::KlassCleaningTask(BoolObjectClosure* is_alive) :
+  _is_alive(is_alive),
+  _clean_klass_tree_claimed(0),
+  _klass_iterator() {
+}
+
+bool KlassCleaningTask::claim_clean_klass_tree_task() {
+  if (_clean_klass_tree_claimed) {
+    return false;
+  }
+
+  return Atomic::cmpxchg(1, &_clean_klass_tree_claimed, 0) == 0;
+}
+
+InstanceKlass* KlassCleaningTask::claim_next_klass() {
+  Klass* klass;
+  do {
+    klass = _klass_iterator.next_klass();
+  } while (klass != NULL && !klass->is_instance_klass());
+
+  // this can be null so don't call InstanceKlass::cast
+  return static_cast<InstanceKlass*>(klass);
+}
+
+void KlassCleaningTask::clean_klass(InstanceKlass* ik) {
+  ik->clean_weak_instanceklass_links();
+}
+
+void KlassCleaningTask::work() {
+  ResourceMark rm;
+
+  // One worker will clean the subklass/sibling klass tree.
+  if (claim_clean_klass_tree_task()) {
+    Klass::clean_subklass_tree();
+  }
+
+  // All workers will help cleaning the classes.
+  InstanceKlass* klass;
+  while ((klass = claim_next_klass()) != NULL) {
+    clean_klass(klass);
+  }
+}
+
+bool ResolvedMethodCleaningTask::claim_resolved_method_task() {
+  if (_resolved_method_task_claimed) {
+    return false;
+  }
+  return Atomic::cmpxchg(1, &_resolved_method_task_claimed, 0) == 0;
+}
+
+// These aren't big, one thread can do it all.
+void ResolvedMethodCleaningTask::work() {
+  if (claim_resolved_method_task()) {
+    ResolvedMethodTable::unlink();
+  }
+}
+
+ParallelCleaningTask::ParallelCleaningTask(BoolObjectClosure* is_alive,
+                                           bool process_strings,
+                                           bool process_symbols,
+                                           uint num_workers,
+                                           bool unloading_occurred) :
+  AbstractGangTask("Parallel Cleaning"),
+  _string_symbol_task(is_alive, process_strings, process_symbols),
+  _code_cache_task(num_workers, is_alive, unloading_occurred),
+  _klass_cleaning_task(is_alive),
+  _resolved_method_cleaning_task(is_alive)
+{
+}
+
+// The parallel work done by all worker threads.
+void ParallelCleaningTask::work(uint worker_id) {
+  // Do first pass of code cache cleaning.
+  _code_cache_task.work_first_pass(worker_id);
+
+  // Let the threads mark that the first pass is done.
+  _code_cache_task.barrier_mark(worker_id);
+
+  // Clean the Strings and Symbols.
+  _string_symbol_task.work(worker_id);
+
+  // Clean unreferenced things in the ResolvedMethodTable.
+  _resolved_method_cleaning_task.work();
+
+  // Wait for all workers to finish the first code cache cleaning pass.
+  _code_cache_task.barrier_wait(worker_id);
+
+  // Do the second code cache cleaning work, which relies on
+  // the liveness information gathered during the first pass.
+  _code_cache_task.work_second_pass(worker_id);
+
+  // Clean all klasses that were not unloaded.
+  _klass_cleaning_task.work();
+}
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shared/parallelCleaning.hpp 2020-01-17 17:09:34.683132431 +0100
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_PARALLELCLEANING_HPP
+#define SHARE_VM_GC_SHARED_PARALLELCLEANING_HPP
+
+#include "gc/shared/oopStorageParState.hpp"
+#include "gc/shared/workgroup.hpp"
+
+class StringSymbolTableUnlinkTask : public AbstractGangTask {
+private:
+  BoolObjectClosure* _is_alive;
+  OopStorage::ParState<false, false> _par_state_string;
+  int _initial_string_table_size;
+  int _initial_symbol_table_size;
+
+  bool _process_strings;
+  volatile int _strings_processed;
+  volatile int _strings_removed;
+
+  bool _process_symbols;
+  volatile int _symbols_processed;
+  volatile int _symbols_removed;
+
+public:
+  StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols);
+  ~StringSymbolTableUnlinkTask();
+
+  void work(uint worker_id);
+
+  size_t strings_processed() const;
+  size_t strings_removed() const;
+
+  size_t symbols_processed() const;
+  size_t symbols_removed() const;
+};
+
+class CodeCacheUnloadingTask {
+private:
+  static Monitor* _lock;
+
+  BoolObjectClosure* const _is_alive;
+  const bool               _unloading_occurred;
+  const uint               _num_workers;
+
+  // Variables used to claim nmethods.
+  CompiledMethod* _first_nmethod;
+  volatile CompiledMethod* _claimed_nmethod;
+
+  // The list of nmethods that need to be processed by the second pass.
+  volatile CompiledMethod* _postponed_list;
+  volatile uint            _num_entered_barrier;
+
+ public:
+  CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred);
+  ~CodeCacheUnloadingTask();
+
+ private:
+  void add_to_postponed_list(CompiledMethod* nm);
+
+  void clean_nmethod(CompiledMethod* nm);
+
+  void clean_nmethod_postponed(CompiledMethod* nm);
+
+  static const int MaxClaimNmethods = 16;
+
+  void claim_nmethods(CompiledMethod** claimed_nmethods, int *num_claimed_nmethods);
+
+  CompiledMethod* claim_postponed_nmethod();
+
+ public:
+  // Mark that we're done with the first pass of nmethod cleaning.
+  void barrier_mark(uint worker_id);
+
+  // See if we have to wait for the other workers to
+  // finish their first-pass nmethod cleaning work.
+  void barrier_wait(uint worker_id);
+
+  // Cleaning and unloading of nmethods. Some work has to be postponed
+  // to the second pass, when we know which nmethods survive.
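+  // Callers separate the two passes with barrier_mark()/barrier_wait() above,
+  // so that no worker starts the second pass before every worker has finished
+  // the first.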
+ void work_first_pass(uint worker_id); + + void work_second_pass(uint worker_id); +}; + +class KlassCleaningTask : public StackObj { + BoolObjectClosure* _is_alive; + volatile int _clean_klass_tree_claimed; + ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator; + + public: + KlassCleaningTask(BoolObjectClosure* is_alive); + + private: + bool claim_clean_klass_tree_task(); + + InstanceKlass* claim_next_klass(); + +public: + + void clean_klass(InstanceKlass* ik); + + void work(); +}; + +class ResolvedMethodCleaningTask : public StackObj { + BoolObjectClosure* _is_alive; + volatile int _resolved_method_task_claimed; +public: + ResolvedMethodCleaningTask(BoolObjectClosure* is_alive) : + _is_alive(is_alive), _resolved_method_task_claimed(0) {} + + bool claim_resolved_method_task(); + void work(); +}; + +// To minimize the remark pause times, the tasks below are done in parallel. +class ParallelCleaningTask : public AbstractGangTask { +private: + StringSymbolTableUnlinkTask _string_symbol_task; + CodeCacheUnloadingTask _code_cache_task; + KlassCleaningTask _klass_cleaning_task; + ResolvedMethodCleaningTask _resolved_method_cleaning_task; + +public: + // The constructor is run in the VMThread. + ParallelCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, uint num_workers, bool unloading_occurred); + + // The parallel work done by all worker threads. + void work(uint worker_id); +}; + +#endif // SHARE_VM_GC_SHARED_PARALLELCLEANING_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp 2020-01-17 17:09:35.286132398 +0100 @@ -0,0 +1,269 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "c1/c1_IR.hpp" +#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp" +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.hpp" +#include "gc/shenandoah/shenandoahSATBMarkQueue.hpp" +#include "gc/shenandoah/shenandoahThreadLocalData.hpp" +#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp" + +#ifdef ASSERT +#define __ gen->lir(__FILE__, __LINE__)-> +#else +#define __ gen->lir()-> +#endif + +void ShenandoahPreBarrierStub::emit_code(LIR_Assembler* ce) { + ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler(); + bs->gen_pre_barrier_stub(ce, this); +} + +void ShenandoahLoadReferenceBarrierStub::emit_code(LIR_Assembler* ce) { + ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler(); + bs->gen_load_reference_barrier_stub(ce, this); +} + +ShenandoahBarrierSetC1::ShenandoahBarrierSetC1() : + _pre_barrier_c1_runtime_code_blob(NULL), + _load_reference_barrier_rt_code_blob(NULL) {} + +void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val) { + // First we test whether marking is in progress. + BasicType flag_type; + bool patch = (decorators & C1_NEEDS_PATCHING) != 0; + bool do_load = pre_val == LIR_OprFact::illegalOpr; + if (in_bytes(ShenandoahSATBMarkQueue::byte_width_of_active()) == 4) { + flag_type = T_INT; + } else { + guarantee(in_bytes(ShenandoahSATBMarkQueue::byte_width_of_active()) == 1, + "Assumption"); + // Use unsigned type T_BOOLEAN here rather than signed T_BYTE since some platforms, eg. ARM, + // need to use unsigned instructions to use the large offset to load the satb_mark_queue. + flag_type = T_BOOLEAN; + } + LIR_Opr thrd = gen->getThreadPointer(); + LIR_Address* mark_active_flag_addr = + new LIR_Address(thrd, + in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()), + flag_type); + // Read the marking-in-progress flag. + LIR_Opr flag_val = gen->new_register(T_INT); + __ load(mark_active_flag_addr, flag_val); + __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0)); + + LIR_PatchCode pre_val_patch_code = lir_patch_none; + + CodeStub* slow; + + if (do_load) { + assert(pre_val == LIR_OprFact::illegalOpr, "sanity"); + assert(addr_opr != LIR_OprFact::illegalOpr, "sanity"); + + if (patch) + pre_val_patch_code = lir_patch_normal; + + pre_val = gen->new_register(T_OBJECT); + + if (!addr_opr->is_address()) { + assert(addr_opr->is_register(), "must be"); + addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT)); + } + slow = new ShenandoahPreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info ? 
new CodeEmitInfo(info) : NULL); + } else { + assert(addr_opr == LIR_OprFact::illegalOpr, "sanity"); + assert(pre_val->is_register(), "must be"); + assert(pre_val->type() == T_OBJECT, "must be an object"); + + slow = new ShenandoahPreBarrierStub(pre_val); + } + + __ branch(lir_cond_notEqual, T_INT, slow); + __ branch_destination(slow->continuation()); +} + +LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr) { + if (ShenandoahLoadRefBarrier) { + return load_reference_barrier_impl(gen, obj, addr); + } else { + return obj; + } +} + +LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr) { + assert(ShenandoahLoadRefBarrier, "Should be enabled"); + + obj = ensure_in_register(gen, obj); + assert(obj->is_register(), "must be a register at this point"); + addr = ensure_in_register(gen, addr); + assert(addr->is_register(), "must be a register at this point"); + LIR_Opr result = gen->result_register_for(obj->value_type()); + __ move(obj, result); + LIR_Opr tmp1 = gen->new_register(T_OBJECT); + LIR_Opr tmp2 = gen->new_register(T_OBJECT); + + LIR_Opr thrd = gen->getThreadPointer(); + LIR_Address* active_flag_addr = + new LIR_Address(thrd, + in_bytes(ShenandoahThreadLocalData::gc_state_offset()), + T_BYTE); + // Read and check the gc-state-flag. + LIR_Opr flag_val = gen->new_register(T_INT); + __ load(active_flag_addr, flag_val); + LIR_Opr mask = LIR_OprFact::intConst(ShenandoahHeap::HAS_FORWARDED | + ShenandoahHeap::EVACUATION | + ShenandoahHeap::TRAVERSAL); + LIR_Opr mask_reg = gen->new_register(T_INT); + __ move(mask, mask_reg); + + if (TwoOperandLIRForm) { + __ logical_and(flag_val, mask_reg, flag_val); + } else { + LIR_Opr masked_flag = gen->new_register(T_INT); + __ logical_and(flag_val, mask_reg, masked_flag); + flag_val = masked_flag; + } + __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0)); + + CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, addr, result, tmp1, tmp2); + __ branch(lir_cond_notEqual, T_INT, slow); + __ branch_destination(slow->continuation()); + + return result; +} + +LIR_Opr ShenandoahBarrierSetC1::ensure_in_register(LIRGenerator* gen, LIR_Opr obj) { + if (!obj->is_register()) { + LIR_Opr obj_reg; + if (obj->is_constant()) { + obj_reg = gen->new_register(T_OBJECT); + __ move(obj, obj_reg); + } else { +#ifdef AARCH64 + // AArch64 expects double-size register. + obj_reg = gen->new_pointer_register(); +#else + // x86 expects single-size register. + obj_reg = gen->new_register(T_OBJECT); +#endif + __ leal(obj, obj_reg); + } + obj = obj_reg; + } + return obj; +} + +LIR_Opr ShenandoahBarrierSetC1::storeval_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, DecoratorSet decorators) { + if (ShenandoahStoreValEnqueueBarrier) { + obj = ensure_in_register(gen, obj); + pre_barrier(gen, info, decorators, LIR_OprFact::illegalOpr, obj); + } + return obj; +} + +void ShenandoahBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) { + if (access.is_oop()) { + if (ShenandoahSATBBarrier) { + pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), access.resolved_addr(), LIR_OprFact::illegalOpr /* pre_val */); + } + value = storeval_barrier(access.gen(), value, access.access_emit_info(), access.decorators()); + } + BarrierSetC1::store_at_resolved(access, value); +} + +LIR_Opr ShenandoahBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) { + // We must resolve in register when patching. 
This is to avoid + // having a patch area in the load barrier stub, since the call + // into the runtime to patch will not have the proper oop map. + const bool patch_before_barrier = access.is_oop() && (access.decorators() & C1_NEEDS_PATCHING) != 0; + return BarrierSetC1::resolve_address(access, resolve_in_register || patch_before_barrier); +} + +void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) { + if (!access.is_oop()) { + BarrierSetC1::load_at_resolved(access, result); + return; + } + + LIRGenerator *gen = access.gen(); + + if (ShenandoahLoadRefBarrier) { + LIR_Opr tmp = gen->new_register(T_OBJECT); + BarrierSetC1::load_at_resolved(access, tmp); + tmp = load_reference_barrier(access.gen(), tmp, access.resolved_addr()); + __ move(tmp, result); + } else { + BarrierSetC1::load_at_resolved(access, result); + } + + if (ShenandoahKeepAliveBarrier) { + DecoratorSet decorators = access.decorators(); + bool is_weak = (decorators & ON_WEAK_OOP_REF) != 0; + bool is_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0; + bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0; + if (is_weak || is_phantom || is_anonymous) { + // Register the value in the referent field with the pre-barrier + LabelObj *Lcont_anonymous; + if (is_anonymous) { + Lcont_anonymous = new LabelObj(); + generate_referent_check(access, Lcont_anonymous); + } + pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr /* addr_opr */, + result /* pre_val */); + if (is_anonymous) { + __ branch_destination(Lcont_anonymous->label()); + } + } + } +} + +class C1ShenandoahPreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure { + virtual OopMapSet* generate_code(StubAssembler* sasm) { + ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler(); + bs->generate_c1_pre_barrier_runtime_stub(sasm); + return NULL; + } +}; + +class C1ShenandoahLoadReferenceBarrierCodeGenClosure : public StubAssemblerCodeGenClosure { + virtual OopMapSet* generate_code(StubAssembler* sasm) { + ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler(); + bs->generate_c1_load_reference_barrier_runtime_stub(sasm); + return NULL; + } +}; + +void ShenandoahBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) { + C1ShenandoahPreBarrierCodeGenClosure pre_code_gen_cl; + _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, -1, + "shenandoah_pre_barrier_slow", + false, &pre_code_gen_cl); + if (ShenandoahLoadRefBarrier) { + C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_code_gen_cl; + _load_reference_barrier_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1, + "shenandoah_load_reference_barrier_slow", + false, &lrb_code_gen_cl); + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp 2020-01-17 17:09:35.893132365 +0100 @@ -0,0 +1,230 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_GC_SHENANDOAH_C1_SHENANDOAHBARRIERSETC1_HPP +#define SHARE_GC_SHENANDOAH_C1_SHENANDOAHBARRIERSETC1_HPP + +#include "c1/c1_CodeStubs.hpp" +#include "gc/shared/c1/barrierSetC1.hpp" + +class ShenandoahPreBarrierStub: public CodeStub { + friend class ShenandoahBarrierSetC1; +private: + bool _do_load; + LIR_Opr _addr; + LIR_Opr _pre_val; + LIR_PatchCode _patch_code; + CodeEmitInfo* _info; + +public: + // Version that _does_ generate a load of the previous value from addr. + // addr (the address of the field to be read) must be a LIR_Address + // pre_val (a temporary register) must be a register; + ShenandoahPreBarrierStub(LIR_Opr addr, LIR_Opr pre_val, LIR_PatchCode patch_code, CodeEmitInfo* info) : + _do_load(true), _addr(addr), _pre_val(pre_val), + _patch_code(patch_code), _info(info) + { + assert(_pre_val->is_register(), "should be temporary register"); + assert(_addr->is_address(), "should be the address of the field"); + } + + // Version that _does not_ generate load of the previous value; the + // previous value is assumed to have already been loaded into pre_val. + ShenandoahPreBarrierStub(LIR_Opr pre_val) : + _do_load(false), _addr(LIR_OprFact::illegalOpr), _pre_val(pre_val), + _patch_code(lir_patch_none), _info(NULL) + { + assert(_pre_val->is_register(), "should be a register"); + } + + LIR_Opr addr() const { return _addr; } + LIR_Opr pre_val() const { return _pre_val; } + LIR_PatchCode patch_code() const { return _patch_code; } + CodeEmitInfo* info() const { return _info; } + bool do_load() const { return _do_load; } + + virtual void emit_code(LIR_Assembler* e); + virtual void visit(LIR_OpVisitState* visitor) { + if (_do_load) { + // don't pass in the code emit info since it's processed in the fast + // path + if (_info != NULL) + visitor->do_slow_case(_info); + else + visitor->do_slow_case(); + + visitor->do_input(_addr); + visitor->do_temp(_pre_val); + } else { + visitor->do_slow_case(); + visitor->do_input(_pre_val); + } + } +#ifndef PRODUCT + virtual void print_name(outputStream* out) const { out->print("ShenandoahPreBarrierStub"); } +#endif // PRODUCT +}; + +class ShenandoahLoadReferenceBarrierStub: public CodeStub { + friend class ShenandoahBarrierSetC1; +private: + LIR_Opr _obj; + LIR_Opr _addr; + LIR_Opr _result; + LIR_Opr _tmp1; + LIR_Opr _tmp2; + +public: + ShenandoahLoadReferenceBarrierStub(LIR_Opr obj, LIR_Opr addr, LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2) : + _obj(obj), _addr(addr), _result(result), _tmp1(tmp1), _tmp2(tmp2) + { + assert(_obj->is_register(), "should be register"); + assert(_addr->is_register(), "should be register"); + assert(_result->is_register(), "should be register"); + assert(_tmp1->is_register(), "should be register"); + assert(_tmp2->is_register(), "should be register"); + } + + LIR_Opr obj() const { return _obj; } + LIR_Opr addr() const { return _addr; } + LIR_Opr result() const { return _result; } + LIR_Opr tmp1() const { return _tmp1; } + LIR_Opr tmp2() const { return _tmp2; } + + virtual void 
emit_code(LIR_Assembler* e); + virtual void visit(LIR_OpVisitState* visitor) { + visitor->do_slow_case(); + visitor->do_input(_obj); + visitor->do_temp(_obj); + visitor->do_input(_addr); + visitor->do_temp(_addr); + visitor->do_temp(_result); + visitor->do_temp(_tmp1); + visitor->do_temp(_tmp2); + } +#ifndef PRODUCT + virtual void print_name(outputStream* out) const { out->print("ShenandoahLoadReferenceBarrierStub"); } +#endif // PRODUCT +}; + +class LIR_OpShenandoahCompareAndSwap : public LIR_Op { + friend class LIR_OpVisitState; + +private: + LIR_Opr _addr; + LIR_Opr _cmp_value; + LIR_Opr _new_value; + LIR_Opr _tmp1; + LIR_Opr _tmp2; + +public: + LIR_OpShenandoahCompareAndSwap(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, + LIR_Opr t1, LIR_Opr t2, LIR_Opr result) + : LIR_Op(lir_none, result, NULL) // no info + , _addr(addr) + , _cmp_value(cmp_value) + , _new_value(new_value) + , _tmp1(t1) + , _tmp2(t2) { } + + LIR_Opr addr() const { return _addr; } + LIR_Opr cmp_value() const { return _cmp_value; } + LIR_Opr new_value() const { return _new_value; } + LIR_Opr tmp1() const { return _tmp1; } + LIR_Opr tmp2() const { return _tmp2; } + + virtual void visit(LIR_OpVisitState* state) { + assert(_addr->is_valid(), "used"); + assert(_cmp_value->is_valid(), "used"); + assert(_new_value->is_valid(), "used"); + if (_info) state->do_info(_info); + state->do_input(_addr); + state->do_temp(_addr); + state->do_input(_cmp_value); + state->do_temp(_cmp_value); + state->do_input(_new_value); + state->do_temp(_new_value); + if (_tmp1->is_valid()) state->do_temp(_tmp1); + if (_tmp2->is_valid()) state->do_temp(_tmp2); + if (_result->is_valid()) state->do_output(_result); + } + + virtual void emit_code(LIR_Assembler* masm); + + virtual void print_instr(outputStream* out) const { + addr()->print(out); out->print(" "); + cmp_value()->print(out); out->print(" "); + new_value()->print(out); out->print(" "); + tmp1()->print(out); out->print(" "); + tmp2()->print(out); out->print(" "); + } +#ifndef PRODUCT + virtual const char* name() const { + return "shenandoah_cas_obj"; + } +#endif // PRODUCT +}; + +class ShenandoahBarrierSetC1 : public BarrierSetC1 { +private: + CodeBlob* _pre_barrier_c1_runtime_code_blob; + CodeBlob* _load_reference_barrier_rt_code_blob; + + void pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val); + + LIR_Opr load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr); + LIR_Opr storeval_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, DecoratorSet decorators); + + LIR_Opr load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr); + + LIR_Opr ensure_in_register(LIRGenerator* gen, LIR_Opr obj); + +public: + ShenandoahBarrierSetC1(); + + CodeBlob* pre_barrier_c1_runtime_code_blob() { + assert(_pre_barrier_c1_runtime_code_blob != NULL, ""); + return _pre_barrier_c1_runtime_code_blob; + } + + CodeBlob* load_reference_barrier_rt_code_blob() { + assert(_load_reference_barrier_rt_code_blob != NULL, ""); + return _load_reference_barrier_rt_code_blob; + } + +protected: + + virtual void store_at_resolved(LIRAccess& access, LIR_Opr value); + virtual LIR_Opr resolve_address(LIRAccess& access, bool resolve_in_register); + virtual void load_at_resolved(LIRAccess& access, LIR_Opr result); + + virtual LIR_Opr atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value); + + virtual LIR_Opr atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value); + +public: + + virtual void 
generate_c1_runtime_stubs(BufferBlob* buffer_blob);
+};
+
+#endif // SHARE_GC_SHENANDOAH_C1_SHENANDOAHBARRIERSETC1_HPP
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp 2020-01-17 17:09:36.499132331 +0100
@@ -0,0 +1,1046 @@
+/*
+ * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shenandoah/shenandoahForwarding.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/shenandoahRuntime.hpp"
+#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
+#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+#include "gc/shenandoah/c2/shenandoahSupport.hpp"
+#include "opto/arraycopynode.hpp"
+#include "opto/escape.hpp"
+#include "opto/graphKit.hpp"
+#include "opto/idealKit.hpp"
+#include "opto/macro.hpp"
+#include "opto/movenode.hpp"
+#include "opto/narrowptrnode.hpp"
+#include "opto/rootnode.hpp"
+#include "opto/runtime.hpp"
+
+ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
+  return reinterpret_cast<ShenandoahBarrierSetC2*>(BarrierSet::barrier_set()->barrier_set_c2());
+}
+
+ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena)
+  : _enqueue_barriers(new (comp_arena) GrowableArray<ShenandoahEnqueueBarrierNode*>(comp_arena, 8, 0, NULL)),
+    _load_reference_barriers(new (comp_arena) GrowableArray<ShenandoahLoadReferenceBarrierNode*>(comp_arena, 8, 0, NULL)) {
+}
+
+int ShenandoahBarrierSetC2State::enqueue_barriers_count() const {
+  return _enqueue_barriers->length();
+}
+
+ShenandoahEnqueueBarrierNode* ShenandoahBarrierSetC2State::enqueue_barrier(int idx) const {
+  return _enqueue_barriers->at(idx);
+}
+
+void ShenandoahBarrierSetC2State::add_enqueue_barrier(ShenandoahEnqueueBarrierNode * n) {
+  assert(!_enqueue_barriers->contains(n), "duplicate entry in barrier list");
+  _enqueue_barriers->append(n);
+}
+
+void ShenandoahBarrierSetC2State::remove_enqueue_barrier(ShenandoahEnqueueBarrierNode * n) {
+  if (_enqueue_barriers->contains(n)) {
+    _enqueue_barriers->remove(n);
+  }
+}
+
+int ShenandoahBarrierSetC2State::load_reference_barriers_count() const {
+  return _load_reference_barriers->length();
+}
+
+ShenandoahLoadReferenceBarrierNode* ShenandoahBarrierSetC2State::load_reference_barrier(int idx) const {
+  return _load_reference_barriers->at(idx);
+}
+
+void ShenandoahBarrierSetC2State::add_load_reference_barrier(ShenandoahLoadReferenceBarrierNode * n) {
+  assert(!_load_reference_barriers->contains(n), "duplicate entry in barrier list");
+  _load_reference_barriers->append(n);
+}
+
+void ShenandoahBarrierSetC2State::remove_load_reference_barrier(ShenandoahLoadReferenceBarrierNode * n) {
+  if (_load_reference_barriers->contains(n)) {
+    _load_reference_barriers->remove(n);
+  }
+}
+
+Node* ShenandoahBarrierSetC2::shenandoah_storeval_barrier(GraphKit* kit, Node* obj) const {
+  if (ShenandoahStoreValEnqueueBarrier) {
+    obj = shenandoah_enqueue_barrier(kit, obj);
+  }
+  return obj;
+}
+
+#define __ kit->
+
+bool ShenandoahBarrierSetC2::satb_can_remove_pre_barrier(GraphKit* kit, PhaseTransform* phase, Node* adr,
+                                                         BasicType bt, uint adr_idx) const {
+  intptr_t offset = 0;
+  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
+  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);
+
+  if (offset == Type::OffsetBot) {
+    return false; // cannot unalias unless there are precise offsets
+  }
+
+  if (alloc == NULL) {
+    return false; // No allocation found
+  }
+
+  intptr_t size_in_bytes = type2aelembytes(bt);
+
+  Node* mem = __ memory(adr_idx); // start searching here...
+
+  for (int cnt = 0; cnt < 50; cnt++) {
+
+    if (mem->is_Store()) {
+
+      Node* st_adr = mem->in(MemNode::Address);
+      intptr_t st_offset = 0;
+      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);
+
+      if (st_base == NULL) {
+        break; // inscrutable pointer
+      }
+
+      // We have found a store with the same base and offset as ours, so break
+      if (st_base == base && st_offset == offset) {
+        break;
+      }
+
+      if (st_offset != offset && st_offset != Type::OffsetBot) {
+        const int MAX_STORE = BytesPerLong;
+        if (st_offset >= offset + size_in_bytes ||
+            st_offset <= offset - MAX_STORE ||
+            st_offset <= offset - mem->as_Store()->memory_size()) {
+          // Success: The offsets are provably independent.
+          // (You may ask, why not just test st_offset != offset and be done?
+          // The answer is that stores of different sizes can co-exist
+          // in the same sequence of RawMem effects. We sometimes initialize
+          // a whole 'tile' of array elements with a single jint or jlong.)
+          mem = mem->in(MemNode::Memory);
+          continue; // advance through independent store memory
+        }
+      }
+
+      if (st_base != base
+          && MemNode::detect_ptr_independence(base, alloc, st_base,
+                                              AllocateNode::Ideal_allocation(st_base, phase),
+                                              phase)) {
+        // Success: The bases are provably independent.
+        mem = mem->in(MemNode::Memory);
+        continue; // advance through independent store memory
+      }
+    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
+
+      InitializeNode* st_init = mem->in(0)->as_Initialize();
+      AllocateNode* st_alloc = st_init->allocation();
+
+      // Make sure that we are looking at the same allocation site.
+      // The alloc variable is guaranteed to not be null here from earlier check.
+      if (alloc == st_alloc) {
+        // Check that the initialization is storing NULL so that no previous store
+        // has been moved up and directly writes a reference
+        Node* captured_store = st_init->find_captured_store(offset,
+                                                            type2aelembytes(T_OBJECT),
+                                                            phase);
+        if (captured_store == NULL || captured_store == st_init->zero_memory()) {
+          return true;
+        }
+      }
+    }
+
+    // Unless there is an explicit 'continue', we must bail out here,
+    // because 'mem' is an inscrutable memory state (e.g., a call).
+    break;
+  }
+
+  return false;
+}
+
+#undef __
+#define __ ideal.
+
+void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
+                                                    bool do_load,
+                                                    Node* obj,
+                                                    Node* adr,
+                                                    uint alias_idx,
+                                                    Node* val,
+                                                    const TypeOopPtr* val_type,
+                                                    Node* pre_val,
+                                                    BasicType bt) const {
+  // Some sanity checks
+  // Note: val is unused in this routine.
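For orientation, the fast path this method emits as ideal-graph IR corresponds roughly to the following plain C++. This is a standalone model, not HotSpot code: ThreadModel, the concrete MARKING bit value, and write_ref_field_pre_slow are illustrative stand-ins for the ShenandoahThreadLocalData fields, ShenandoahHeap::MARKING, and ShenandoahRuntime::write_ref_field_pre_entry used by the real IR below.

#include <cstddef>
#include <cstdint>

// Standalone model of the emitted SATB pre-barrier fast path; the struct
// layout and the MARKING bit value are assumptions for illustration only.
struct ThreadModel {
  uint8_t gc_state;   // bitmask; MARKING is set while concurrent mark runs
  size_t  index;      // SATB queue index in bytes, counting down to zero
  void**  buffer;     // thread-local SATB log buffer
};
static const uint8_t MARKING = 1 << 2;  // placeholder bit

// Stand-in for the leaf call into ShenandoahRuntime::write_ref_field_pre_entry.
void write_ref_field_pre_slow(void* pre_val, ThreadModel* t);

inline void satb_pre_barrier(ThreadModel* t, void** field) {
  if ((t->gc_state & MARKING) == 0) return;          // common case: not marking
  void* pre_val = *field;                            // load the previous value
  if (pre_val == NULL) return;                       // nulls are not logged
  if (t->index != 0) {                               // room left in the buffer?
    t->index -= sizeof(intptr_t);                    // index counts down in bytes
    t->buffer[t->index / sizeof(intptr_t)] = pre_val; // log the previous value
  } else {
    write_ref_field_pre_slow(pre_val, t);            // buffer full: call runtime
  }
}

The byte-wise countdown is why the IR below subtracts sizeof(intptr_t) from SATBMarkQueue::_index before storing into the buffer.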
+ + if (do_load) { + // We need to generate the load of the previous value + assert(obj != NULL, "must have a base"); + assert(adr != NULL, "where are loading from?"); + assert(pre_val == NULL, "loaded already?"); + assert(val_type != NULL, "need a type"); + + if (ReduceInitialCardMarks + && satb_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) { + return; + } + + } else { + // In this case both val_type and alias_idx are unused. + assert(pre_val != NULL, "must be loaded already"); + // Nothing to be done if pre_val is null. + if (pre_val->bottom_type() == TypePtr::NULL_PTR) return; + assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here"); + } + assert(bt == T_OBJECT, "or we shouldn't be here"); + + IdealKit ideal(kit, true); + + Node* tls = __ thread(); // ThreadLocalStorage + + Node* no_base = __ top(); + Node* zero = __ ConI(0); + Node* zeroX = __ ConX(0); + + float likely = PROB_LIKELY(0.999); + float unlikely = PROB_UNLIKELY(0.999); + + // Offsets into the thread + const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()); + const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()); + + // Now the actual pointers into the thread + Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset)); + Node* index_adr = __ AddP(no_base, tls, __ ConX(index_offset)); + + // Now some of the values + Node* marking; + Node* gc_state = __ AddP(no_base, tls, __ ConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()))); + Node* ld = __ load(__ ctrl(), gc_state, TypeInt::BYTE, T_BYTE, Compile::AliasIdxRaw); + marking = __ AndI(ld, __ ConI(ShenandoahHeap::MARKING)); + assert(ShenandoahBarrierC2Support::is_gc_state_load(ld), "Should match the shape"); + + // if (!marking) + __ if_then(marking, BoolTest::ne, zero, unlikely); { + BasicType index_bt = TypeX_X->basic_type(); + assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size."); + Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw); + + if (do_load) { + // load original value + // alias_idx correct?? + pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx); + } + + // if (pre_val != NULL) + __ if_then(pre_val, BoolTest::ne, kit->null()); { + Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw); + + // is the queue for this thread full? + __ if_then(index, BoolTest::ne, zeroX, likely); { + + // decrement the index + Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t)))); + + // Now get the buffer location we will log the previous value into and store it + Node *log_addr = __ AddP(no_base, buffer, next_index); + __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered); + // update the index + __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered); + + } __ else_(); { + + // logging buffer is full, call the runtime + const TypeFunc *tf = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type(); + __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", pre_val, tls); + } __ end_if(); // (!index) + } __ end_if(); // (pre_val != NULL) + } __ end_if(); // (!marking) + + // Final sync IdealKit and GraphKit. 
kit->final_sync(ideal);
+
+  if (ShenandoahSATBBarrier && adr != NULL) {
+    Node* c = kit->control();
+    Node* call = c->in(1)->in(1)->in(1)->in(0);
+    assert(is_shenandoah_wb_pre_call(call), "shenandoah_wb_pre call expected");
+    call->add_req(adr);
+  }
+}
+
+bool ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(Node* call) {
+  return call->is_CallLeaf() &&
+         call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry);
+}
+
+bool ShenandoahBarrierSetC2::is_shenandoah_lrb_call(Node* call) {
+  if (!call->is_CallLeaf()) {
+    return false;
+  }
+
+  address entry_point = call->as_CallLeaf()->entry_point();
+  return (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier)) ||
+         (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow));
+}
+
+bool ShenandoahBarrierSetC2::is_shenandoah_marking_if(PhaseTransform *phase, Node* n) {
+  if (n->Opcode() != Op_If) {
+    return false;
+  }
+
+  Node* bol = n->in(1);
+  assert(bol->is_Bool(), "");
+  Node* cmpx = bol->in(1);
+  if (bol->as_Bool()->_test._test == BoolTest::ne &&
+      cmpx->is_Cmp() && cmpx->in(2) == phase->intcon(0) &&
+      is_shenandoah_state_load(cmpx->in(1)->in(1)) &&
+      cmpx->in(1)->in(2)->is_Con() &&
+      cmpx->in(1)->in(2) == phase->intcon(ShenandoahHeap::MARKING)) {
+    return true;
+  }
+
+  return false;
+}
+
+bool ShenandoahBarrierSetC2::is_shenandoah_state_load(Node* n) {
+  if (!n->is_Load()) return false;
+  const int state_offset = in_bytes(ShenandoahThreadLocalData::gc_state_offset());
+  return n->in(2)->is_AddP() && n->in(2)->in(2)->Opcode() == Op_ThreadLocal
+         && n->in(2)->in(3)->is_Con()
+         && n->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == state_offset;
+}
+
+void ShenandoahBarrierSetC2::shenandoah_write_barrier_pre(GraphKit* kit,
+                                                          bool do_load,
+                                                          Node* obj,
+                                                          Node* adr,
+                                                          uint alias_idx,
+                                                          Node* val,
+                                                          const TypeOopPtr* val_type,
+                                                          Node* pre_val,
+                                                          BasicType bt) const {
+  if (ShenandoahSATBBarrier) {
+    IdealKit ideal(kit);
+    kit->sync_kit(ideal);
+
+    satb_write_barrier_pre(kit, do_load, obj, adr, alias_idx, val, val_type, pre_val, bt);
+
+    ideal.sync_kit(kit);
+    kit->final_sync(ideal);
+  }
+}
+
+Node* ShenandoahBarrierSetC2::shenandoah_enqueue_barrier(GraphKit* kit, Node* pre_val) const {
+  return kit->gvn().transform(new ShenandoahEnqueueBarrierNode(pre_val));
+}
+
+// Helper that guards and inserts a pre-barrier.
+void ShenandoahBarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
+                                                Node* pre_val, bool need_mem_bar) const {
+  // We could be accessing the referent field of a reference object. If so, when SATB
+  // barriers are enabled, we need to log the value in the referent field in an SATB buffer.
+  // This routine performs some compile time filters and generates suitable
+  // runtime filters that guard the pre-barrier code.
+  // Also add memory barrier for non-volatile load from the referent field
+  // to prevent commoning of loads across safepoint.
+
+  // Some compile time checks.
+
+  // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
+  const TypeX* otype = offset->find_intptr_t_type();
+  if (otype != NULL && otype->is_con() &&
+      otype->get_con() != java_lang_ref_Reference::referent_offset) {
+    // Constant offset but not the reference_offset so just return
+    return;
+  }
+
+  // We only need to generate the runtime guards for instances.
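Condensed, the compile-time filtering in the remainder of this helper amounts to the following predicate. This is an illustrative restatement only; the parameter names are invented here, and the real checks operate on C2 type nodes as shown in the code that follows.

// Illustrative restatement of insert_pre_barrier()'s compile-time filter
// (not HotSpot code). Returns true when the runtime guards (offset check
// plus instanceof check) still need to be emitted.
bool needs_referent_guards(bool offset_is_con_non_referent,
                           bool base_is_array,
                           bool klass_provably_not_reference) {
  if (offset_is_con_non_referent)   return false; // cannot be Reference.referent
  if (base_is_array)                return false; // arrays have no referent field
  if (klass_provably_not_reference) return false; // statically ruled out
  return true;
}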
+  const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
+  if (btype != NULL) {
+    if (btype->isa_aryptr()) {
+      // Array type so nothing to do
+      return;
+    }
+
+    const TypeInstPtr* itype = btype->isa_instptr();
+    if (itype != NULL) {
+      // Can the klass of base_oop be statically determined to be
+      // _not_ a sub-class of Reference and _not_ Object?
+      ciKlass* klass = itype->klass();
+      if (klass->is_loaded() &&
+          !klass->is_subtype_of(kit->env()->Reference_klass()) &&
+          !kit->env()->Object_klass()->is_subtype_of(klass)) {
+        return;
+      }
+    }
+  }
+
+  // The compile time filters did not reject base_oop/offset so
+  // we need to generate the following runtime filters
+  //
+  // if (offset == java_lang_ref_Reference::_reference_offset) {
+  //   if (instance_of(base, java.lang.ref.Reference)) {
+  //     pre_barrier(_, pre_val, ...);
+  //   }
+  // }
+
+  float likely = PROB_LIKELY(0.999);
+  float unlikely = PROB_UNLIKELY(0.999);
+
+  IdealKit ideal(kit);
+
+  Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);
+
+  __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
+    // Update graphKit memory and control from IdealKit.
+    kit->sync_kit(ideal);
+
+    Node* ref_klass_con = kit->makecon(TypeKlassPtr::make(kit->env()->Reference_klass()));
+    Node* is_instof = kit->gen_instanceof(base_oop, ref_klass_con);
+
+    // Update IdealKit memory and control from graphKit.
+    __ sync_kit(kit);
+
+    Node* one = __ ConI(1);
+    // is_instof == 0 if base_oop == NULL
+    __ if_then(is_instof, BoolTest::eq, one, unlikely); {
+
+      // Update graphKit from IdealKit.
+      kit->sync_kit(ideal);
+
+      // Use the pre-barrier to record the value in the referent field
+      satb_write_barrier_pre(kit, false /* do_load */,
+                             NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
+                             pre_val /* pre_val */,
+                             T_OBJECT);
+      if (need_mem_bar) {
+        // Add memory barrier to prevent commoning reads from this field
+        // across safepoint since GC can change its value.
+        kit->insert_mem_bar(Op_MemBarCPUOrder);
+      }
+      // Update IdealKit from graphKit.
+      __ sync_kit(kit);
+
+    } __ end_if(); // _ref_type != ref_none
+  } __ end_if(); // offset == referent_offset
+
+  // Final sync IdealKit and GraphKit.
+ kit->final_sync(ideal); +} + +#undef __ + +const TypeFunc* ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type() { + const Type **fields = TypeTuple::fields(2); + fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value + fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread + const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields); + + // create result type (range) + fields = TypeTuple::fields(0); + const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields); + + return TypeFunc::make(domain, range); +} + +const TypeFunc* ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type() { + const Type **fields = TypeTuple::fields(1); + fields[TypeFunc::Parms+0] = TypeOopPtr::NOTNULL; // src oop + const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields); + + // create result type (range) + fields = TypeTuple::fields(0); + const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields); + + return TypeFunc::make(domain, range); +} + +const TypeFunc* ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type() { + const Type **fields = TypeTuple::fields(2); + fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value + fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // original load address + + const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields); + + // create result type (range) + fields = TypeTuple::fields(1); + fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; + const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields); + + return TypeFunc::make(domain, range); +} + +Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const { + DecoratorSet decorators = access.decorators(); + + const TypePtr* adr_type = access.addr().type(); + Node* adr = access.addr().node(); + + bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0; + bool on_heap = (decorators & IN_HEAP) != 0; + + if (!access.is_oop() || (!on_heap && !anonymous)) { + return BarrierSetC2::store_at_resolved(access, val); + } + + GraphKit* kit = access.kit(); + + uint adr_idx = kit->C->get_alias_index(adr_type); + assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" ); + Node* value = val.node(); + value = shenandoah_storeval_barrier(kit, value); + val.set_node(value); + shenandoah_write_barrier_pre(kit, true /* do_load */, /*kit->control(),*/ access.base(), adr, adr_idx, val.node(), + static_cast(val.type()), NULL /* pre_val */, access.type()); + return BarrierSetC2::store_at_resolved(access, val); +} + +Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const { + DecoratorSet decorators = access.decorators(); + + Node* adr = access.addr().node(); + Node* obj = access.base(); + + bool mismatched = (decorators & C2_MISMATCHED) != 0; + bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0; + bool on_heap = (decorators & IN_HEAP) != 0; + bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0; + bool is_unordered = (decorators & MO_UNORDERED) != 0; + bool need_cpu_mem_bar = !is_unordered || mismatched || !on_heap; + + Node* top = Compile::current()->top(); + + Node* offset = adr->is_AddP() ? 
adr->in(AddPNode::Offset) : top; + Node* load = BarrierSetC2::load_at_resolved(access, val_type); + + if (access.is_oop()) { + if (ShenandoahLoadRefBarrier) { + load = new ShenandoahLoadReferenceBarrierNode(NULL, load); + load = access.kit()->gvn().transform(load); + } + } + + // If we are reading the value of the referent field of a Reference + // object (either by using Unsafe directly or through reflection) + // then, if SATB is enabled, we need to record the referent in an + // SATB log buffer using the pre-barrier mechanism. + // Also we need to add memory barrier to prevent commoning reads + // from this field across safepoint since GC can change its value. + bool need_read_barrier = ShenandoahKeepAliveBarrier && + (on_heap && (on_weak || (unknown && offset != top && obj != top))); + + if (!access.is_oop() || !need_read_barrier) { + return load; + } + + GraphKit* kit = access.kit(); + + if (on_weak) { + // Use the pre-barrier to record the value in the referent field + satb_write_barrier_pre(kit, false /* do_load */, + NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */, + load /* pre_val */, T_OBJECT); + // Add memory barrier to prevent commoning reads from this field + // across safepoint since GC can change its value. + kit->insert_mem_bar(Op_MemBarCPUOrder); + } else if (unknown) { + // We do not require a mem bar inside pre_barrier if need_mem_bar + // is set: the barriers would be emitted by us. + insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar); + } + + return load; +} + +static void pin_atomic_op(C2AtomicAccess& access) { + if (!access.needs_pinning()) { + return; + } + // SCMemProjNodes represent the memory state of a LoadStore. Their + // main role is to prevent LoadStore nodes from being optimized away + // when their results aren't used. 
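As a plain-C++ analogy (ordinary atomics, not C2 IR): a compare-and-swap or exchange has an observable effect even when its result is discarded, so the compiler must not delete it. The SCMemProj projection created below gives the LoadStore node the artificial memory use that enforces the same property inside C2's memory graph.

#include <atomic>

// Analogy only: the exchange must execute even if the caller ignores the
// returned value, because the atomic access itself is the observable effect.
int publish_flag(std::atomic<int>& flag) {
  return flag.exchange(1, std::memory_order_acq_rel);
}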
+ GraphKit* kit = access.kit(); + Node* load_store = access.raw_access(); + assert(load_store != NULL, "must pin atomic op"); + Node* proj = kit->gvn().transform(new SCMemProjNode(load_store)); + kit->set_memory(proj, access.alias_idx()); +} + +Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val, + Node* new_val, const Type* value_type) const { + GraphKit* kit = access.kit(); + if (access.is_oop()) { + new_val = shenandoah_storeval_barrier(kit, new_val); + shenandoah_write_barrier_pre(kit, false /* do_load */, + NULL, NULL, max_juint, NULL, NULL, + expected_val /* pre_val */, T_OBJECT); + + MemNode::MemOrd mo = access.mem_node_mo(); + Node* mem = access.memory(); + Node* adr = access.addr().node(); + const TypePtr* adr_type = access.addr().type(); + Node* load_store = NULL; + +#ifdef _LP64 + if (adr->bottom_type()->is_ptr_to_narrowoop()) { + Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop())); + Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop())); + if (ShenandoahCASBarrier) { + load_store = kit->gvn().transform(new ShenandoahCompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo)); + } else { + load_store = kit->gvn().transform(new CompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo)); + } + } else +#endif + { + if (ShenandoahCASBarrier) { + load_store = kit->gvn().transform(new ShenandoahCompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo)); + } else { + load_store = kit->gvn().transform(new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo)); + } + } + + access.set_raw_access(load_store); + pin_atomic_op(access); + +#ifdef _LP64 + if (adr->bottom_type()->is_ptr_to_narrowoop()) { + load_store = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type())); + } +#endif + load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, load_store)); + return load_store; + } + return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type); +} + +Node* ShenandoahBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val, + Node* new_val, const Type* value_type) const { + GraphKit* kit = access.kit(); + if (access.is_oop()) { + new_val = shenandoah_storeval_barrier(kit, new_val); + shenandoah_write_barrier_pre(kit, false /* do_load */, + NULL, NULL, max_juint, NULL, NULL, + expected_val /* pre_val */, T_OBJECT); + DecoratorSet decorators = access.decorators(); + MemNode::MemOrd mo = access.mem_node_mo(); + Node* mem = access.memory(); + bool is_weak_cas = (decorators & C2_WEAK_CMPXCHG) != 0; + Node* load_store = NULL; + Node* adr = access.addr().node(); +#ifdef _LP64 + if (adr->bottom_type()->is_ptr_to_narrowoop()) { + Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop())); + Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop())); + if (ShenandoahCASBarrier) { + if (is_weak_cas) { + load_store = kit->gvn().transform(new ShenandoahWeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo)); + } else { + load_store = kit->gvn().transform(new 
ShenandoahCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo)); + } + } else { + if (is_weak_cas) { + load_store = kit->gvn().transform(new WeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo)); + } else { + load_store = kit->gvn().transform(new CompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo)); + } + } + } else +#endif + { + if (ShenandoahCASBarrier) { + if (is_weak_cas) { + load_store = kit->gvn().transform(new ShenandoahWeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo)); + } else { + load_store = kit->gvn().transform(new ShenandoahCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo)); + } + } else { + if (is_weak_cas) { + load_store = kit->gvn().transform(new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo)); + } else { + load_store = kit->gvn().transform(new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo)); + } + } + } + access.set_raw_access(load_store); + pin_atomic_op(access); + return load_store; + } + return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type); +} + +Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* val, const Type* value_type) const { + GraphKit* kit = access.kit(); + if (access.is_oop()) { + val = shenandoah_storeval_barrier(kit, val); + } + Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type); + if (access.is_oop()) { + result = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, result)); + shenandoah_write_barrier_pre(kit, false /* do_load */, + NULL, NULL, max_juint, NULL, NULL, + result /* pre_val */, T_OBJECT); + } + return result; +} + +// Support for GC barriers emitted during parsing +bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const { + if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) return true; + if (node->Opcode() != Op_CallLeaf && node->Opcode() != Op_CallLeafNoFP) { + return false; + } + CallLeafNode *call = node->as_CallLeaf(); + if (call->_name == NULL) { + return false; + } + + return strcmp(call->_name, "shenandoah_clone_barrier") == 0 || + strcmp(call->_name, "shenandoah_cas_obj") == 0 || + strcmp(call->_name, "shenandoah_wb_pre") == 0; +} + +Node* ShenandoahBarrierSetC2::step_over_gc_barrier(Node* c) const { + if (c->Opcode() == Op_ShenandoahLoadReferenceBarrier) { + return c->in(ShenandoahLoadReferenceBarrierNode::ValueIn); + } + if (c->Opcode() == Op_ShenandoahEnqueueBarrier) { + c = c->in(1); + } + return c; +} + +bool ShenandoahBarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const { + return !ShenandoahBarrierC2Support::expand(C, igvn); +} + +bool ShenandoahBarrierSetC2::optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const { + if (mode == LoopOptsShenandoahExpand) { + assert(UseShenandoahGC, "only for shenandoah"); + ShenandoahBarrierC2Support::pin_and_expand(phase); + return true; + } else if (mode == LoopOptsShenandoahPostExpand) { + assert(UseShenandoahGC, "only for shenandoah"); + visited.Clear(); + ShenandoahBarrierC2Support::optimize_after_expansion(visited, nstack, worklist, phase); + return true; + } + return false; +} + +bool ShenandoahBarrierSetC2::array_copy_requires_gc_barriers(BasicType type) const { + return false; +} + +bool ShenandoahBarrierSetC2::clone_needs_barrier(Node* src, PhaseGVN& gvn) { + const TypeOopPtr* src_type = 
gvn.type(src)->is_oopptr();
+  if (src_type->isa_instptr() != NULL) {
+    ciInstanceKlass* ik = src_type->klass()->as_instance_klass();
+    if ((src_type->klass_is_exact() || (!ik->is_interface() && !ik->has_subklass())) && !ik->has_injected_fields()) {
+      if (ik->has_object_fields()) {
+        return true;
+      } else {
+        if (!src_type->klass_is_exact()) {
+          Compile::current()->dependencies()->assert_leaf_type(ik);
+        }
+      }
+    } else {
+      return true;
+    }
+  } else if (src_type->isa_aryptr()) {
+    BasicType src_elem = src_type->klass()->as_array_klass()->element_type()->basic_type();
+    if (src_elem == T_OBJECT || src_elem == T_ARRAY) {
+      return true;
+    }
+  } else {
+    return true;
+  }
+  return false;
+}
+
+void ShenandoahBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
+  Node* ctrl = ac->in(TypeFunc::Control);
+  Node* mem = ac->in(TypeFunc::Memory);
+  Node* src = ac->in(ArrayCopyNode::Src);
+  Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
+  Node* dest = ac->in(ArrayCopyNode::Dest);
+  Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
+  Node* length = ac->in(ArrayCopyNode::Length);
+  assert (src_offset == NULL && dest_offset == NULL, "for clone offsets should be null");
+  assert (src->is_AddP(), "for clone the src should be the interior ptr");
+  assert (dest->is_AddP(), "for clone the dst should be the interior ptr");
+
+  if (ShenandoahCloneBarrier && clone_needs_barrier(src, phase->igvn())) {
+    // Check if the heap has forwarded objects. If it does, we need to call into the special
+    // routine that would fix up source references before we can continue.
+
+    enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
+    Node* region = new RegionNode(PATH_LIMIT);
+    Node* mem_phi = new PhiNode(region, Type::MEMORY, TypeRawPtr::BOTTOM);
+
+    Node* thread = phase->transform_later(new ThreadLocalNode());
+    Node* offset = phase->igvn().MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+    Node* gc_state_addr = phase->transform_later(new AddPNode(phase->C->top(), thread, offset));
+
+    uint gc_state_idx = Compile::AliasIdxRaw;
+    const TypePtr* gc_state_adr_type = NULL; // debug-mode-only argument
+    debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));
+
+    Node* gc_state = phase->transform_later(new LoadBNode(ctrl, mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered));
+    Node* stable_and = phase->transform_later(new AndINode(gc_state, phase->igvn().intcon(ShenandoahHeap::HAS_FORWARDED)));
+    Node* stable_cmp = phase->transform_later(new CmpINode(stable_and, phase->igvn().zerocon(T_INT)));
+    Node* stable_test = phase->transform_later(new BoolNode(stable_cmp, BoolTest::ne));
+
+    IfNode* stable_iff = phase->transform_later(new IfNode(ctrl, stable_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN))->as_If();
+    Node* stable_ctrl = phase->transform_later(new IfFalseNode(stable_iff));
+    Node* unstable_ctrl = phase->transform_later(new IfTrueNode(stable_iff));
+
+    // Heap is stable, no need to do anything additional
+    region->init_req(_heap_stable, stable_ctrl);
+    mem_phi->init_req(_heap_stable, mem);
+
+    // Heap is unstable, call into clone barrier stub
+    Node* call = phase->make_leaf_call(unstable_ctrl, mem,
+                                       ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type(),
+                                       CAST_FROM_FN_PTR(address, ShenandoahRuntime::shenandoah_clone_barrier),
+                                       "shenandoah_clone",
+                                       TypeRawPtr::BOTTOM,
+                                       src->in(AddPNode::Base));
+    call = phase->transform_later(call);
+
+    ctrl = phase->transform_later(new ProjNode(call, TypeFunc::Control));
+    mem = phase->transform_later(new ProjNode(call, TypeFunc::Memory));
+    region->init_req(_heap_unstable, ctrl);
+    mem_phi->init_req(_heap_unstable, mem);
+
+    // Wire up the actual arraycopy stub now
+    ctrl = phase->transform_later(region);
+    mem = phase->transform_later(mem_phi);
+
+    const char* name = "arraycopy";
+    call = phase->make_leaf_call(ctrl, mem,
+                                 OptoRuntime::fast_arraycopy_Type(),
+                                 phase->basictype2arraycopy(T_LONG, NULL, NULL, true, name, true),
+                                 name, TypeRawPtr::BOTTOM,
+                                 src, dest, length
+                                 LP64_ONLY(COMMA phase->top()));
+    call = phase->transform_later(call);
+
+    // Hook up the whole thing into the graph
+    phase->igvn().replace_node(ac, call);
+  } else {
+    BarrierSetC2::clone_at_expansion(phase, ac);
+  }
+}
+
+// Support for macro expanded GC barriers
+void ShenandoahBarrierSetC2::register_potential_barrier_node(Node* node) const {
+  if (node->Opcode() == Op_ShenandoahEnqueueBarrier) {
+    state()->add_enqueue_barrier((ShenandoahEnqueueBarrierNode*) node);
+  }
+  if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
+    state()->add_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
+  }
+}
+
+void ShenandoahBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
+  if (node->Opcode() == Op_ShenandoahEnqueueBarrier) {
+    state()->remove_enqueue_barrier((ShenandoahEnqueueBarrierNode*) node);
+  }
+  if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
+    state()->remove_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
+  }
+}
+
+void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* n) const {
+  if (is_shenandoah_wb_pre_call(n)) {
+    shenandoah_eliminate_wb_pre(n, &macro->igvn());
+  }
+}
+
+void ShenandoahBarrierSetC2::shenandoah_eliminate_wb_pre(Node* call, PhaseIterGVN* igvn) const {
+  assert(UseShenandoahGC && is_shenandoah_wb_pre_call(call), "");
+  Node* c = call->as_Call()->proj_out(TypeFunc::Control);
+  c = c->unique_ctrl_out();
+  assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
+  c = c->unique_ctrl_out();
+  assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
+  Node* iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
+  assert(iff->is_If(), "expect test");
+  if (!is_shenandoah_marking_if(igvn, iff)) {
+    c = c->unique_ctrl_out();
+    assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
+    iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
+    assert(is_shenandoah_marking_if(igvn, iff), "expect marking test");
+  }
+  Node* cmpx = iff->in(1)->in(1);
+  igvn->replace_node(cmpx, igvn->makecon(TypeInt::CC_EQ));
+  igvn->rehash_node_delayed(call);
+  call->del_req(call->req()-1);
+}
+
+void ShenandoahBarrierSetC2::enqueue_useful_gc_barrier(Unique_Node_List &worklist, Node* node) const {
+  if (node->Opcode() == Op_AddP && ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(node)) {
+    worklist.push(node);
+  }
+}
+
+void ShenandoahBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &useful) const {
+  for (uint i = 0; i < useful.size(); i++) {
+    Node* n = useful.at(i);
+    if (n->Opcode() == Op_AddP && ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(n)) {
+      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+        Compile::current()->record_for_igvn(n->fast_out(i));
+      }
+    }
+  }
+  for (int i = state()->enqueue_barriers_count() - 1; i >= 0; i--) {
+    ShenandoahEnqueueBarrierNode* n = state()->enqueue_barrier(i);
+    if (!useful.member(n)) {
+      state()->remove_enqueue_barrier(n);
+    }
+  }
+  for (int i = state()->load_reference_barriers_count() - 1; i >= 0; i--) {
+    ShenandoahLoadReferenceBarrierNode* n = state()->load_reference_barrier(i);
+    if (!useful.member(n)) {
+      state()->remove_load_reference_barrier(n);
+    }
+  }
+}
+
+void ShenandoahBarrierSetC2::add_users_to_worklist(Unique_Node_List* worklist) const {}
+
+void* ShenandoahBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
+  return new(comp_arena) ShenandoahBarrierSetC2State(comp_arena);
+}
+
+ShenandoahBarrierSetC2State* ShenandoahBarrierSetC2::state() const {
+  return reinterpret_cast<ShenandoahBarrierSetC2State*>(Compile::current()->barrier_set_state());
+}
+
+// If the BarrierSetC2 state has kept macro nodes in its compilation unit state to be
+// expanded later, then now is the time to do so.
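A note on the hook that follows: Shenandoah keeps no macro nodes in its barrier-set state, so expand_macro_nodes() simply reports that there is nothing to expand. As this file's expand_barriers() and optimize_loops() show, the barriers are instead expanded by ShenandoahBarrierC2Support during the LoopOptsShenandoahExpand and LoopOptsShenandoahPostExpand passes.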
+bool ShenandoahBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const { return false; } + +#ifdef ASSERT +void ShenandoahBarrierSetC2::verify_gc_barriers(bool post_parse) const { + if (ShenandoahVerifyOptoBarriers && !post_parse) { + ShenandoahBarrierC2Support::verify(Compile::current()->root()); + } +} +#endif + +Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const { + if (is_shenandoah_wb_pre_call(n)) { + uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt(); + if (n->req() > cnt) { + Node* addp = n->in(cnt); + if (has_only_shenandoah_wb_pre_uses(addp)) { + n->del_req(cnt); + if (can_reshape) { + phase->is_IterGVN()->_worklist.push(addp); + } + return n; + } + } + } + if (n->Opcode() == Op_CmpP) { + Node* in1 = n->in(1); + Node* in2 = n->in(2); + if (in1->bottom_type() == TypePtr::NULL_PTR) { + in2 = step_over_gc_barrier(in2); + } + if (in2->bottom_type() == TypePtr::NULL_PTR) { + in1 = step_over_gc_barrier(in1); + } + PhaseIterGVN* igvn = phase->is_IterGVN(); + if (in1 != n->in(1)) { + if (igvn != NULL) { + n->set_req_X(1, in1, igvn); + } else { + n->set_req(1, in1); + } + assert(in2 == n->in(2), "only one change"); + return n; + } + if (in2 != n->in(2)) { + if (igvn != NULL) { + n->set_req_X(2, in2, igvn); + } else { + n->set_req(2, in2); + } + return n; + } + } else if (can_reshape && + n->Opcode() == Op_If && + ShenandoahBarrierC2Support::is_heap_stable_test(n) && + n->in(0) != NULL) { + Node* dom = n->in(0); + Node* prev_dom = n; + int op = n->Opcode(); + int dist = 16; + // Search up the dominator tree for another heap stable test + while (dom->Opcode() != op || // Not same opcode? + !ShenandoahBarrierC2Support::is_heap_stable_test(dom) || // Not same input 1? + prev_dom->in(0) != dom) { // One path of test does not dominate? + if (dist < 0) return NULL; + + dist--; + prev_dom = dom; + dom = IfNode::up_one_dom(dom); + if (!dom) return NULL; + } + + // Check that we did not follow a loop back to ourselves + if (n == dom) { + return NULL; + } + + return n->as_If()->dominated_by(prev_dom, phase->is_IterGVN()); + } + return NULL; +} + +bool ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(Node* n) { + for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { + Node* u = n->fast_out(i); + if (!is_shenandoah_wb_pre_call(u)) { + return false; + } + } + return n->outcnt() > 0; +} + +Node* ShenandoahBarrierSetC2::arraycopy_load_reference_barrier(PhaseGVN *phase, Node* v) { + if (ShenandoahLoadRefBarrier) { + return phase->transform(new ShenandoahLoadReferenceBarrierNode(NULL, v)); + } + if (ShenandoahStoreValEnqueueBarrier) { + return phase->transform(new ShenandoahEnqueueBarrierNode(v)); + } + return v; +} + --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp 2020-01-17 17:09:37.106132298 +0100 @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHENANDOAH_C2_SHENANDOAHBARRIERSETC2_HPP
+#define SHARE_GC_SHENANDOAH_C2_SHENANDOAHBARRIERSETC2_HPP
+
+#include "gc/shared/c2/barrierSetC2.hpp"
+#include "gc/shenandoah/c2/shenandoahSupport.hpp"
+#include "utilities/growableArray.hpp"
+
+class ShenandoahBarrierSetC2State : public ResourceObj {
+private:
+  GrowableArray<ShenandoahEnqueueBarrierNode*>* _enqueue_barriers;
+  GrowableArray<ShenandoahLoadReferenceBarrierNode*>* _load_reference_barriers;
+
+public:
+  ShenandoahBarrierSetC2State(Arena* comp_arena);
+
+  int enqueue_barriers_count() const;
+  ShenandoahEnqueueBarrierNode* enqueue_barrier(int idx) const;
+  void add_enqueue_barrier(ShenandoahEnqueueBarrierNode* n);
+  void remove_enqueue_barrier(ShenandoahEnqueueBarrierNode * n);
+
+  int load_reference_barriers_count() const;
+  ShenandoahLoadReferenceBarrierNode* load_reference_barrier(int idx) const;
+  void add_load_reference_barrier(ShenandoahLoadReferenceBarrierNode* n);
+  void remove_load_reference_barrier(ShenandoahLoadReferenceBarrierNode * n);
+};
+
+class ShenandoahBarrierSetC2 : public BarrierSetC2 {
+private:
+  void shenandoah_eliminate_wb_pre(Node* call, PhaseIterGVN* igvn) const;
+
+  bool satb_can_remove_pre_barrier(GraphKit* kit, PhaseTransform* phase, Node* adr,
+                                   BasicType bt, uint adr_idx) const;
+  void satb_write_barrier_pre(GraphKit* kit, bool do_load,
+                              Node* obj,
+                              Node* adr,
+                              uint alias_idx,
+                              Node* val,
+                              const TypeOopPtr* val_type,
+                              Node* pre_val,
+                              BasicType bt) const;
+
+  void shenandoah_write_barrier_pre(GraphKit* kit,
+                                    bool do_load,
+                                    Node* obj,
+                                    Node* adr,
+                                    uint alias_idx,
+                                    Node* val,
+                                    const TypeOopPtr* val_type,
+                                    Node* pre_val,
+                                    BasicType bt) const;
+
+  Node* shenandoah_enqueue_barrier(GraphKit* kit, Node* val) const;
+  Node* shenandoah_storeval_barrier(GraphKit* kit, Node* obj) const;
+
+  void insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
+                          Node* pre_val, bool need_mem_bar) const;
+
+  static bool clone_needs_barrier(Node* src, PhaseGVN& gvn);
+
+protected:
+  virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;
+  virtual Node* store_at_resolved(C2Access& access, C2AccessValue& val) const;
+  virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
+                                               Node* new_val, const Type* val_type) const;
+  virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
+                                                Node* new_val, const Type* value_type) const;
+  virtual Node* atomic_xchg_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* val_type) const;
+
+public:
+  static ShenandoahBarrierSetC2* bsc2();
+
+  static bool is_shenandoah_wb_pre_call(Node* call);
+  static bool is_shenandoah_lrb_call(Node* call);
+  static bool is_shenandoah_marking_if(PhaseTransform *phase, Node* n);
+  static bool is_shenandoah_state_load(Node* n);
+  static bool has_only_shenandoah_wb_pre_uses(Node* n);
+
+  ShenandoahBarrierSetC2State* state() const;
+
+  static const TypeFunc* write_ref_field_pre_entry_Type();
+  static const TypeFunc* shenandoah_clone_barrier_Type();
+  static const TypeFunc* shenandoah_load_reference_barrier_Type();
+  virtual bool has_load_barriers() const { return true; }
+
+  // This is the entry-point
for the backend to perform accesses through the Access API. + virtual void clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const; + + // These are general helper methods used by C2 + virtual bool array_copy_requires_gc_barriers(BasicType type) const; + + // Support for GC barriers emitted during parsing + virtual bool is_gc_barrier_node(Node* node) const; + virtual Node* step_over_gc_barrier(Node* c) const; + virtual bool expand_barriers(Compile* C, PhaseIterGVN& igvn) const; + virtual bool optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const; + virtual bool strip_mined_loops_expanded(LoopOptsMode mode) const { return mode == LoopOptsShenandoahExpand || mode == LoopOptsShenandoahPostExpand; } + virtual bool is_gc_specific_loop_opts_pass(LoopOptsMode mode) const { return mode == LoopOptsShenandoahExpand || mode == LoopOptsShenandoahPostExpand; } + + // Support for macro expanded GC barriers + virtual void register_potential_barrier_node(Node* node) const; + virtual void unregister_potential_barrier_node(Node* node) const; + virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const; + virtual void enqueue_useful_gc_barrier(Unique_Node_List &worklist, Node* node) const; + virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful) const; + virtual void add_users_to_worklist(Unique_Node_List* worklist) const; + + // Allow barrier sets to have shared state that is preserved across a compilation unit. + // This could for example comprise macro nodes to be expanded during macro expansion. + virtual void* create_barrier_state(Arena* comp_arena) const; + // If the BarrierSetC2 state has kept macro nodes in its compilation unit state to be + // expanded later, then now is the time to do so. + virtual bool expand_macro_nodes(PhaseMacroExpand* macro) const; + +#ifdef ASSERT + virtual void verify_gc_barriers(bool post_parse) const; +#endif + + virtual Node* ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const; + + Node* arraycopy_load_reference_barrier(PhaseGVN *phase, Node* v); + +}; + +#endif // SHARE_GC_SHENANDOAH_C2_SHENANDOAHBARRIERSETC2_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp 2020-01-17 17:09:37.706132265 +0100 @@ -0,0 +1,3325 @@ +/* + * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/c2/shenandoahSupport.hpp" +#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp" +#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp" +#include "gc/shenandoah/shenandoahForwarding.hpp" +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.hpp" +#include "gc/shenandoah/shenandoahRuntime.hpp" +#include "gc/shenandoah/shenandoahThreadLocalData.hpp" +#include "opto/arraycopynode.hpp" +#include "opto/block.hpp" +#include "opto/callnode.hpp" +#include "opto/castnode.hpp" +#include "opto/movenode.hpp" +#include "opto/phaseX.hpp" +#include "opto/rootnode.hpp" +#include "opto/runtime.hpp" +#include "opto/subnode.hpp" + +bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) { + ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state(); + if ((state->enqueue_barriers_count() + + state->load_reference_barriers_count()) > 0) { + bool attempt_more_loopopts = ShenandoahLoopOptsAfterExpansion; + C->clear_major_progress(); + PhaseIdealLoop ideal_loop(igvn, LoopOptsShenandoahExpand); + if (C->failing()) return false; + PhaseIdealLoop::verify(igvn); + DEBUG_ONLY(verify_raw_mem(C->root());) + if (attempt_more_loopopts) { + C->set_major_progress(); + int cnt = 0; + if (!C->optimize_loops(cnt, igvn, LoopOptsShenandoahPostExpand)) { + return false; + } + C->clear_major_progress(); + } + } + return true; +} + +bool ShenandoahBarrierC2Support::is_heap_state_test(Node* iff, int mask) { + if (!UseShenandoahGC) { + return false; + } + assert(iff->is_If(), "bad input"); + if (iff->Opcode() != Op_If) { + return false; + } + Node* bol = iff->in(1); + if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) { + return false; + } + Node* cmp = bol->in(1); + if (cmp->Opcode() != Op_CmpI) { + return false; + } + Node* in1 = cmp->in(1); + Node* in2 = cmp->in(2); + if (in2->find_int_con(-1) != 0) { + return false; + } + if (in1->Opcode() != Op_AndI) { + return false; + } + in2 = in1->in(2); + if (in2->find_int_con(-1) != mask) { + return false; + } + in1 = in1->in(1); + + return is_gc_state_load(in1); +} + +bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) { + return is_heap_state_test(iff, ShenandoahHeap::HAS_FORWARDED); +} + +bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) { + if (!UseShenandoahGC) { + return false; + } + if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) { + return false; + } + Node* addp = n->in(MemNode::Address); + if (!addp->is_AddP()) { + return false; + } + Node* base = addp->in(AddPNode::Address); + Node* off = addp->in(AddPNode::Offset); + if (base->Opcode() != Op_ThreadLocal) { + return false; + } + if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) { + return false; + } + return true; +} + +bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) { + assert(phase->is_dominator(stop, start), "bad inputs"); + ResourceMark rm; + Unique_Node_List wq; + wq.push(start); + for (uint next = 0; next < wq.size(); next++) { + Node *m = wq.at(next); + if (m == stop) { + continue; + } + if (m->is_SafePoint() && !m->is_CallLeaf()) { + return true; + } + if (m->is_Region()) { + for (uint i = 1; i < m->req(); i++) { + wq.push(m->in(i)); + } + } else { + wq.push(m->in(0)); + } + } + return false; +} + +bool ShenandoahBarrierC2Support::try_common_gc_state_load(Node *n, PhaseIdealLoop *phase) { + assert(is_gc_state_load(n), "inconsistent"); + 
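+  // Look for another load of the same thread-local gc-state slot that already
+  // dominates this one; if one exists and no safepoint (where the GC could
+  // change the state) can intervene, reuse it instead of loading again.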
Node* addp = n->in(MemNode::Address); + Node* dominator = NULL; + for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) { + Node* u = addp->fast_out(i); + assert(is_gc_state_load(u), "inconsistent"); + if (u != n && phase->is_dominator(u->in(0), n->in(0))) { + if (dominator == NULL) { + dominator = u; + } else { + if (phase->dom_depth(u->in(0)) < phase->dom_depth(dominator->in(0))) { + dominator = u; + } + } + } + } + if (dominator == NULL || has_safepoint_between(n->in(0), dominator->in(0), phase)) { + return false; + } + phase->igvn().replace_node(n, dominator); + + return true; +} + +#ifdef ASSERT +bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) { + assert(phis.size() == 0, ""); + + while (true) { + if (in->bottom_type() == TypePtr::NULL_PTR) { + if (trace) {tty->print_cr("NULL");} + } else if (!in->bottom_type()->make_ptr()->make_oopptr()) { + if (trace) {tty->print_cr("Non oop");} + } else if (in->bottom_type()->make_ptr()->make_oopptr() == TypeInstPtr::MIRROR) { + if (trace) {tty->print_cr("Java mirror");} + } else { + if (in->is_ConstraintCast()) { + in = in->in(1); + continue; + } else if (in->is_AddP()) { + assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access"); + in = in->in(AddPNode::Address); + continue; + } else if (in->is_Con()) { + if (trace) { + tty->print("Found constant"); + in->dump(); + } + } else if (in->Opcode() == Op_Parm) { + if (trace) { + tty->print("Found argument"); + } + } else if (in->Opcode() == Op_CreateEx) { + if (trace) { + tty->print("Found create-exception"); + } + } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) { + if (trace) { + tty->print("Found raw LoadP (OSR argument?)"); + } + } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) { + if (t == ShenandoahOopStore) { + uint i = 0; + for (; i < phis.size(); i++) { + Node* n = phis.node_at(i); + if (n->Opcode() == Op_ShenandoahEnqueueBarrier) { + break; + } + } + if (i == phis.size()) { + return false; + } + } + barriers_used.push(in); + if (trace) {tty->print("Found barrier"); in->dump();} + } else if (in->Opcode() == Op_ShenandoahEnqueueBarrier) { + if (t != ShenandoahOopStore) { + in = in->in(1); + continue; + } + if (trace) {tty->print("Found enqueue barrier"); in->dump();} + phis.push(in, in->req()); + in = in->in(1); + continue; + } else if (in->is_Proj() && in->in(0)->is_Allocate()) { + if (trace) { + tty->print("Found alloc"); + in->in(0)->dump(); + } + } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) { + if (trace) { + tty->print("Found Java call"); + } + } else if (in->is_Phi()) { + if (!visited.test_set(in->_idx)) { + if (trace) {tty->print("Pushed phi:"); in->dump();} + phis.push(in, 2); + in = in->in(1); + continue; + } + if (trace) {tty->print("Already seen phi:"); in->dump();} + } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) { + if (!visited.test_set(in->_idx)) { + if (trace) {tty->print("Pushed cmovep:"); in->dump();} + phis.push(in, CMoveNode::IfTrue); + in = in->in(CMoveNode::IfFalse); + continue; + } + if (trace) {tty->print("Already seen cmovep:"); in->dump();} + } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) { + in = in->in(1); + continue; + } else { + return false; + } + } + bool cont = false; + while (phis.is_nonempty()) { + uint idx = phis.index(); + Node* phi = phis.node(); + if (idx >= phi->req()) { 
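+        // All inputs of this phi/cmove entry have been visited: pop it and
+        // resume with the next pending entry on the stack.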
+        if (trace) {tty->print("Popped phi:"); phi->dump();}
+        phis.pop();
+        continue;
+      }
+      if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
+      in = phi->in(idx);
+      phis.set_index(idx+1);
+      cont = true;
+      break;
+    }
+    if (!cont) {
+      break;
+    }
+  }
+  return true;
+}
+
+void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
+  if (n1 != NULL) {
+    n1->dump(+10);
+  }
+  if (n2 != NULL) {
+    n2->dump(+10);
+  }
+  fatal("%s", msg);
+}
+
+void ShenandoahBarrierC2Support::verify(RootNode* root) {
+  ResourceMark rm;
+  Unique_Node_List wq;
+  GrowableArray<Node*> barriers;
+  Unique_Node_List barriers_used;
+  Node_Stack phis(0);
+  VectorSet visited(Thread::current()->resource_area());
+  const bool trace = false;
+  const bool verify_no_useless_barrier = false;
+
+  wq.push(root);
+  for (uint next = 0; next < wq.size(); next++) {
+    Node *n = wq.at(next);
+    if (n->is_Load()) {
+      const bool trace = false;
+      if (trace) {tty->print("Verifying"); n->dump();}
+      if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
+        if (trace) {tty->print_cr("Load range/klass");}
+      } else {
+        const TypePtr* adr_type = n->as_Load()->adr_type();
+
+        if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
+          if (trace) {tty->print_cr("Mark load");}
+        } else if (adr_type->isa_instptr() &&
+                   adr_type->is_instptr()->klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
+                   adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset) {
+          if (trace) {tty->print_cr("Reference.get()");}
+        } else if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
+          report_verify_failure("Shenandoah verification: Load should have barriers", n);
+        }
+      }
+    } else if (n->is_Store()) {
+      const bool trace = false;
+
+      if (trace) {tty->print("Verifying"); n->dump();}
+      if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
+        Node* adr = n->in(MemNode::Address);
+        bool verify = true;
+
+        if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
+          adr = adr->in(AddPNode::Address);
+          if (adr->is_AddP()) {
+            assert(adr->in(AddPNode::Base)->is_top(), "");
+            adr = adr->in(AddPNode::Address);
+            if (adr->Opcode() == Op_LoadP &&
+                adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
+                adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
+                adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
+              if (trace) {tty->print_cr("SATB prebarrier");}
+              verify = false;
+            }
+          }
+        }
+
+        if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ?
ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) { + report_verify_failure("Shenandoah verification: Store should have barriers", n); + } + } + if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) { + report_verify_failure("Shenandoah verification: Store (address) should have barriers", n); + } + } else if (n->Opcode() == Op_CmpP) { + const bool trace = false; + + Node* in1 = n->in(1); + Node* in2 = n->in(2); + if (in1->bottom_type()->isa_oopptr()) { + if (trace) {tty->print("Verifying"); n->dump();} + + bool mark_inputs = false; + if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR || + (in1->is_Con() || in2->is_Con())) { + if (trace) {tty->print_cr("Comparison against a constant");} + mark_inputs = true; + } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) || + (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) { + if (trace) {tty->print_cr("Comparison with newly alloc'ed object");} + mark_inputs = true; + } else { + assert(in2->bottom_type()->isa_oopptr(), ""); + + if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) || + !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) { + report_verify_failure("Shenandoah verification: Cmp should have barriers", n); + } + } + if (verify_no_useless_barrier && + mark_inputs && + (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) || + !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) { + phis.clear(); + visited.Reset(); + } + } + } else if (n->is_LoadStore()) { + if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() && + !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? 
ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) { + report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n); + } + + if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) { + report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n); + } + } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) { + CallNode* call = n->as_Call(); + + static struct { + const char* name; + struct { + int pos; + verify_type t; + } args[6]; + } calls[] = { + "aescrypt_encryptBlock", + { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad }, + { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, + "aescrypt_decryptBlock", + { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad }, + { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, + "multiplyToLen", + { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, { TypeFunc::Parms+4, ShenandoahStore }, + { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, + "squareToLen", + { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, { -1, ShenandoahNone}, + { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, + "montgomery_multiply", + { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, + { TypeFunc::Parms+6, ShenandoahStore }, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, + "montgomery_square", + { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+5, ShenandoahStore }, + { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, + "mulAdd", + { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, + { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, + "vectorizedMismatch", + { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, + { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, + "updateBytesCRC32", + { { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone}, + { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, + "updateBytesAdler32", + { { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone}, + { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, + "updateBytesCRC32C", + { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad}, { -1, ShenandoahNone}, + { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, + "counterMode_AESCrypt", + { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad }, + { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } }, + "cipherBlockChaining_encryptAESCrypt", + { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad }, + { TypeFunc::Parms+3, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, + "cipherBlockChaining_decryptAESCrypt", + { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { 
TypeFunc::Parms+2, ShenandoahLoad }, + { TypeFunc::Parms+3, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, + "shenandoah_clone_barrier", + { { TypeFunc::Parms, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone}, + { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, + "ghash_processBlocks", + { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, + { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, + "sha1_implCompress", + { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone }, + { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, + "sha256_implCompress", + { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone }, + { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, + "sha512_implCompress", + { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone }, + { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, + "sha1_implCompressMB", + { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone }, + { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, + "sha256_implCompressMB", + { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone }, + { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, + "sha512_implCompressMB", + { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone }, + { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, + "encodeBlock", + { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahStore }, { -1, ShenandoahNone }, + { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, + }; + + if (call->is_call_to_arraycopystub()) { + Node* dest = NULL; + const TypeTuple* args = n->as_Call()->_tf->domain(); + for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) { + if (args->field_at(i)->isa_ptr()) { + j++; + if (j == 2) { + dest = n->in(i); + break; + } + } + } + if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) || + !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) { + report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n); + } + } else if (strlen(call->_name) > 5 && + !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) { + if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) { + report_verify_failure("Shenandoah verification: _fill should have barriers", n); + } + } else if (!strcmp(call->_name, "shenandoah_wb_pre")) { + // skip + } else { + const int calls_len = sizeof(calls) / sizeof(calls[0]); + int i = 0; + for (; i < calls_len; i++) { + if (!strcmp(calls[i].name, call->_name)) { + break; + } + } + if (i != calls_len) { + const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]); + for (uint j = 0; j < args_len; j++) { + int pos = calls[i].args[j].pos; + if (pos == -1) { + break; + } + if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) { + report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n); + } + } + for (uint j = TypeFunc::Parms; j < call->req(); j++) { + if (call->in(j)->bottom_type()->make_ptr() && + 
call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) { + uint k = 0; + for (; k < args_len && calls[i].args[k].pos != (int)j; k++); + if (k == args_len) { + fatal("arg %d for call %s not covered", j, call->_name); + } + } + } + } else { + for (uint j = TypeFunc::Parms; j < call->req(); j++) { + if (call->in(j)->bottom_type()->make_ptr() && + call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) { + fatal("%s not covered", call->_name); + } + } + } + } + } else if (n->Opcode() == Op_ShenandoahEnqueueBarrier || n->Opcode() == Op_ShenandoahLoadReferenceBarrier) { + // skip + } else if (n->is_AddP() + || n->is_Phi() + || n->is_ConstraintCast() + || n->Opcode() == Op_Return + || n->Opcode() == Op_CMoveP + || n->Opcode() == Op_CMoveN + || n->Opcode() == Op_Rethrow + || n->is_MemBar() + || n->Opcode() == Op_Conv2B + || n->Opcode() == Op_SafePoint + || n->is_CallJava() + || n->Opcode() == Op_Unlock + || n->Opcode() == Op_EncodeP + || n->Opcode() == Op_DecodeN) { + // nothing to do + } else { + static struct { + int opcode; + struct { + int pos; + verify_type t; + } inputs[2]; + } others[] = { + Op_FastLock, + { { 1, ShenandoahLoad }, { -1, ShenandoahNone} }, + Op_Lock, + { { TypeFunc::Parms, ShenandoahLoad }, { -1, ShenandoahNone} }, + Op_ArrayCopy, + { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } }, + Op_StrCompressedCopy, + { { 2, ShenandoahLoad }, { 3, ShenandoahStore } }, + Op_StrInflatedCopy, + { { 2, ShenandoahLoad }, { 3, ShenandoahStore } }, + Op_AryEq, + { { 2, ShenandoahLoad }, { 3, ShenandoahLoad } }, + Op_StrIndexOf, + { { 2, ShenandoahLoad }, { 4, ShenandoahLoad } }, + Op_StrComp, + { { 2, ShenandoahLoad }, { 4, ShenandoahLoad } }, + Op_StrEquals, + { { 2, ShenandoahLoad }, { 3, ShenandoahLoad } }, + Op_EncodeISOArray, + { { 2, ShenandoahLoad }, { 3, ShenandoahStore } }, + Op_HasNegatives, + { { 2, ShenandoahLoad }, { -1, ShenandoahNone} }, + Op_CastP2X, + { { 1, ShenandoahLoad }, { -1, ShenandoahNone} }, + Op_StrIndexOfChar, + { { 2, ShenandoahLoad }, { -1, ShenandoahNone } }, + }; + + const int others_len = sizeof(others) / sizeof(others[0]); + int i = 0; + for (; i < others_len; i++) { + if (others[i].opcode == n->Opcode()) { + break; + } + } + uint stop = n->is_Call() ? 
n->as_Call()->tf()->domain()->cnt() : n->req();
+      if (i != others_len) {
+        const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
+        for (uint j = 0; j < inputs_len; j++) {
+          int pos = others[i].inputs[j].pos;
+          if (pos == -1) {
+            break;
+          }
+          if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
+            report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
+          }
+        }
+        for (uint j = 1; j < stop; j++) {
+          if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
+              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
+            uint k = 0;
+            for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
+            if (k == inputs_len) {
+              fatal("arg %d for node %s not covered", j, n->Name());
+            }
+          }
+        }
+      } else {
+        for (uint j = 1; j < stop; j++) {
+          if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
+              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
+            fatal("%s not covered", n->Name());
+          }
+        }
+      }
+    }
+
+    if (n->is_SafePoint()) {
+      SafePointNode* sfpt = n->as_SafePoint();
+      if (verify_no_useless_barrier && sfpt->jvms() != NULL) {
+        for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
+          if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
+            phis.clear();
+            visited.Reset();
+          }
+        }
+      }
+    }
+  }
+
+  if (verify_no_useless_barrier) {
+    for (int i = 0; i < barriers.length(); i++) {
+      Node* n = barriers.at(i);
+      if (!barriers_used.member(n)) {
+        tty->print("XXX useless barrier"); n->dump(-2);
+        ShouldNotReachHere();
+      }
+    }
+  }
+}
+#endif
+
+bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
+  // That both nodes have the same control is not sufficient to prove
+  // domination; verify that there's no path from d to n
+  ResourceMark rm;
+  Unique_Node_List wq;
+  wq.push(d);
+  for (uint next = 0; next < wq.size(); next++) {
+    Node *m = wq.at(next);
+    if (m == n) {
+      return false;
+    }
+    if (m->is_Phi() && m->in(0)->is_Loop()) {
+      assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
+    } else {
+      for (uint i = 0; i < m->req(); i++) {
+        if (m->in(i) != NULL && phase->ctrl_or_self(m->in(i)) == c) {
+          wq.push(m->in(i));
+        }
+      }
+    }
+  }
+  return true;
+}
+
+bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
+  if (d_c != n_c) {
+    return phase->is_dominator(d_c, n_c);
+  }
+  return is_dominator_same_ctrl(d_c, d, n, phase);
+}
+
+Node* next_mem(Node* mem, int alias) {
+  Node* res = NULL;
+  if (mem->is_Proj()) {
+    res = mem->in(0);
+  } else if (mem->is_SafePoint() || mem->is_MemBar()) {
+    res = mem->in(TypeFunc::Memory);
+  } else if (mem->is_Phi()) {
+    res = mem->in(1);
+  } else if (mem->is_MergeMem()) {
+    res = mem->as_MergeMem()->memory_at(alias);
+  } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
+    assert(alias == Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
+    res = mem->in(MemNode::Memory);
+  } else {
+#ifdef ASSERT
+    mem->dump();
+#endif
+    ShouldNotReachHere();
+  }
+  return res;
+}
+
+Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
+  Node* iffproj = NULL;
+  while (c != dom) {
+    Node* next = phase->idom(c);
+    assert(next->unique_ctrl_out() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
+    if (c->is_Region()) {
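+      // A Region merges several incoming paths: walk everything between it and
+      // its idom to check that all paths converge back here. Any unexpected
+      // CFG exit (other than an uncommon trap) makes the walk give up by
+      // returning NodeSentinel.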
ResourceMark rm; + Unique_Node_List wq; + wq.push(c); + for (uint i = 0; i < wq.size(); i++) { + Node *n = wq.at(i); + if (n == next) { + continue; + } + if (n->is_Region()) { + for (uint j = 1; j < n->req(); j++) { + wq.push(n->in(j)); + } + } else { + wq.push(n->in(0)); + } + } + for (uint i = 0; i < wq.size(); i++) { + Node *n = wq.at(i); + assert(n->is_CFG(), ""); + if (n->is_Multi()) { + for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { + Node* u = n->fast_out(j); + if (u->is_CFG()) { + if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) { + return NodeSentinel; + } + } + } + } + } + } else if (c->is_Proj()) { + if (c->is_IfProj()) { + if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != NULL) { + // continue; + } else { + if (!allow_one_proj) { + return NodeSentinel; + } + if (iffproj == NULL) { + iffproj = c; + } else { + return NodeSentinel; + } + } + } else if (c->Opcode() == Op_JumpProj) { + return NodeSentinel; // unsupported + } else if (c->Opcode() == Op_CatchProj) { + return NodeSentinel; // unsupported + } else if (c->Opcode() == Op_CProj && next->Opcode() == Op_NeverBranch) { + return NodeSentinel; // unsupported + } else { + assert(next->unique_ctrl_out() == c, "unsupported branch pattern"); + } + } + c = next; + } + return iffproj; +} + +Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) { + ResourceMark rm; + VectorSet wq(Thread::current()->resource_area()); + wq.set(mem->_idx); + mem_ctrl = phase->ctrl_or_self(mem); + while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) { + mem = next_mem(mem, alias); + if (wq.test_set(mem->_idx)) { + return NULL; + } + mem_ctrl = phase->ctrl_or_self(mem); + } + if (mem->is_MergeMem()) { + mem = mem->as_MergeMem()->memory_at(alias); + mem_ctrl = phase->ctrl_or_self(mem); + } + return mem; +} + +Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) { + Node* mem = NULL; + Node* c = ctrl; + do { + if (c->is_Region()) { + Node* phi_bottom = NULL; + for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == NULL; i++) { + Node* u = c->fast_out(i); + if (u->is_Phi() && u->bottom_type() == Type::MEMORY) { + if (u->adr_type() == TypePtr::BOTTOM) { + mem = u; + } + } + } + } else { + if (c->is_Call() && c->as_Call()->adr_type() != NULL) { + CallProjections projs; + c->as_Call()->extract_projections(&projs, true, false); + if (projs.fallthrough_memproj != NULL) { + if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) { + if (projs.catchall_memproj == NULL) { + mem = projs.fallthrough_memproj; + } else { + if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) { + mem = projs.fallthrough_memproj; + } else { + assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier"); + mem = projs.catchall_memproj; + } + } + } + } else { + Node* proj = c->as_Call()->proj_out(TypeFunc::Memory); + if (proj != NULL && + proj->adr_type() == TypePtr::BOTTOM) { + mem = proj; + } + } + } else { + for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) { + Node* u = c->fast_out(i); + if (u->is_Proj() && + u->bottom_type() == Type::MEMORY && + u->adr_type() == TypePtr::BOTTOM) { + assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), ""); + assert(mem == NULL, "only one proj"); + mem = u; + } + } + assert(!c->is_Call() || c->as_Call()->adr_type() != NULL || mem == NULL, "no mem projection expected"); + } + } + c 
= phase->idom(c); + } while (mem == NULL); + return mem; +} + +void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) { + for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { + Node* u = n->fast_out(i); + if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) { + uses.push(u); + } + } +} + +static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) { + OuterStripMinedLoopEndNode* le = inner->outer_loop_end(); + Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl)); + phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl)); + Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt); + phase->register_control(new_le, phase->get_loop(le), le->in(0)); + phase->lazy_replace(outer, new_outer); + phase->lazy_replace(le, new_le); + inner->clear_strip_mined(); +} + +void ShenandoahBarrierC2Support::test_heap_stable(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl, + PhaseIdealLoop* phase) { + IdealLoopTree* loop = phase->get_loop(ctrl); + Node* thread = new ThreadLocalNode(); + phase->register_new_node(thread, ctrl); + Node* offset = phase->igvn().MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset())); + phase->set_ctrl(offset, phase->C->root()); + Node* gc_state_addr = new AddPNode(phase->C->top(), thread, offset); + phase->register_new_node(gc_state_addr, ctrl); + uint gc_state_idx = Compile::AliasIdxRaw; + const TypePtr* gc_state_adr_type = NULL; // debug-mode-only argument + debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx)); + + Node* gc_state = new LoadBNode(ctrl, raw_mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered); + phase->register_new_node(gc_state, ctrl); + Node* heap_stable_and = new AndINode(gc_state, phase->igvn().intcon(ShenandoahHeap::HAS_FORWARDED)); + phase->register_new_node(heap_stable_and, ctrl); + Node* heap_stable_cmp = new CmpINode(heap_stable_and, phase->igvn().zerocon(T_INT)); + phase->register_new_node(heap_stable_cmp, ctrl); + Node* heap_stable_test = new BoolNode(heap_stable_cmp, BoolTest::ne); + phase->register_new_node(heap_stable_test, ctrl); + IfNode* heap_stable_iff = new IfNode(ctrl, heap_stable_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN); + phase->register_control(heap_stable_iff, loop, ctrl); + + heap_stable_ctrl = new IfFalseNode(heap_stable_iff); + phase->register_control(heap_stable_ctrl, loop, heap_stable_iff); + ctrl = new IfTrueNode(heap_stable_iff); + phase->register_control(ctrl, loop, heap_stable_iff); + + assert(is_heap_stable_test(heap_stable_iff), "Should match the shape"); +} + +void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) { + const Type* val_t = phase->igvn().type(val); + if (val_t->meet(TypePtr::NULL_PTR) == val_t) { + IdealLoopTree* loop = phase->get_loop(ctrl); + Node* null_cmp = new CmpPNode(val, phase->igvn().zerocon(T_OBJECT)); + phase->register_new_node(null_cmp, ctrl); + Node* null_test = new BoolNode(null_cmp, BoolTest::ne); + phase->register_new_node(null_test, ctrl); + IfNode* null_iff = new IfNode(ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN); + phase->register_control(null_iff, loop, ctrl); + ctrl = new IfTrueNode(null_iff); + phase->register_control(ctrl, loop, null_iff); + null_ctrl = new 
IfFalseNode(null_iff); + phase->register_control(null_ctrl, loop, null_iff); + } +} + +Node* ShenandoahBarrierC2Support::clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase) { + IdealLoopTree *loop = phase->get_loop(c); + Node* iff = unc_ctrl->in(0); + assert(iff->is_If(), "broken"); + Node* new_iff = iff->clone(); + new_iff->set_req(0, c); + phase->register_control(new_iff, loop, c); + Node* iffalse = new IfFalseNode(new_iff->as_If()); + phase->register_control(iffalse, loop, new_iff); + Node* iftrue = new IfTrueNode(new_iff->as_If()); + phase->register_control(iftrue, loop, new_iff); + c = iftrue; + const Type *t = phase->igvn().type(val); + assert(val->Opcode() == Op_CastPP, "expect cast to non null here"); + Node* uncasted_val = val->in(1); + val = new CastPPNode(uncasted_val, t); + val->init_req(0, c); + phase->register_new_node(val, c); + return val; +} + +void ShenandoahBarrierC2Support::fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl, + Unique_Node_List& uses, PhaseIdealLoop* phase) { + IfNode* iff = unc_ctrl->in(0)->as_If(); + Node* proj = iff->proj_out(0); + assert(proj != unc_ctrl, "bad projection"); + Node* use = proj->unique_ctrl_out(); + + assert(use == unc || use->is_Region(), "what else?"); + + uses.clear(); + if (use == unc) { + phase->set_idom(use, new_unc_ctrl, phase->dom_depth(use)); + for (uint i = 1; i < unc->req(); i++) { + Node* n = unc->in(i); + if (phase->has_ctrl(n) && phase->get_ctrl(n) == proj) { + uses.push(n); + } + } + } else { + assert(use->is_Region(), "what else?"); + uint idx = 1; + for (; use->in(idx) != proj; idx++); + for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) { + Node* u = use->fast_out(i); + if (u->is_Phi() && phase->get_ctrl(u->in(idx)) == proj) { + uses.push(u->in(idx)); + } + } + } + for(uint next = 0; next < uses.size(); next++ ) { + Node *n = uses.at(next); + assert(phase->get_ctrl(n) == proj, "bad control"); + phase->set_ctrl_and_loop(n, new_unc_ctrl); + if (n->in(0) == proj) { + phase->igvn().replace_input_of(n, 0, new_unc_ctrl); + } + for (uint i = 0; i < n->req(); i++) { + Node* m = n->in(i); + if (m != NULL && phase->has_ctrl(m) && phase->get_ctrl(m) == proj) { + uses.push(m); + } + } + } + + phase->igvn().rehash_node_delayed(use); + int nb = use->replace_edge(proj, new_unc_ctrl); + assert(nb == 1, "only use expected"); +} + +void ShenandoahBarrierC2Support::in_cset_fast_test(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) { + IdealLoopTree *loop = phase->get_loop(ctrl); + Node* raw_rbtrue = new CastP2XNode(ctrl, val); + phase->register_new_node(raw_rbtrue, ctrl); + Node* cset_offset = new URShiftXNode(raw_rbtrue, phase->igvn().intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint())); + phase->register_new_node(cset_offset, ctrl); + Node* in_cset_fast_test_base_addr = phase->igvn().makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr())); + phase->set_ctrl(in_cset_fast_test_base_addr, phase->C->root()); + Node* in_cset_fast_test_adr = new AddPNode(phase->C->top(), in_cset_fast_test_base_addr, cset_offset); + phase->register_new_node(in_cset_fast_test_adr, ctrl); + uint in_cset_fast_test_idx = Compile::AliasIdxRaw; + const TypePtr* in_cset_fast_test_adr_type = NULL; // debug-mode-only argument + debug_only(in_cset_fast_test_adr_type = phase->C->get_adr_type(in_cset_fast_test_idx)); + Node* in_cset_fast_test_load = new LoadBNode(ctrl, raw_mem, in_cset_fast_test_adr, in_cset_fast_test_adr_type, TypeInt::BYTE, MemNode::unordered); 
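+  // The byte loaded from the cset bitmap is non-zero iff the region that
+  // contains the object is in the collection set.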
+ phase->register_new_node(in_cset_fast_test_load, ctrl); + Node* in_cset_fast_test_cmp = new CmpINode(in_cset_fast_test_load, phase->igvn().zerocon(T_INT)); + phase->register_new_node(in_cset_fast_test_cmp, ctrl); + Node* in_cset_fast_test_test = new BoolNode(in_cset_fast_test_cmp, BoolTest::eq); + phase->register_new_node(in_cset_fast_test_test, ctrl); + IfNode* in_cset_fast_test_iff = new IfNode(ctrl, in_cset_fast_test_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN); + phase->register_control(in_cset_fast_test_iff, loop, ctrl); + + not_cset_ctrl = new IfTrueNode(in_cset_fast_test_iff); + phase->register_control(not_cset_ctrl, loop, in_cset_fast_test_iff); + + ctrl = new IfFalseNode(in_cset_fast_test_iff); + phase->register_control(ctrl, loop, in_cset_fast_test_iff); +} + +void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem, bool is_native, PhaseIdealLoop* phase) { + IdealLoopTree*loop = phase->get_loop(ctrl); + const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr(); + + // The slow path stub consumes and produces raw memory in addition + // to the existing memory edges + Node* base = find_bottom_mem(ctrl, phase); + MergeMemNode* mm = MergeMemNode::make(base); + mm->set_memory_at(Compile::AliasIdxRaw, raw_mem); + phase->register_new_node(mm, ctrl); + + address target = LP64_ONLY(UseCompressedOops) NOT_LP64(false) ? + CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow) : + CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier); + + Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), + target, + "shenandoah_load_reference_barrier", TypeRawPtr::BOTTOM); + call->init_req(TypeFunc::Control, ctrl); + call->init_req(TypeFunc::I_O, phase->C->top()); + call->init_req(TypeFunc::Memory, mm); + call->init_req(TypeFunc::FramePtr, phase->C->top()); + call->init_req(TypeFunc::ReturnAdr, phase->C->top()); + call->init_req(TypeFunc::Parms, val); + call->init_req(TypeFunc::Parms+1, load_addr); + phase->register_control(call, loop, ctrl); + ctrl = new ProjNode(call, TypeFunc::Control); + phase->register_control(ctrl, loop, call); + result_mem = new ProjNode(call, TypeFunc::Memory); + phase->register_new_node(result_mem, call); + val = new ProjNode(call, TypeFunc::Parms); + phase->register_new_node(val, call); + val = new CheckCastPPNode(ctrl, val, obj_type); + phase->register_new_node(val, ctrl); +} + +void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) { + Node* ctrl = phase->get_ctrl(barrier); + Node* init_raw_mem = fixer.find_mem(ctrl, barrier); + + // Update the control of all nodes that should be after the + // barrier control flow + uses.clear(); + // Every node that is control dependent on the barrier's input + // control will be after the expanded barrier. The raw memory (if + // its memory is control dependent on the barrier's input control) + // must stay above the barrier. 
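+  // First gather, into uses_to_ignore, the nodes that compute the initial raw
+  // memory state, so the rewiring loop below leaves them on the old control.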
+ uses_to_ignore.clear(); + if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) { + uses_to_ignore.push(init_raw_mem); + } + for (uint next = 0; next < uses_to_ignore.size(); next++) { + Node *n = uses_to_ignore.at(next); + for (uint i = 0; i < n->req(); i++) { + Node* in = n->in(i); + if (in != NULL && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) { + uses_to_ignore.push(in); + } + } + } + for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) { + Node* u = ctrl->fast_out(i); + if (u->_idx < last && + u != barrier && + !uses_to_ignore.member(u) && + (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) && + (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) { + Node* old_c = phase->ctrl_or_self(u); + Node* c = old_c; + if (c != ctrl || + is_dominator_same_ctrl(old_c, barrier, u, phase) || + ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) { + phase->igvn().rehash_node_delayed(u); + int nb = u->replace_edge(ctrl, region); + if (u->is_CFG()) { + if (phase->idom(u) == ctrl) { + phase->set_idom(u, region, phase->dom_depth(region)); + } + } else if (phase->get_ctrl(u) == ctrl) { + assert(u != init_raw_mem, "should leave input raw mem above the barrier"); + uses.push(u); + } + assert(nb == 1, "more than 1 ctrl input?"); + --i, imax -= nb; + } + } + } +} + +static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) { + Node* region = NULL; + while (c != ctrl) { + if (c->is_Region()) { + region = c; + } + c = phase->idom(c); + } + assert(region != NULL, ""); + Node* phi = new PhiNode(region, n->bottom_type()); + for (uint j = 1; j < region->req(); j++) { + Node* in = region->in(j); + if (phase->is_dominator(projs.fallthrough_catchproj, in)) { + phi->init_req(j, n); + } else if (phase->is_dominator(projs.catchall_catchproj, in)) { + phi->init_req(j, n_clone); + } else { + phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase)); + } + } + phase->register_new_node(phi, region); + return phi; +} + +void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) { + ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state(); + + Unique_Node_List uses; + for (int i = 0; i < state->enqueue_barriers_count(); i++) { + Node* barrier = state->enqueue_barrier(i); + Node* ctrl = phase->get_ctrl(barrier); + IdealLoopTree* loop = phase->get_loop(ctrl); + if (loop->_head->is_OuterStripMinedLoop()) { + // Expanding a barrier here will break loop strip mining + // verification. Transform the loop so the loop nest doesn't + // appear as strip mined. 
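+      // hide_strip_mined_loop() (defined above) swaps the OuterStripMinedLoop
+      // head and its loop end for a plain LoopNode/IfNode pair and clears the
+      // inner CountedLoop's strip-mined flag.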
+ OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop(); + hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase); + } + } + + Node_Stack stack(0); + Node_List clones; + for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) { + ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i); + if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) { + continue; + } + + Node* ctrl = phase->get_ctrl(lrb); + Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn); + + CallStaticJavaNode* unc = NULL; + Node* unc_ctrl = NULL; + Node* uncasted_val = val; + + for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) { + Node* u = lrb->fast_out(i); + if (u->Opcode() == Op_CastPP && + u->in(0) != NULL && + phase->is_dominator(u->in(0), ctrl)) { + const Type* u_t = phase->igvn().type(u); + + if (u_t->meet(TypePtr::NULL_PTR) != u_t && + u->in(0)->Opcode() == Op_IfTrue && + u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) && + u->in(0)->in(0)->is_If() && + u->in(0)->in(0)->in(1)->Opcode() == Op_Bool && + u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne && + u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP && + u->in(0)->in(0)->in(1)->in(1)->in(1) == val && + u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) { + IdealLoopTree* loop = phase->get_loop(ctrl); + IdealLoopTree* unc_loop = phase->get_loop(u->in(0)); + + if (!unc_loop->is_member(loop)) { + continue; + } + + Node* branch = no_branches(ctrl, u->in(0), false, phase); + assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch"); + if (branch == NodeSentinel) { + continue; + } + + phase->igvn().replace_input_of(u, 1, val); + phase->igvn().replace_input_of(lrb, ShenandoahLoadReferenceBarrierNode::ValueIn, u); + phase->set_ctrl(u, u->in(0)); + phase->set_ctrl(lrb, u->in(0)); + unc = u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none); + unc_ctrl = u->in(0); + val = u; + + for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) { + Node* u = val->fast_out(j); + if (u == lrb) continue; + phase->igvn().rehash_node_delayed(u); + int nb = u->replace_edge(val, lrb); + --j; jmax -= nb; + } + + RegionNode* r = new RegionNode(3); + IfNode* iff = unc_ctrl->in(0)->as_If(); + + Node* ctrl_use = unc_ctrl->unique_ctrl_out(); + Node* unc_ctrl_clone = unc_ctrl->clone(); + phase->register_control(unc_ctrl_clone, loop, iff); + Node* c = unc_ctrl_clone; + Node* new_cast = clone_null_check(c, val, unc_ctrl_clone, phase); + r->init_req(1, new_cast->in(0)->in(0)->as_If()->proj_out(0)); + + phase->igvn().replace_input_of(unc_ctrl, 0, c->in(0)); + phase->set_idom(unc_ctrl, c->in(0), phase->dom_depth(unc_ctrl)); + phase->lazy_replace(c, unc_ctrl); + c = NULL;; + phase->igvn().replace_input_of(val, 0, unc_ctrl_clone); + phase->set_ctrl(val, unc_ctrl_clone); + + IfNode* new_iff = new_cast->in(0)->in(0)->as_If(); + fix_null_check(unc, unc_ctrl_clone, r, uses, phase); + Node* iff_proj = iff->proj_out(0); + r->init_req(2, iff_proj); + phase->register_control(r, phase->ltree_root(), iff); + + Node* new_bol = new_iff->in(1)->clone(); + Node* new_cmp = new_bol->in(1)->clone(); + assert(new_cmp->Opcode() == Op_CmpP, "broken"); + assert(new_cmp->in(1) == val->in(1), "broken"); + new_bol->set_req(1, new_cmp); + new_cmp->set_req(1, lrb); + phase->register_new_node(new_bol, new_iff->in(0)); + phase->register_new_node(new_cmp, new_iff->in(0)); + 
phase->igvn().replace_input_of(new_iff, 1, new_bol); + phase->igvn().replace_input_of(new_cast, 1, lrb); + + for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) { + Node* u = lrb->fast_out(i); + if (u == new_cast || u == new_cmp) { + continue; + } + phase->igvn().rehash_node_delayed(u); + int nb = u->replace_edge(lrb, new_cast); + assert(nb > 0, "no update?"); + --i; imax -= nb; + } + + for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) { + Node* u = val->fast_out(i); + if (u == lrb) { + continue; + } + phase->igvn().rehash_node_delayed(u); + int nb = u->replace_edge(val, new_cast); + assert(nb > 0, "no update?"); + --i; imax -= nb; + } + + ctrl = unc_ctrl_clone; + phase->set_ctrl_and_loop(lrb, ctrl); + break; + } + } + } + if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) { + CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava(); + if (call->entry_point() == OptoRuntime::rethrow_stub()) { + // The rethrow call may have too many projections to be + // properly handled here. Given there's no reason for a + // barrier to depend on the call, move it above the call + if (phase->get_ctrl(val) == ctrl) { + assert(val->Opcode() == Op_DecodeN, "unexpected node"); + assert(phase->is_dominator(phase->get_ctrl(val->in(1)), call->in(0)), "Load is too low"); + phase->set_ctrl(val, call->in(0)); + } + phase->set_ctrl(lrb, call->in(0)); + continue; + } + CallProjections projs; + call->extract_projections(&projs, false, false); + + Node* lrb_clone = lrb->clone(); + phase->register_new_node(lrb_clone, projs.catchall_catchproj); + phase->set_ctrl(lrb, projs.fallthrough_catchproj); + + stack.push(lrb, 0); + clones.push(lrb_clone); + + do { + assert(stack.size() == clones.size(), ""); + Node* n = stack.node(); +#ifdef ASSERT + if (n->is_Load()) { + Node* mem = n->in(MemNode::Memory); + for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) { + Node* u = mem->fast_out(j); + assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?"); + } + } +#endif + uint idx = stack.index(); + Node* n_clone = clones.at(clones.size()-1); + if (idx < n->outcnt()) { + Node* u = n->raw_out(idx); + Node* c = phase->ctrl_or_self(u); + if (phase->is_dominator(call, c) && phase->is_dominator(c, projs.fallthrough_proj)) { + stack.set_index(idx+1); + assert(!u->is_CFG(), ""); + stack.push(u, 0); + Node* u_clone = u->clone(); + int nb = u_clone->replace_edge(n, n_clone); + assert(nb > 0, "should have replaced some uses"); + phase->register_new_node(u_clone, projs.catchall_catchproj); + clones.push(u_clone); + phase->set_ctrl(u, projs.fallthrough_catchproj); + } else { + bool replaced = false; + if (u->is_Phi()) { + for (uint k = 1; k < u->req(); k++) { + if (u->in(k) == n) { + if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) { + phase->igvn().replace_input_of(u, k, n_clone); + replaced = true; + } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) { + phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase)); + replaced = true; + } + } + } + } else { + if (phase->is_dominator(projs.catchall_catchproj, c)) { + phase->igvn().rehash_node_delayed(u); + int nb = u->replace_edge(n, n_clone); + assert(nb > 0, "should have replaced some uses"); + replaced = true; + } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) { + phase->igvn().rehash_node_delayed(u); + int nb = u->replace_edge(n, 
create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase)); + assert(nb > 0, "should have replaced some uses"); + replaced = true; + } + } + if (!replaced) { + stack.set_index(idx+1); + } + } + } else { + stack.pop(); + clones.pop(); + } + } while (stack.size() > 0); + assert(stack.size() == 0 && clones.size() == 0, ""); + } + } + + for (int i = 0; i < state->load_reference_barriers_count(); i++) { + ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i); + if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) { + continue; + } + Node* ctrl = phase->get_ctrl(lrb); + IdealLoopTree* loop = phase->get_loop(ctrl); + if (loop->_head->is_OuterStripMinedLoop()) { + // Expanding a barrier here will break loop strip mining + // verification. Transform the loop so the loop nest doesn't + // appear as strip mined. + OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop(); + hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase); + } + } + + // Expand load-reference-barriers + MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase); + Unique_Node_List uses_to_ignore; + for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) { + ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i); + if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) { + phase->igvn().replace_node(lrb, lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn)); + continue; + } + uint last = phase->C->unique(); + Node* ctrl = phase->get_ctrl(lrb); + Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn); + + + Node* orig_ctrl = ctrl; + + Node* raw_mem = fixer.find_mem(ctrl, lrb); + Node* init_raw_mem = raw_mem; + Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL); + + IdealLoopTree *loop = phase->get_loop(ctrl); + CallStaticJavaNode* unc = lrb->pin_and_expand_null_check(phase->igvn()); + Node* unc_ctrl = NULL; + if (unc != NULL) { + if (val->in(ShenandoahLoadReferenceBarrierNode::Control) != ctrl) { + unc = NULL; + } else { + unc_ctrl = val->in(ShenandoahLoadReferenceBarrierNode::Control); + } + } + + Node* uncasted_val = val; + if (unc != NULL) { + uncasted_val = val->in(1); + } + + Node* heap_stable_ctrl = NULL; + Node* null_ctrl = NULL; + + assert(val->bottom_type()->make_oopptr(), "need oop"); + assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant"); + + enum { _heap_stable = 1, _not_cset, _evac_path, _null_path, PATH_LIMIT }; + Node* region = new RegionNode(PATH_LIMIT); + Node* val_phi = new PhiNode(region, uncasted_val->bottom_type()->is_oopptr()); + Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM); + + // Stable path. + test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase); + IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If(); + + // Heap stable case + region->init_req(_heap_stable, heap_stable_ctrl); + val_phi->init_req(_heap_stable, uncasted_val); + raw_mem_phi->init_req(_heap_stable, raw_mem); + + Node* reg2_ctrl = NULL; + // Null case + test_null(ctrl, val, null_ctrl, phase); + if (null_ctrl != NULL) { + reg2_ctrl = null_ctrl->in(0); + region->init_req(_null_path, null_ctrl); + val_phi->init_req(_null_path, uncasted_val); + raw_mem_phi->init_req(_null_path, raw_mem); + } else { + region->del_req(_null_path); + val_phi->del_req(_null_path); + raw_mem_phi->del_req(_null_path); + } + + // Test for in-cset. 
+ // Wires !in_cset(obj) to slot 2 of region and phis + Node* not_cset_ctrl = NULL; + in_cset_fast_test(ctrl, not_cset_ctrl, uncasted_val, raw_mem, phase); + if (not_cset_ctrl != NULL) { + if (reg2_ctrl == NULL) reg2_ctrl = not_cset_ctrl->in(0); + region->init_req(_not_cset, not_cset_ctrl); + val_phi->init_req(_not_cset, uncasted_val); + raw_mem_phi->init_req(_not_cset, raw_mem); + } + + // Resolve object when orig-value is in cset. + // Make the unconditional resolve for fwdptr. + Node* new_val = uncasted_val; + if (unc_ctrl != NULL) { + // Clone the null check in this branch to allow implicit null check + new_val = clone_null_check(ctrl, val, unc_ctrl, phase); + fix_null_check(unc, unc_ctrl, ctrl->in(0)->as_If()->proj_out(0), uses, phase); + + IfNode* iff = unc_ctrl->in(0)->as_If(); + phase->igvn().replace_input_of(iff, 1, phase->igvn().intcon(1)); + } + + // Call lrb-stub and wire up that path in slots 4 + Node* result_mem = NULL; + + Node* fwd = new_val; + Node* addr; + if (ShenandoahSelfFixing) { + VectorSet visited(Thread::current()->resource_area()); + addr = get_load_addr(phase, visited, lrb); + } else { + addr = phase->igvn().zerocon(T_OBJECT); + } + if (addr->Opcode() == Op_AddP) { + Node* orig_base = addr->in(AddPNode::Base); + Node* base = new CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type(), true); + phase->register_new_node(base, ctrl); + if (addr->in(AddPNode::Base) == addr->in((AddPNode::Address))) { + // Field access + addr = addr->clone(); + addr->set_req(AddPNode::Base, base); + addr->set_req(AddPNode::Address, base); + phase->register_new_node(addr, ctrl); + } else { + Node* addr2 = addr->in(AddPNode::Address); + if (addr2->Opcode() == Op_AddP && addr2->in(AddPNode::Base) == addr2->in(AddPNode::Address) && + addr2->in(AddPNode::Base) == orig_base) { + addr2 = addr2->clone(); + addr2->set_req(AddPNode::Base, base); + addr2->set_req(AddPNode::Address, base); + phase->register_new_node(addr2, ctrl); + addr = addr->clone(); + addr->set_req(AddPNode::Base, base); + addr->set_req(AddPNode::Address, addr2); + phase->register_new_node(addr, ctrl); + } + } + } + call_lrb_stub(ctrl, fwd, addr, result_mem, raw_mem, false, phase); + region->init_req(_evac_path, ctrl); + val_phi->init_req(_evac_path, fwd); + raw_mem_phi->init_req(_evac_path, result_mem); + + phase->register_control(region, loop, heap_stable_iff); + Node* out_val = val_phi; + phase->register_new_node(val_phi, region); + phase->register_new_node(raw_mem_phi, region); + + fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase); + + ctrl = orig_ctrl; + + if (unc != NULL) { + for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) { + Node* u = val->fast_out(i); + Node* c = phase->ctrl_or_self(u); + if (u != lrb && (c != ctrl || is_dominator_same_ctrl(c, lrb, u, phase))) { + phase->igvn().rehash_node_delayed(u); + int nb = u->replace_edge(val, out_val); + --i, imax -= nb; + } + } + if (val->outcnt() == 0) { + phase->igvn()._worklist.push(val); + } + } + phase->igvn().replace_node(lrb, out_val); + + follow_barrier_uses(out_val, ctrl, uses, phase); + + for(uint next = 0; next < uses.size(); next++ ) { + Node *n = uses.at(next); + assert(phase->get_ctrl(n) == ctrl, "bad control"); + assert(n != init_raw_mem, "should leave input raw mem above the barrier"); + phase->set_ctrl(n, region); + follow_barrier_uses(n, ctrl, uses, phase); + } + + // The slow path call produces memory: hook the raw memory phi + // from the expanded load reference barrier with the rest of the graph + // which may require 
adding memory phis at every post dominated + // region and at enclosing loop heads. Use the memory state + // collected in memory_nodes to fix the memory graph. Update that + // memory state as we go. + fixer.fix_mem(ctrl, region, init_raw_mem, raw_mem_for_ctrl, raw_mem_phi, uses); + } + // Done expanding load-reference-barriers. + assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced"); + + for (int i = state->enqueue_barriers_count() - 1; i >= 0; i--) { + Node* barrier = state->enqueue_barrier(i); + Node* pre_val = barrier->in(1); + + if (phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR)) { + ShouldNotReachHere(); + continue; + } + + Node* ctrl = phase->get_ctrl(barrier); + + if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) { + assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0)->in(0), pre_val, ctrl->in(0), phase), "can't move"); + ctrl = ctrl->in(0)->in(0); + phase->set_ctrl(barrier, ctrl); + } else if (ctrl->is_CallRuntime()) { + assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0), pre_val, ctrl, phase), "can't move"); + ctrl = ctrl->in(0); + phase->set_ctrl(barrier, ctrl); + } + + Node* init_ctrl = ctrl; + IdealLoopTree* loop = phase->get_loop(ctrl); + Node* raw_mem = fixer.find_mem(ctrl, barrier); + Node* init_raw_mem = raw_mem; + Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL); + Node* heap_stable_ctrl = NULL; + Node* null_ctrl = NULL; + uint last = phase->C->unique(); + + enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT }; + Node* region = new RegionNode(PATH_LIMIT); + Node* phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM); + + enum { _fast_path = 1, _slow_path, _null_path, PATH_LIMIT2 }; + Node* region2 = new RegionNode(PATH_LIMIT2); + Node* phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM); + + // Stable path. 
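+    // When the heap is stable the pre-barrier is a no-op: control and raw
+    // memory flow through the region/phi unchanged.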
+ test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase); + region->init_req(_heap_stable, heap_stable_ctrl); + phi->init_req(_heap_stable, raw_mem); + + // Null path + Node* reg2_ctrl = NULL; + test_null(ctrl, pre_val, null_ctrl, phase); + if (null_ctrl != NULL) { + reg2_ctrl = null_ctrl->in(0); + region2->init_req(_null_path, null_ctrl); + phi2->init_req(_null_path, raw_mem); + } else { + region2->del_req(_null_path); + phi2->del_req(_null_path); + } + + const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()); + const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()); + Node* thread = new ThreadLocalNode(); + phase->register_new_node(thread, ctrl); + Node* buffer_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(buffer_offset)); + phase->register_new_node(buffer_adr, ctrl); + Node* index_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(index_offset)); + phase->register_new_node(index_adr, ctrl); + + BasicType index_bt = TypeX_X->basic_type(); + assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size."); + const TypePtr* adr_type = TypeRawPtr::BOTTOM; + Node* index = new LoadXNode(ctrl, raw_mem, index_adr, adr_type, TypeX_X, MemNode::unordered); + phase->register_new_node(index, ctrl); + Node* index_cmp = new CmpXNode(index, phase->igvn().MakeConX(0)); + phase->register_new_node(index_cmp, ctrl); + Node* index_test = new BoolNode(index_cmp, BoolTest::ne); + phase->register_new_node(index_test, ctrl); + IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN); + if (reg2_ctrl == NULL) reg2_ctrl = queue_full_iff; + phase->register_control(queue_full_iff, loop, ctrl); + Node* not_full = new IfTrueNode(queue_full_iff); + phase->register_control(not_full, loop, queue_full_iff); + Node* full = new IfFalseNode(queue_full_iff); + phase->register_control(full, loop, queue_full_iff); + + ctrl = not_full; + + Node* next_index = new SubXNode(index, phase->igvn().MakeConX(sizeof(intptr_t))); + phase->register_new_node(next_index, ctrl); + + Node* buffer = new LoadPNode(ctrl, raw_mem, buffer_adr, adr_type, TypeRawPtr::NOTNULL, MemNode::unordered); + phase->register_new_node(buffer, ctrl); + Node *log_addr = new AddPNode(phase->C->top(), buffer, next_index); + phase->register_new_node(log_addr, ctrl); + Node* log_store = new StorePNode(ctrl, raw_mem, log_addr, adr_type, pre_val, MemNode::unordered); + phase->register_new_node(log_store, ctrl); + // update the index + Node* index_update = new StoreXNode(ctrl, log_store, index_adr, adr_type, next_index, MemNode::unordered); + phase->register_new_node(index_update, ctrl); + + // Fast-path case + region2->init_req(_fast_path, ctrl); + phi2->init_req(_fast_path, index_update); + + ctrl = full; + + Node* base = find_bottom_mem(ctrl, phase); + + MergeMemNode* mm = MergeMemNode::make(base); + mm->set_memory_at(Compile::AliasIdxRaw, raw_mem); + phase->register_new_node(mm, ctrl); + + Node* call = new CallLeafNode(ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", TypeRawPtr::BOTTOM); + call->init_req(TypeFunc::Control, ctrl); + call->init_req(TypeFunc::I_O, phase->C->top()); + call->init_req(TypeFunc::Memory, mm); + call->init_req(TypeFunc::FramePtr, phase->C->top()); + call->init_req(TypeFunc::ReturnAdr, phase->C->top()); + call->init_req(TypeFunc::Parms, pre_val); + 
call->init_req(TypeFunc::Parms+1, thread); + phase->register_control(call, loop, ctrl); + + Node* ctrl_proj = new ProjNode(call, TypeFunc::Control); + phase->register_control(ctrl_proj, loop, call); + Node* mem_proj = new ProjNode(call, TypeFunc::Memory); + phase->register_new_node(mem_proj, call); + + // Slow-path case + region2->init_req(_slow_path, ctrl_proj); + phi2->init_req(_slow_path, mem_proj); + + phase->register_control(region2, loop, reg2_ctrl); + phase->register_new_node(phi2, region2); + + region->init_req(_heap_unstable, region2); + phi->init_req(_heap_unstable, phi2); + + phase->register_control(region, loop, heap_stable_ctrl->in(0)); + phase->register_new_node(phi, region); + + fix_ctrl(barrier, region, fixer, uses, uses_to_ignore, last, phase); + for(uint next = 0; next < uses.size(); next++ ) { + Node *n = uses.at(next); + assert(phase->get_ctrl(n) == init_ctrl, "bad control"); + assert(n != init_raw_mem, "should leave input raw mem above the barrier"); + phase->set_ctrl(n, region); + follow_barrier_uses(n, init_ctrl, uses, phase); + } + fixer.fix_mem(init_ctrl, region, init_raw_mem, raw_mem_for_ctrl, phi, uses); + + phase->igvn().replace_node(barrier, pre_val); + } + assert(state->enqueue_barriers_count() == 0, "all enqueue barrier nodes should have been replaced"); + +} + +Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) { + if (visited.test_set(in->_idx)) { + return NULL; + } + switch (in->Opcode()) { + case Op_Proj: + return get_load_addr(phase, visited, in->in(0)); + case Op_CastPP: + case Op_CheckCastPP: + case Op_DecodeN: + case Op_EncodeP: + return get_load_addr(phase, visited, in->in(1)); + case Op_LoadN: + case Op_LoadP: + return in->in(MemNode::Address); + case Op_CompareAndExchangeN: + case Op_CompareAndExchangeP: + case Op_GetAndSetN: + case Op_GetAndSetP: + case Op_ShenandoahCompareAndExchangeP: + case Op_ShenandoahCompareAndExchangeN: + // Those instructions would just have stored a different + // value into the field. No use to attempt to fix it at this point. + return phase->igvn().zerocon(T_OBJECT); + case Op_CMoveP: + case Op_CMoveN: { + Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue)); + Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse)); + // Handle unambiguous cases: single address reported on both branches. + if (t != NULL && f == NULL) return t; + if (t == NULL && f != NULL) return f; + if (t != NULL && t == f) return t; + // Ambiguity. 
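+      // The two branches disagree on the address, so report "unknown" (null),
+      // matching the !ShenandoahSelfFixing case above: the barrier stub then
+      // has no slot to patch for this load.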
+ return phase->igvn().zerocon(T_OBJECT); + } + case Op_Phi: { + Node* addr = NULL; + for (uint i = 1; i < in->req(); i++) { + Node* addr1 = get_load_addr(phase, visited, in->in(i)); + if (addr == NULL) { + addr = addr1; + } + if (addr != addr1) { + return phase->igvn().zerocon(T_OBJECT); + } + } + return addr; + } + case Op_ShenandoahLoadReferenceBarrier: + return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn)); + case Op_ShenandoahEnqueueBarrier: + return get_load_addr(phase, visited, in->in(1)); + case Op_CallDynamicJava: + case Op_CallLeaf: + case Op_CallStaticJava: + case Op_ConN: + case Op_ConP: + case Op_Parm: + return phase->igvn().zerocon(T_OBJECT); + default: +#ifdef ASSERT + fatal("Unknown node in get_load_addr: %s", NodeClassNames[in->Opcode()]); +#endif + return phase->igvn().zerocon(T_OBJECT); + } + +} + +void ShenandoahBarrierC2Support::move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) { + IdealLoopTree *loop = phase->get_loop(iff); + Node* loop_head = loop->_head; + Node* entry_c = loop_head->in(LoopNode::EntryControl); + + Node* bol = iff->in(1); + Node* cmp = bol->in(1); + Node* andi = cmp->in(1); + Node* load = andi->in(1); + + assert(is_gc_state_load(load), "broken"); + if (!phase->is_dominator(load->in(0), entry_c)) { + Node* mem_ctrl = NULL; + Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase); + load = load->clone(); + load->set_req(MemNode::Memory, mem); + load->set_req(0, entry_c); + phase->register_new_node(load, entry_c); + andi = andi->clone(); + andi->set_req(1, load); + phase->register_new_node(andi, entry_c); + cmp = cmp->clone(); + cmp->set_req(1, andi); + phase->register_new_node(cmp, entry_c); + bol = bol->clone(); + bol->set_req(1, cmp); + phase->register_new_node(bol, entry_c); + + Node* old_bol =iff->in(1); + phase->igvn().replace_input_of(iff, 1, bol); + } +} + +bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) { + if (!n->is_If() || n->is_CountedLoopEnd()) { + return false; + } + Node* region = n->in(0); + + if (!region->is_Region()) { + return false; + } + Node* dom = phase->idom(region); + if (!dom->is_If()) { + return false; + } + + if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) { + return false; + } + + IfNode* dom_if = dom->as_If(); + Node* proj_true = dom_if->proj_out(1); + Node* proj_false = dom_if->proj_out(0); + + for (uint i = 1; i < region->req(); i++) { + if (phase->is_dominator(proj_true, region->in(i))) { + continue; + } + if (phase->is_dominator(proj_false, region->in(i))) { + continue; + } + return false; + } + + return true; +} + +void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) { + assert(is_heap_stable_test(n), "no other tests"); + if (identical_backtoback_ifs(n, phase)) { + Node* n_ctrl = n->in(0); + if (phase->can_split_if(n_ctrl)) { + IfNode* dom_if = phase->idom(n_ctrl)->as_If(); + if (is_heap_stable_test(n)) { + Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1); + assert(is_gc_state_load(gc_state_load), "broken"); + Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1); + assert(is_gc_state_load(dom_gc_state_load), "broken"); + if (gc_state_load != dom_gc_state_load) { + phase->igvn().replace_node(gc_state_load, dom_gc_state_load); + } + } + PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1)); + Node* proj_true = dom_if->proj_out(1); + Node* proj_false = dom_if->proj_out(0); + Node* con_true = 
phase->igvn().makecon(TypeInt::ONE); + Node* con_false = phase->igvn().makecon(TypeInt::ZERO); + + for (uint i = 1; i < n_ctrl->req(); i++) { + if (phase->is_dominator(proj_true, n_ctrl->in(i))) { + bolphi->init_req(i, con_true); + } else { + assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if"); + bolphi->init_req(i, con_false); + } + } + phase->register_new_node(bolphi, n_ctrl); + phase->igvn().replace_input_of(n, 1, bolphi); + phase->do_split_if(n); + } + } +} + +IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) { + // Find first invariant test that doesn't exit the loop + LoopNode *head = loop->_head->as_Loop(); + IfNode* unswitch_iff = NULL; + Node* n = head->in(LoopNode::LoopBackControl); + int loop_has_sfpts = -1; + while (n != head) { + Node* n_dom = phase->idom(n); + if (n->is_Region()) { + if (n_dom->is_If()) { + IfNode* iff = n_dom->as_If(); + if (iff->in(1)->is_Bool()) { + BoolNode* bol = iff->in(1)->as_Bool(); + if (bol->in(1)->is_Cmp()) { + // If condition is invariant and not a loop exit, + // then found reason to unswitch. + if (is_heap_stable_test(iff) && + (loop_has_sfpts == -1 || loop_has_sfpts == 0)) { + assert(!loop->is_loop_exit(iff), "both branches should be in the loop"); + if (loop_has_sfpts == -1) { + for(uint i = 0; i < loop->_body.size(); i++) { + Node *m = loop->_body[i]; + if (m->is_SafePoint() && !m->is_CallLeaf()) { + loop_has_sfpts = 1; + break; + } + } + if (loop_has_sfpts == -1) { + loop_has_sfpts = 0; + } + } + if (!loop_has_sfpts) { + unswitch_iff = iff; + } + } + } + } + } + } + n = n_dom; + } + return unswitch_iff; +} + + +void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) { + Node_List heap_stable_tests; + Node_List gc_state_loads; + stack.push(phase->C->start(), 0); + do { + Node* n = stack.node(); + uint i = stack.index(); + + if (i < n->outcnt()) { + Node* u = n->raw_out(i); + stack.set_index(i+1); + if (!visited.test_set(u->_idx)) { + stack.push(u, 0); + } + } else { + stack.pop(); + if (ShenandoahCommonGCStateLoads && is_gc_state_load(n)) { + gc_state_loads.push(n); + } + if (n->is_If() && is_heap_stable_test(n)) { + heap_stable_tests.push(n); + } + } + } while (stack.size() > 0); + + bool progress; + do { + progress = false; + for (uint i = 0; i < gc_state_loads.size(); i++) { + Node* n = gc_state_loads.at(i); + if (n->outcnt() != 0) { + progress |= try_common_gc_state_load(n, phase); + } + } + } while (progress); + + for (uint i = 0; i < heap_stable_tests.size(); i++) { + Node* n = heap_stable_tests.at(i); + assert(is_heap_stable_test(n), "only evacuation test"); + merge_back_to_back_tests(n, phase); + } + + if (!phase->C->major_progress()) { + VectorSet seen(Thread::current()->resource_area()); + for (uint i = 0; i < heap_stable_tests.size(); i++) { + Node* n = heap_stable_tests.at(i); + IdealLoopTree* loop = phase->get_loop(n); + if (loop != phase->ltree_root() && + loop->_child == NULL && + !loop->_irreducible) { + LoopNode* head = loop->_head->as_Loop(); + if ((!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) && + !seen.test_set(head->_idx)) { + IfNode* iff = find_unswitching_candidate(loop, phase); + if (iff != NULL) { + Node* bol = iff->in(1); + if (head->is_strip_mined()) { + head->verify_strip_mined(0); + } + move_heap_stable_test_out_of_loop(iff, phase); + if (loop->policy_unswitching(phase)) { + if 
(head->is_strip_mined()) { + OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop(); + hide_strip_mined_loop(outer, head->as_CountedLoop(), phase); + } + phase->do_unswitching(loop, old_new); + } else { + // Not proceeding with unswitching. Move load back in + // the loop. + phase->igvn().replace_input_of(iff, 1, bol); + } + } + } + } + } + } +} + +#ifdef ASSERT +void ShenandoahBarrierC2Support::verify_raw_mem(RootNode* root) { + const bool trace = false; + ResourceMark rm; + Unique_Node_List nodes; + Unique_Node_List controls; + Unique_Node_List memories; + + nodes.push(root); + for (uint next = 0; next < nodes.size(); next++) { + Node *n = nodes.at(next); + if (ShenandoahBarrierSetC2::is_shenandoah_lrb_call(n)) { + controls.push(n); + if (trace) { tty->print("XXXXXX verifying"); n->dump(); } + for (uint next2 = 0; next2 < controls.size(); next2++) { + Node *m = controls.at(next2); + for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) { + Node* u = m->fast_out(i); + if (u->is_CFG() && !u->is_Root() && + !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1) && + !(u->is_Region() && u->unique_ctrl_out()->Opcode() == Op_Halt)) { + if (trace) { tty->print("XXXXXX pushing control"); u->dump(); } + controls.push(u); + } + } + } + memories.push(n->as_Call()->proj_out(TypeFunc::Memory)); + for (uint next2 = 0; next2 < memories.size(); next2++) { + Node *m = memories.at(next2); + assert(m->bottom_type() == Type::MEMORY, ""); + for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) { + Node* u = m->fast_out(i); + if (u->bottom_type() == Type::MEMORY && (u->is_Mem() || u->is_ClearArray())) { + if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); } + memories.push(u); + } else if (u->is_LoadStore()) { + if (trace) { tty->print("XXXXXX pushing memory"); u->find_out_with(Op_SCMemProj)->dump(); } + memories.push(u->find_out_with(Op_SCMemProj)); + } else if (u->is_MergeMem() && u->as_MergeMem()->memory_at(Compile::AliasIdxRaw) == m) { + if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); } + memories.push(u); + } else if (u->is_Phi()) { + assert(u->bottom_type() == Type::MEMORY, ""); + if (u->adr_type() == TypeRawPtr::BOTTOM || u->adr_type() == TypePtr::BOTTOM) { + assert(controls.member(u->in(0)), ""); + if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); } + memories.push(u); + } + } else if (u->is_SafePoint() || u->is_MemBar()) { + for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) { + Node* uu = u->fast_out(j); + if (uu->bottom_type() == Type::MEMORY) { + if (trace) { tty->print("XXXXXX pushing memory"); uu->dump(); } + memories.push(uu); + } + } + } + } + } + for (uint next2 = 0; next2 < controls.size(); next2++) { + Node *m = controls.at(next2); + if (m->is_Region()) { + bool all_in = true; + for (uint i = 1; i < m->req(); i++) { + if (!controls.member(m->in(i))) { + all_in = false; + break; + } + } + if (trace) { tty->print("XXX verifying %s", all_in ? 
"all in" : ""); m->dump(); } + bool found_phi = false; + for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax && !found_phi; j++) { + Node* u = m->fast_out(j); + if (u->is_Phi() && memories.member(u)) { + found_phi = true; + for (uint i = 1; i < u->req() && found_phi; i++) { + Node* k = u->in(i); + if (memories.member(k) != controls.member(m->in(i))) { + found_phi = false; + } + } + } + } + assert(found_phi || all_in, ""); + } + } + controls.clear(); + memories.clear(); + } + for( uint i = 0; i < n->len(); ++i ) { + Node *m = n->in(i); + if (m != NULL) { + nodes.push(m); + } + } + } +} +#endif + +ShenandoahEnqueueBarrierNode::ShenandoahEnqueueBarrierNode(Node* val) : Node(NULL, val) { + ShenandoahBarrierSetC2::bsc2()->state()->add_enqueue_barrier(this); +} + +const Type* ShenandoahEnqueueBarrierNode::bottom_type() const { + if (in(1) == NULL || in(1)->is_top()) { + return Type::TOP; + } + const Type* t = in(1)->bottom_type(); + if (t == TypePtr::NULL_PTR) { + return t; + } + return t->is_oopptr(); +} + +const Type* ShenandoahEnqueueBarrierNode::Value(PhaseGVN* phase) const { + if (in(1) == NULL) { + return Type::TOP; + } + const Type* t = phase->type(in(1)); + if (t == Type::TOP) { + return Type::TOP; + } + if (t == TypePtr::NULL_PTR) { + return t; + } + return t->is_oopptr(); +} + +int ShenandoahEnqueueBarrierNode::needed(Node* n) { + if (n == NULL || + n->is_Allocate() || + n->Opcode() == Op_ShenandoahEnqueueBarrier || + n->bottom_type() == TypePtr::NULL_PTR || + (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL)) { + return NotNeeded; + } + if (n->is_Phi() || + n->is_CMove()) { + return MaybeNeeded; + } + return Needed; +} + +Node* ShenandoahEnqueueBarrierNode::next(Node* n) { + for (;;) { + if (n == NULL) { + return n; + } else if (n->bottom_type() == TypePtr::NULL_PTR) { + return n; + } else if (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL) { + return n; + } else if (n->is_ConstraintCast() || + n->Opcode() == Op_DecodeN || + n->Opcode() == Op_EncodeP) { + n = n->in(1); + } else if (n->is_Proj()) { + n = n->in(0); + } else { + return n; + } + } + ShouldNotReachHere(); + return NULL; +} + +Node* ShenandoahEnqueueBarrierNode::Identity(PhaseGVN* phase) { + PhaseIterGVN* igvn = phase->is_IterGVN(); + + Node* n = next(in(1)); + + int cont = needed(n); + + if (cont == NotNeeded) { + return in(1); + } else if (cont == MaybeNeeded) { + if (igvn == NULL) { + phase->record_for_igvn(this); + return this; + } else { + ResourceMark rm; + Unique_Node_List wq; + uint wq_i = 0; + + for (;;) { + if (n->is_Phi()) { + for (uint i = 1; i < n->req(); i++) { + Node* m = n->in(i); + if (m != NULL) { + wq.push(m); + } + } + } else { + assert(n->is_CMove(), "nothing else here"); + Node* m = n->in(CMoveNode::IfFalse); + wq.push(m); + m = n->in(CMoveNode::IfTrue); + wq.push(m); + } + Node* orig_n = NULL; + do { + if (wq_i >= wq.size()) { + return in(1); + } + n = wq.at(wq_i); + wq_i++; + orig_n = n; + n = next(n); + cont = needed(n); + if (cont == Needed) { + return this; + } + } while (cont != MaybeNeeded || (orig_n != n && wq.member(n))); + } + } + } + + return this; +} + +#ifdef ASSERT +static bool has_never_branch(Node* root) { + for (uint i = 1; i < root->req(); i++) { + Node* in = root->in(i); + if (in != NULL && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->Opcode() == Op_NeverBranch) { + return true; + } + } + return false; +} +#endif + +void 
MemoryGraphFixer::collect_memory_nodes() { + Node_Stack stack(0); + VectorSet visited(Thread::current()->resource_area()); + Node_List regions; + + // Walk the raw memory graph and create a mapping from CFG node to + // memory node. Exclude phis for now. + stack.push(_phase->C->root(), 1); + do { + Node* n = stack.node(); + int opc = n->Opcode(); + uint i = stack.index(); + if (i < n->req()) { + Node* mem = NULL; + if (opc == Op_Root) { + Node* in = n->in(i); + int in_opc = in->Opcode(); + if (in_opc == Op_Return || in_opc == Op_Rethrow) { + mem = in->in(TypeFunc::Memory); + } else if (in_opc == Op_Halt) { + if (!in->in(0)->is_Region()) { + Node* proj = in->in(0); + assert(proj->is_Proj(), ""); + Node* in = proj->in(0); + assert(in->is_CallStaticJava() || in->Opcode() == Op_NeverBranch || in->Opcode() == Op_Catch || proj->is_IfProj(), ""); + if (in->is_CallStaticJava()) { + mem = in->in(TypeFunc::Memory); + } else if (in->Opcode() == Op_Catch) { + Node* call = in->in(0)->in(0); + assert(call->is_Call(), ""); + mem = call->in(TypeFunc::Memory); + } else if (in->Opcode() == Op_NeverBranch) { + ResourceMark rm; + Unique_Node_List wq; + wq.push(in); + wq.push(in->as_Multi()->proj_out(0)); + for (uint j = 1; j < wq.size(); j++) { + Node* c = wq.at(j); + assert(!c->is_Root(), "shouldn't leave loop"); + if (c->is_SafePoint()) { + assert(mem == NULL, "only one safepoint"); + mem = c->in(TypeFunc::Memory); + } + for (DUIterator_Fast kmax, k = c->fast_outs(kmax); k < kmax; k++) { + Node* u = c->fast_out(k); + if (u->is_CFG()) { + wq.push(u); + } + } + } + assert(mem != NULL, "should have found safepoint"); + } + } + } else { +#ifdef ASSERT + n->dump(); + in->dump(); +#endif + ShouldNotReachHere(); + } + } else { + assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, ""); + assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, ""); + mem = n->in(i); + } + i++; + stack.set_index(i); + if (mem == NULL) { + continue; + } + for (;;) { + if (visited.test_set(mem->_idx) || mem->is_Start()) { + break; + } + if (mem->is_Phi()) { + stack.push(mem, 2); + mem = mem->in(1); + } else if (mem->is_Proj()) { + stack.push(mem, mem->req()); + mem = mem->in(0); + } else if (mem->is_SafePoint() || mem->is_MemBar()) { + mem = mem->in(TypeFunc::Memory); + } else if (mem->is_MergeMem()) { + MergeMemNode* mm = mem->as_MergeMem(); + mem = mm->memory_at(_alias); + } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) { + assert(_alias == Compile::AliasIdxRaw, ""); + stack.push(mem, mem->req()); + mem = mem->in(MemNode::Memory); + } else { +#ifdef ASSERT + mem->dump(); +#endif + ShouldNotReachHere(); + } + } + } else { + if (n->is_Phi()) { + // Nothing + } else if (!n->is_Root()) { + Node* c = get_ctrl(n); + _memory_nodes.map(c->_idx, n); + } + stack.pop(); + } + } while(stack.is_nonempty()); + + // Iterate over CFG nodes in rpo and propagate memory state to + // compute memory state at regions, creating new phis if needed. 
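+  // For example, in a diamond
+  //
+  //        c0 (mem0)
+  //        /      \
+  //   c1 (store)   c2
+  //        \      /
+  //        region
+  //
+  // c1 maps to the store, c2 inherits mem0 from its idom, and the region sees
+  // two different incoming states, so a fresh memory Phi is created for it.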
+ Node_List rpo_list; + visited.Clear(); + _phase->rpo(_phase->C->root(), stack, visited, rpo_list); + Node* root = rpo_list.pop(); + assert(root == _phase->C->root(), ""); + + const bool trace = false; +#ifdef ASSERT + if (trace) { + for (int i = rpo_list.size() - 1; i >= 0; i--) { + Node* c = rpo_list.at(i); + if (_memory_nodes[c->_idx] != NULL) { + tty->print("X %d", c->_idx); _memory_nodes[c->_idx]->dump(); + } + } + } +#endif + uint last = _phase->C->unique(); + +#ifdef ASSERT + uint8_t max_depth = 0; + for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) { + IdealLoopTree* lpt = iter.current(); + max_depth = MAX2(max_depth, lpt->_nest); + } +#endif + + bool progress = true; + int iteration = 0; + Node_List dead_phis; + while (progress) { + progress = false; + iteration++; + assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), ""); + if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); } + IdealLoopTree* last_updated_ilt = NULL; + for (int i = rpo_list.size() - 1; i >= 0; i--) { + Node* c = rpo_list.at(i); + + Node* prev_mem = _memory_nodes[c->_idx]; + if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) { + Node* prev_region = regions[c->_idx]; + Node* unique = NULL; + for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) { + Node* m = _memory_nodes[c->in(j)->_idx]; + assert(m != NULL || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state"); + if (m != NULL) { + if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) { + assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop(), ""); + // continue + } else if (unique == NULL) { + unique = m; + } else if (m == unique) { + // continue + } else { + unique = NodeSentinel; + } + } + } + assert(unique != NULL, "empty phi???"); + if (unique != NodeSentinel) { + if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c) { + dead_phis.push(prev_region); + } + regions.map(c->_idx, unique); + } else { + Node* phi = NULL; + if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) { + phi = prev_region; + for (uint k = 1; k < c->req(); k++) { + Node* m = _memory_nodes[c->in(k)->_idx]; + assert(m != NULL, "expect memory state"); + phi->set_req(k, m); + } + } else { + for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == NULL; j++) { + Node* u = c->fast_out(j); + if (u->is_Phi() && u->bottom_type() == Type::MEMORY && + (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) { + phi = u; + for (uint k = 1; k < c->req() && phi != NULL; k++) { + Node* m = _memory_nodes[c->in(k)->_idx]; + assert(m != NULL, "expect memory state"); + if (u->in(k) != m) { + phi = NULL; + } + } + } + } + if (phi == NULL) { + phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias)); + for (uint k = 1; k < c->req(); k++) { + Node* m = _memory_nodes[c->in(k)->_idx]; + assert(m != NULL, "expect memory state"); + phi->init_req(k, m); + } + } + } + assert(phi != NULL, ""); + regions.map(c->_idx, phi); + } + Node* current_region = regions[c->_idx]; + if (current_region != prev_region) { + progress = true; + if (prev_region == prev_mem) { + _memory_nodes.map(c->_idx, current_region); + } + } + } else if (prev_mem == 
NULL || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) { + Node* m = _memory_nodes[_phase->idom(c)->_idx]; + assert(m != NULL, "expect memory state"); + if (m != prev_mem) { + _memory_nodes.map(c->_idx, m); + progress = true; + } + } +#ifdef ASSERT + if (trace) { tty->print("X %d", c->_idx); _memory_nodes[c->_idx]->dump(); } +#endif + } + } + + // Replace existing phi with computed memory state for that region + // if different (could be a new phi or a dominating memory node if + // that phi was found to be useless). + while (dead_phis.size() > 0) { + Node* n = dead_phis.pop(); + n->replace_by(_phase->C->top()); + n->destruct(); + } + for (int i = rpo_list.size() - 1; i >= 0; i--) { + Node* c = rpo_list.at(i); + if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) { + Node* n = regions[c->_idx]; + if (n->is_Phi() && n->_idx >= last && n->in(0) == c) { + _phase->register_new_node(n, c); + } + } + } + for (int i = rpo_list.size() - 1; i >= 0; i--) { + Node* c = rpo_list.at(i); + if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) { + Node* n = regions[c->_idx]; + for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) { + Node* u = c->fast_out(i); + if (u->is_Phi() && u->bottom_type() == Type::MEMORY && + u != n) { + if (u->adr_type() == TypePtr::BOTTOM) { + fix_memory_uses(u, n, n, c); + } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) { + _phase->lazy_replace(u, n); + --i; --imax; + } + } + } + } + } +} + +Node* MemoryGraphFixer::get_ctrl(Node* n) const { + Node* c = _phase->get_ctrl(n); + if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Call()) { + assert(c == n->in(0), ""); + CallNode* call = c->as_Call(); + CallProjections projs; + call->extract_projections(&projs, true, false); + if (projs.catchall_memproj != NULL) { + if (projs.fallthrough_memproj == n) { + c = projs.fallthrough_catchproj; + } else { + assert(projs.catchall_memproj == n, ""); + c = projs.catchall_catchproj; + } + } + } + return c; +} + +Node* MemoryGraphFixer::ctrl_or_self(Node* n) const { + if (_phase->has_ctrl(n)) + return get_ctrl(n); + else { + assert (n->is_CFG(), "must be a CFG node"); + return n; + } +} + +bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const { + return m != NULL && get_ctrl(m) == c; +} + +Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const { + assert(n == NULL || _phase->ctrl_or_self(n) == ctrl, ""); + Node* mem = _memory_nodes[ctrl->_idx]; + Node* c = ctrl; + while (!mem_is_valid(mem, c) && + (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem))) { + c = _phase->idom(c); + mem = _memory_nodes[c->_idx]; + } + if (n != NULL && mem_is_valid(mem, c)) { + while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) { + mem = next_mem(mem, _alias); + } + if (mem->is_MergeMem()) { + mem = mem->as_MergeMem()->memory_at(_alias); + } + if (!mem_is_valid(mem, c)) { + do { + c = _phase->idom(c); + mem = _memory_nodes[c->_idx]; + } while (!mem_is_valid(mem, c) && + (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem))); + } + } + assert(mem->bottom_type() == Type::MEMORY, ""); + return mem; +} + +bool MemoryGraphFixer::has_mem_phi(Node* region) const { + for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { + Node* use = region->fast_out(i); + if (use->is_Phi() && use->bottom_type() == Type::MEMORY && + (_phase->C->get_alias_index(use->adr_type()) == _alias)) { + return true; + } + } + return false; +} 
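+
+// Hook the memory state produced at new_ctrl (new_mem) into the graph in
+// place of the state at ctrl: downstream users of the old state are rewired
+// onto new_mem, and memory Phis are created at regions where the old and
+// new states merge.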
+void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
+  assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
+  const bool trace = false;
+  DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
+  DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
+  GrowableArray<Node*> phis;
+  if (mem_for_ctrl != mem) {
+    Node* old = mem_for_ctrl;
+    Node* prev = NULL;
+    while (old != mem) {
+      prev = old;
+      if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
+        assert(_alias == Compile::AliasIdxRaw, "");
+        old = old->in(MemNode::Memory);
+      } else if (old->Opcode() == Op_SCMemProj) {
+        assert(_alias == Compile::AliasIdxRaw, "");
+        old = old->in(0);
+      } else {
+        ShouldNotReachHere();
+      }
+    }
+    assert(prev != NULL, "");
+    if (new_ctrl != ctrl) {
+      _memory_nodes.map(ctrl->_idx, mem);
+      _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
+    }
+    uint input = (uint)MemNode::Memory;
+    _phase->igvn().replace_input_of(prev, input, new_mem);
+  } else {
+    uses.clear();
+    _memory_nodes.map(new_ctrl->_idx, new_mem);
+    uses.push(new_ctrl);
+    for(uint next = 0; next < uses.size(); next++ ) {
+      Node *n = uses.at(next);
+      assert(n->is_CFG(), "");
+      DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
+      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+        Node* u = n->fast_out(i);
+        if (!u->is_Root() && u->is_CFG() && u != n) {
+          Node* m = _memory_nodes[u->_idx];
+          if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
+              !has_mem_phi(u) &&
+              u->unique_ctrl_out()->Opcode() != Op_Halt) {
+            DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
+            DEBUG_ONLY(if (trace && m != NULL) { tty->print("ZZZ mem"); m->dump(); });
+
+            if (!mem_is_valid(m, u) || !m->is_Phi()) {
+              bool push = true;
+              bool create_phi = true;
+              if (_phase->is_dominator(new_ctrl, u)) {
+                create_phi = false;
+              } else if (!_phase->C->has_irreducible_loop()) {
+                IdealLoopTree* loop = _phase->get_loop(ctrl);
+                bool do_check = true;
+                IdealLoopTree* l = loop;
+                create_phi = false;
+                while (l != _phase->ltree_root()) {
+                  Node* head = l->_head;
+                  if (head->in(0) == NULL) {
+                    head = _phase->get_ctrl(head);
+                  }
+                  if (_phase->is_dominator(head, u) && _phase->is_dominator(_phase->idom(u), head)) {
+                    create_phi = true;
+                    do_check = false;
+                    break;
+                  }
+                  l = l->_parent;
+                }
+
+                if (do_check) {
+                  assert(!create_phi, "");
+                  IdealLoopTree* u_loop = _phase->get_loop(u);
+                  if (u_loop != _phase->ltree_root() && u_loop->is_member(loop)) {
+                    Node* c = ctrl;
+                    while (!_phase->is_dominator(c, u_loop->tail())) {
+                      c = _phase->idom(c);
+                    }
+                    if (!_phase->is_dominator(c, u)) {
+                      do_check = false;
+                    }
+                  }
+                }
+
+                if (do_check && _phase->is_dominator(_phase->idom(u), new_ctrl)) {
+                  create_phi = true;
+                }
+              }
+              if (create_phi) {
+                Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
+                _phase->register_new_node(phi, u);
+                phis.push(phi);
+                DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
+                if (!mem_is_valid(m, u)) {
+                  DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
+                  _memory_nodes.map(u->_idx, phi);
+                } else {
+                  DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
+                  for (;;) {
+                    assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
+                    Node* next = NULL;
+                    if (m->is_Proj()) {
+                      next = m->in(0);
+                    } else {
+                      assert(m->is_Mem() || m->is_LoadStore(), "");
+                      assert(_alias == Compile::AliasIdxRaw, "");
+                      next =
m->in(MemNode::Memory); + } + if (_phase->get_ctrl(next) != u) { + break; + } + if (next->is_MergeMem()) { + assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, ""); + break; + } + if (next->is_Phi()) { + assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, ""); + break; + } + m = next; + } + + DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); }); + assert(m->is_Mem() || m->is_LoadStore(), ""); + uint input = (uint)MemNode::Memory; + _phase->igvn().replace_input_of(m, input, phi); + push = false; + } + } else { + DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); }); + } + if (push) { + uses.push(u); + } + } + } else if (!mem_is_valid(m, u) && + !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1)) { + uses.push(u); + } + } + } + } + for (int i = 0; i < phis.length(); i++) { + Node* n = phis.at(i); + Node* r = n->in(0); + DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); }); + for (uint j = 1; j < n->req(); j++) { + Node* m = find_mem(r->in(j), NULL); + _phase->igvn().replace_input_of(n, j, m); + DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); }); + } + } + } + uint last = _phase->C->unique(); + MergeMemNode* mm = NULL; + int alias = _alias; + DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); }); + for (DUIterator i = mem->outs(); mem->has_out(i); i++) { + Node* u = mem->out(i); + if (u->_idx < last) { + if (u->is_Mem()) { + if (_phase->C->get_alias_index(u->adr_type()) == alias) { + Node* m = find_mem(_phase->get_ctrl(u), u); + if (m != mem) { + DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); }); + _phase->igvn().replace_input_of(u, MemNode::Memory, m); + --i; + } + } + } else if (u->is_MergeMem()) { + MergeMemNode* u_mm = u->as_MergeMem(); + if (u_mm->memory_at(alias) == mem) { + MergeMemNode* newmm = NULL; + for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) { + Node* uu = u->fast_out(j); + assert(!uu->is_MergeMem(), "chain of MergeMems?"); + if (uu->is_Phi()) { + assert(uu->adr_type() == TypePtr::BOTTOM, ""); + Node* region = uu->in(0); + int nb = 0; + for (uint k = 1; k < uu->req(); k++) { + if (uu->in(k) == u) { + Node* m = find_mem(region->in(k), NULL); + if (m != mem) { + DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); }); + newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i); + if (newmm != u) { + _phase->igvn().replace_input_of(uu, k, newmm); + nb++; + --jmax; + } + } + } + } + if (nb > 0) { + --j; + } + } else { + Node* m = find_mem(_phase->ctrl_or_self(uu), uu); + if (m != mem) { + DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); }); + newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i); + if (newmm != u) { + _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm); + --j, --jmax; + } + } + } + } + } + } else if (u->is_Phi()) { + assert(u->bottom_type() == Type::MEMORY, "what else?"); + if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) { + Node* region = u->in(0); + bool replaced = false; + for (uint j = 1; j < u->req(); j++) { + if (u->in(j) == mem) { + Node* m = find_mem(region->in(j), NULL); + Node* nnew = m; + if (m != mem) { + if (u->adr_type() == TypePtr::BOTTOM) { + mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m)); + nnew = mm; + } + DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); }); + 
_phase->igvn().replace_input_of(u, j, nnew); + replaced = true; + } + } + } + if (replaced) { + --i; + } + } + } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) || + u->adr_type() == NULL) { + assert(u->adr_type() != NULL || + u->Opcode() == Op_Rethrow || + u->Opcode() == Op_Return || + u->Opcode() == Op_SafePoint || + (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) || + (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) || + u->Opcode() == Op_CallLeaf, ""); + Node* m = find_mem(_phase->ctrl_or_self(u), u); + if (m != mem) { + mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m)); + _phase->igvn().replace_input_of(u, u->find_edge(mem), mm); + --i; + } + } else if (_phase->C->get_alias_index(u->adr_type()) == alias) { + Node* m = find_mem(_phase->ctrl_or_self(u), u); + if (m != mem) { + DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); }); + _phase->igvn().replace_input_of(u, u->find_edge(mem), m); + --i; + } + } else if (u->adr_type() != TypePtr::BOTTOM && + _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) { + Node* m = find_mem(_phase->ctrl_or_self(u), u); + assert(m != mem, ""); + // u is on the wrong slice... + assert(u->is_ClearArray(), ""); + DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); }); + _phase->igvn().replace_input_of(u, u->find_edge(mem), m); + --i; + } + } + } +#ifdef ASSERT + assert(new_mem->outcnt() > 0, ""); + for (int i = 0; i < phis.length(); i++) { + Node* n = phis.at(i); + assert(n->outcnt() > 0, "new phi must have uses now"); + } +#endif +} + +MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const { + MergeMemNode* mm = MergeMemNode::make(mem); + mm->set_memory_at(_alias, rep_proj); + _phase->register_new_node(mm, rep_ctrl); + return mm; +} + +MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const { + MergeMemNode* newmm = NULL; + MergeMemNode* u_mm = u->as_MergeMem(); + Node* c = _phase->get_ctrl(u); + if (_phase->is_dominator(c, rep_ctrl)) { + c = rep_ctrl; + } else { + assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other"); + } + if (u->outcnt() == 1) { + if (u->req() > (uint)_alias && u->in(_alias) == mem) { + _phase->igvn().replace_input_of(u, _alias, rep_proj); + --i; + } else { + _phase->igvn().rehash_node_delayed(u); + u_mm->set_memory_at(_alias, rep_proj); + } + newmm = u_mm; + _phase->set_ctrl_and_loop(u, c); + } else { + // can't simply clone u and then change one of its input because + // it adds and then removes an edge which messes with the + // DUIterator + newmm = MergeMemNode::make(u_mm->base_memory()); + for (uint j = 0; j < u->req(); j++) { + if (j < newmm->req()) { + if (j == (uint)_alias) { + newmm->set_req(j, rep_proj); + } else if (newmm->in(j) != u->in(j)) { + newmm->set_req(j, u->in(j)); + } + } else if (j == (uint)_alias) { + newmm->add_req(rep_proj); + } else { + newmm->add_req(u->in(j)); + } + } + if ((uint)_alias >= u->req()) { + newmm->set_memory_at(_alias, rep_proj); + } + _phase->register_new_node(newmm, c); + } + return newmm; +} + +bool MemoryGraphFixer::should_process_phi(Node* phi) const { + if (phi->adr_type() == TypePtr::BOTTOM) { + Node* region = phi->in(0); + for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) { + Node* uu = region->fast_out(j); + if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && 
_phase->C->get_alias_index(uu->adr_type()) == _alias) { + return false; + } + } + return true; + } + return _phase->C->get_alias_index(phi->adr_type()) == _alias; +} + +void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const { + uint last = _phase-> C->unique(); + MergeMemNode* mm = NULL; + assert(mem->bottom_type() == Type::MEMORY, ""); + for (DUIterator i = mem->outs(); mem->has_out(i); i++) { + Node* u = mem->out(i); + if (u != replacement && u->_idx < last) { + if (u->is_MergeMem()) { + MergeMemNode* u_mm = u->as_MergeMem(); + if (u_mm->memory_at(_alias) == mem) { + MergeMemNode* newmm = NULL; + for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) { + Node* uu = u->fast_out(j); + assert(!uu->is_MergeMem(), "chain of MergeMems?"); + if (uu->is_Phi()) { + if (should_process_phi(uu)) { + Node* region = uu->in(0); + int nb = 0; + for (uint k = 1; k < uu->req(); k++) { + if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) { + if (newmm == NULL) { + newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i); + } + if (newmm != u) { + _phase->igvn().replace_input_of(uu, k, newmm); + nb++; + --jmax; + } + } + } + if (nb > 0) { + --j; + } + } + } else { + if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) { + if (newmm == NULL) { + newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i); + } + if (newmm != u) { + _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm); + --j, --jmax; + } + } + } + } + } + } else if (u->is_Phi()) { + assert(u->bottom_type() == Type::MEMORY, "what else?"); + Node* region = u->in(0); + if (should_process_phi(u)) { + bool replaced = false; + for (uint j = 1; j < u->req(); j++) { + if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) { + Node* nnew = rep_proj; + if (u->adr_type() == TypePtr::BOTTOM) { + if (mm == NULL) { + mm = allocate_merge_mem(mem, rep_proj, rep_ctrl); + } + nnew = mm; + } + _phase->igvn().replace_input_of(u, j, nnew); + replaced = true; + } + } + if (replaced) { + --i; + } + + } + } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) || + u->adr_type() == NULL) { + assert(u->adr_type() != NULL || + u->Opcode() == Op_Rethrow || + u->Opcode() == Op_Return || + u->Opcode() == Op_SafePoint || + u->Opcode() == Op_StoreIConditional || + u->Opcode() == Op_StoreLConditional || + (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) || + (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) || + u->Opcode() == Op_CallLeaf, "%s", u->Name()); + if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) { + if (mm == NULL) { + mm = allocate_merge_mem(mem, rep_proj, rep_ctrl); + } + _phase->igvn().replace_input_of(u, u->find_edge(mem), mm); + --i; + } + } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) { + if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) { + _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj); + --i; + } + } + } + } +} + +ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj) +: Node(ctrl, obj) { + ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this); +} + +const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const { + if (in(ValueIn) == NULL || in(ValueIn)->is_top()) { + return 
Type::TOP; + } + const Type* t = in(ValueIn)->bottom_type(); + if (t == TypePtr::NULL_PTR) { + return t; + } + return t->is_oopptr(); +} + +const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const { + // Either input is TOP ==> the result is TOP + const Type *t2 = phase->type(in(ValueIn)); + if( t2 == Type::TOP ) return Type::TOP; + + if (t2 == TypePtr::NULL_PTR) { + return t2; + } + + const Type* type = t2->is_oopptr(); + return type; +} + +Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) { + Node* value = in(ValueIn); + if (!needs_barrier(phase, value)) { + return value; + } + return this; +} + +bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) { + Unique_Node_List visited; + return needs_barrier_impl(phase, n, visited); +} + +bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) { + if (n == NULL) return false; + if (visited.member(n)) { + return false; // Been there. + } + visited.push(n); + + if (n->is_Allocate()) { + // tty->print_cr("optimize barrier on alloc"); + return false; + } + if (n->is_Call()) { + // tty->print_cr("optimize barrier on call"); + return false; + } + + const Type* type = phase->type(n); + if (type == Type::TOP) { + return false; + } + if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) { + // tty->print_cr("optimize barrier on null"); + return false; + } + if (type->make_oopptr() && type->make_oopptr()->const_oop() != NULL) { + // tty->print_cr("optimize barrier on constant"); + return false; + } + + switch (n->Opcode()) { + case Op_AddP: + return true; // TODO: Can refine? + case Op_LoadP: + case Op_ShenandoahCompareAndExchangeN: + case Op_ShenandoahCompareAndExchangeP: + case Op_CompareAndExchangeN: + case Op_CompareAndExchangeP: + case Op_GetAndSetN: + case Op_GetAndSetP: + return true; + case Op_Phi: { + for (uint i = 1; i < n->req(); i++) { + if (needs_barrier_impl(phase, n->in(i), visited)) return true; + } + return false; + } + case Op_CheckCastPP: + case Op_CastPP: + return needs_barrier_impl(phase, n->in(1), visited); + case Op_Proj: + return needs_barrier_impl(phase, n->in(0), visited); + case Op_ShenandoahLoadReferenceBarrier: + // tty->print_cr("optimize barrier on barrier"); + return false; + case Op_Parm: + // tty->print_cr("optimize barrier on input arg"); + return false; + case Op_DecodeN: + case Op_EncodeP: + return needs_barrier_impl(phase, n->in(1), visited); + case Op_LoadN: + return true; + case Op_CMoveN: + case Op_CMoveP: + return needs_barrier_impl(phase, n->in(2), visited) || + needs_barrier_impl(phase, n->in(3), visited); + case Op_ShenandoahEnqueueBarrier: + return needs_barrier_impl(phase, n->in(1), visited); + case Op_CreateEx: + return false; + default: + break; + } +#ifdef ASSERT + tty->print("need barrier on?: "); + tty->print_cr("ins:"); + n->dump(2); + tty->print_cr("outs:"); + n->dump(-2); + ShouldNotReachHere(); +#endif + return true; +} + +ShenandoahLoadReferenceBarrierNode::Strength ShenandoahLoadReferenceBarrierNode::get_barrier_strength() { + Unique_Node_List visited; + Node_Stack stack(0); + stack.push(this, 0); + + // Look for strongest strength: go over nodes looking for STRONG ones. + // Stop once we encountered STRONG. Otherwise, walk until we ran out of nodes, + // and then the overall strength is NONE. 
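+  // Nodes that merely reshape the value (AddP, casts, Phi, CMoveP, other
+  // barriers) forward the question to their own users; any user that really
+  // consumes the oop — stores, calls, CAS, most loads, non-null compares —
+  // forces STRONG on the spot.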
+ Strength strength = NONE; + while (strength != STRONG && stack.size() > 0) { + Node* n = stack.node(); + if (visited.member(n)) { + stack.pop(); + continue; + } + visited.push(n); + bool visit_users = false; + switch (n->Opcode()) { + case Op_CallStaticJava: + case Op_CallDynamicJava: + case Op_CallLeaf: + case Op_CallLeafNoFP: + case Op_CompareAndSwapL: + case Op_CompareAndSwapI: + case Op_CompareAndSwapB: + case Op_CompareAndSwapS: + case Op_CompareAndSwapN: + case Op_CompareAndSwapP: + case Op_CompareAndExchangeL: + case Op_CompareAndExchangeI: + case Op_CompareAndExchangeB: + case Op_CompareAndExchangeS: + case Op_CompareAndExchangeN: + case Op_CompareAndExchangeP: + case Op_WeakCompareAndSwapL: + case Op_WeakCompareAndSwapI: + case Op_WeakCompareAndSwapB: + case Op_WeakCompareAndSwapS: + case Op_WeakCompareAndSwapN: + case Op_WeakCompareAndSwapP: + case Op_ShenandoahCompareAndSwapN: + case Op_ShenandoahCompareAndSwapP: + case Op_ShenandoahWeakCompareAndSwapN: + case Op_ShenandoahWeakCompareAndSwapP: + case Op_ShenandoahCompareAndExchangeN: + case Op_ShenandoahCompareAndExchangeP: + case Op_GetAndSetL: + case Op_GetAndSetI: + case Op_GetAndSetB: + case Op_GetAndSetS: + case Op_GetAndSetP: + case Op_GetAndSetN: + case Op_GetAndAddL: + case Op_GetAndAddI: + case Op_GetAndAddB: + case Op_GetAndAddS: + case Op_ShenandoahEnqueueBarrier: + case Op_FastLock: + case Op_FastUnlock: + case Op_Rethrow: + case Op_Return: + case Op_StoreB: + case Op_StoreC: + case Op_StoreD: + case Op_StoreF: + case Op_StoreL: + case Op_StoreLConditional: + case Op_StoreI: + case Op_StoreIConditional: + case Op_StoreN: + case Op_StoreP: + case Op_StoreVector: + case Op_StrInflatedCopy: + case Op_StrCompressedCopy: + case Op_EncodeP: + case Op_CastP2X: + case Op_SafePoint: + case Op_EncodeISOArray: + case Op_AryEq: + case Op_StrEquals: + case Op_StrComp: + case Op_StrIndexOf: + case Op_StrIndexOfChar: + case Op_HasNegatives: + // Known to require barriers + strength = STRONG; + break; + case Op_CmpP: { + if (n->in(1)->bottom_type()->higher_equal(TypePtr::NULL_PTR) || + n->in(2)->bottom_type()->higher_equal(TypePtr::NULL_PTR)) { + // One of the sides is known null, no need for barrier. + } else { + strength = STRONG; + } + break; + } + case Op_LoadB: + case Op_LoadUB: + case Op_LoadUS: + case Op_LoadD: + case Op_LoadF: + case Op_LoadL: + case Op_LoadI: + case Op_LoadS: + case Op_LoadN: + case Op_LoadP: + case Op_LoadVector: { + const TypePtr* adr_type = n->adr_type(); + int alias_idx = Compile::current()->get_alias_index(adr_type); + Compile::AliasType* alias_type = Compile::current()->alias_type(alias_idx); + ciField* field = alias_type->field(); + bool is_static = field != NULL && field->is_static(); + bool is_final = field != NULL && field->is_final(); + + if (ShenandoahOptimizeStaticFinals && is_static && is_final) { + // Loading the constant does not require barriers: it should be handled + // as part of GC roots already. + } else { + strength = STRONG; + } + break; + } + case Op_Conv2B: + case Op_LoadRange: + case Op_LoadKlass: + case Op_LoadNKlass: + // Do not require barriers + break; + case Op_AddP: + case Op_CheckCastPP: + case Op_CastPP: + case Op_CMoveP: + case Op_Phi: + case Op_ShenandoahLoadReferenceBarrier: + // Whether or not these need the barriers depends on their users + visit_users = true; + break; + default: { +#ifdef ASSERT + fatal("Unknown node in get_barrier_strength: %s", NodeClassNames[n->Opcode()]); +#else + // Default to strong: better to have excess barriers, rather than miss some. 
+ strength = STRONG; +#endif + } + } + + stack.pop(); + if (visit_users) { + for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { + Node* user = n->fast_out(i); + if (user != NULL) { + stack.push(user, 0); + } + } + } + } + return strength; +} + +CallStaticJavaNode* ShenandoahLoadReferenceBarrierNode::pin_and_expand_null_check(PhaseIterGVN& igvn) { + Node* val = in(ValueIn); + + const Type* val_t = igvn.type(val); + + if (val_t->meet(TypePtr::NULL_PTR) != val_t && + val->Opcode() == Op_CastPP && + val->in(0) != NULL && + val->in(0)->Opcode() == Op_IfTrue && + val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) && + val->in(0)->in(0)->is_If() && + val->in(0)->in(0)->in(1)->Opcode() == Op_Bool && + val->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne && + val->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP && + val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1) && + val->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) { + assert(val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1), ""); + CallStaticJavaNode* unc = val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none); + return unc; + } + return NULL; +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.hpp 2020-01-17 17:09:38.325132231 +0100 @@ -0,0 +1,264 @@ +/* + * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_GC_SHENANDOAH_C2_SHENANDOAHSUPPORT_HPP +#define SHARE_GC_SHENANDOAH_C2_SHENANDOAHSUPPORT_HPP + +#include "memory/allocation.hpp" +#include "opto/addnode.hpp" +#include "opto/graphKit.hpp" +#include "opto/machnode.hpp" +#include "opto/memnode.hpp" +#include "opto/multnode.hpp" +#include "opto/node.hpp" + +class PhaseGVN; +class MemoryGraphFixer; + +class ShenandoahBarrierC2Support : public AllStatic { +private: +#ifdef ASSERT + enum verify_type { + ShenandoahLoad, + ShenandoahStore, + ShenandoahValue, + ShenandoahOopStore, + ShenandoahNone + }; + + static bool verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used); + static void report_verify_failure(const char* msg, Node* n1 = NULL, Node* n2 = NULL); + static void verify_raw_mem(RootNode* root); +#endif + static Node* dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase); + static Node* no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase); + static bool is_heap_state_test(Node* iff, int mask); + static bool try_common_gc_state_load(Node *n, PhaseIdealLoop *phase); + static bool has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase); + static Node* find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase); + static void follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase); + static void test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase); + static void test_heap_stable(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl, + PhaseIdealLoop* phase); + static void call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem, bool is_native, PhaseIdealLoop* phase); + static Node* clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase); + static void fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl, Unique_Node_List& uses, + PhaseIdealLoop* phase); + static void in_cset_fast_test(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase); + static void move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase); + static void merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase); + static bool identical_backtoback_ifs(Node *n, PhaseIdealLoop* phase); + static void fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase); + static IfNode* find_unswitching_candidate(const IdealLoopTree *loop, PhaseIdealLoop* phase); + + static Node* get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* lrb); +public: + static bool is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase); + static bool is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase); + + static bool is_gc_state_load(Node* n); + static bool is_heap_stable_test(Node* iff); + + static bool expand(Compile* C, PhaseIterGVN& igvn); + static void pin_and_expand(PhaseIdealLoop* phase); + static void optimize_after_expansion(VectorSet& visited, Node_Stack& nstack, Node_List& old_new, PhaseIdealLoop* phase); + +#ifdef ASSERT + static void verify(RootNode* root); +#endif +}; + +class ShenandoahEnqueueBarrierNode : public Node { +public: + ShenandoahEnqueueBarrierNode(Node* val); + + const Type *bottom_type() const; + const Type* Value(PhaseGVN* phase) const; + Node* Identity(PhaseGVN* phase); + + int Opcode() const; + +private: + enum { Needed, 
NotNeeded, MaybeNeeded }; + + static int needed(Node* n); + static Node* next(Node* n); +}; + +class MemoryGraphFixer : public ResourceObj { +private: + Node_List _memory_nodes; + int _alias; + PhaseIdealLoop* _phase; + bool _include_lsm; + + void collect_memory_nodes(); + Node* get_ctrl(Node* n) const; + Node* ctrl_or_self(Node* n) const; + bool mem_is_valid(Node* m, Node* c) const; + MergeMemNode* allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const; + MergeMemNode* clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const; + void fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const; + bool should_process_phi(Node* phi) const; + bool has_mem_phi(Node* region) const; + +public: + MemoryGraphFixer(int alias, bool include_lsm, PhaseIdealLoop* phase) : + _alias(alias), _phase(phase), _include_lsm(include_lsm) { + assert(_alias != Compile::AliasIdxBot, "unsupported"); + collect_memory_nodes(); + } + + Node* find_mem(Node* ctrl, Node* n) const; + void fix_mem(Node* ctrl, Node* region, Node* mem, Node* mem_for_ctrl, Node* mem_phi, Unique_Node_List& uses); + int alias() const { return _alias; } +}; + +class ShenandoahCompareAndSwapPNode : public CompareAndSwapPNode { +public: + ShenandoahCompareAndSwapPNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) + : CompareAndSwapPNode(c, mem, adr, val, ex, mem_ord) { } + + virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) { + if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypePtr::NULL_PTR) { + return new CompareAndSwapPNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn), order()); + } + return NULL; + } + + virtual int Opcode() const; +}; + +class ShenandoahCompareAndSwapNNode : public CompareAndSwapNNode { +public: + ShenandoahCompareAndSwapNNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) + : CompareAndSwapNNode(c, mem, adr, val, ex, mem_ord) { } + + virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) { + if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypeNarrowOop::NULL_PTR) { + return new CompareAndSwapNNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn), order()); + } + return NULL; + } + + virtual int Opcode() const; +}; + +class ShenandoahWeakCompareAndSwapPNode : public WeakCompareAndSwapPNode { +public: + ShenandoahWeakCompareAndSwapPNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) + : WeakCompareAndSwapPNode(c, mem, adr, val, ex, mem_ord) { } + + virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) { + if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypePtr::NULL_PTR) { + return new WeakCompareAndSwapPNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn), order()); + } + return NULL; + } + + virtual int Opcode() const; +}; + +class ShenandoahWeakCompareAndSwapNNode : public WeakCompareAndSwapNNode { +public: + ShenandoahWeakCompareAndSwapNNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) + : WeakCompareAndSwapNNode(c, mem, adr, val, ex, mem_ord) { } + + virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) { + if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypeNarrowOop::NULL_PTR) { + return new WeakCompareAndSwapNNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), 
in(ExpectedIn), order()); + } + return NULL; + } + + virtual int Opcode() const; +}; + +class ShenandoahCompareAndExchangePNode : public CompareAndExchangePNode { +public: + ShenandoahCompareAndExchangePNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) + : CompareAndExchangePNode(c, mem, adr, val, ex, at, t, mem_ord) { } + + virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) { + if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypePtr::NULL_PTR) { + return new CompareAndExchangePNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn), adr_type(), bottom_type(), order()); + } + return NULL; + } + + virtual int Opcode() const; +}; + +class ShenandoahCompareAndExchangeNNode : public CompareAndExchangeNNode { +public: + ShenandoahCompareAndExchangeNNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) + : CompareAndExchangeNNode(c, mem, adr, val, ex, at, t, mem_ord) { } + + virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) { + if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypeNarrowOop::NULL_PTR) { + return new CompareAndExchangeNNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn), adr_type(), bottom_type(), order()); + } + return NULL; + } + + virtual int Opcode() const; +}; + +class ShenandoahLoadReferenceBarrierNode : public Node { +public: + enum { + Control, + ValueIn + }; + + enum Strength { + NONE, STRONG + }; + + ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* val); + + virtual int Opcode() const; + virtual const Type* bottom_type() const; + virtual const Type* Value(PhaseGVN* phase) const; + virtual const class TypePtr *adr_type() const { return TypeOopPtr::BOTTOM; } + virtual uint match_edge(uint idx) const { + return idx >= ValueIn; + } + virtual uint ideal_reg() const { return Op_RegP; } + + virtual Node* Identity(PhaseGVN* phase); + + uint size_of() const { + return sizeof(*this); + } + + Strength get_barrier_strength(); + CallStaticJavaNode* pin_and_expand_null_check(PhaseIterGVN& igvn); + +private: + bool needs_barrier(PhaseGVN* phase, Node* n); + bool needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited); +}; + + +#endif // SHARE_GC_SHENANDOAH_C2_SHENANDOAHSUPPORT_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp 2020-01-17 17:09:38.940132197 +0100 @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
+#include "gc/shenandoah/shenandoahCollectionSet.hpp"
+#include "gc/shenandoah/shenandoahFreeSet.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "logging/log.hpp"
+#include "logging/logTag.hpp"
+#include "utilities/quickSort.hpp"
+
+ShenandoahAdaptiveHeuristics::ShenandoahAdaptiveHeuristics() :
+  ShenandoahHeuristics(),
+  _cycle_gap_history(new TruncatedSeq(5)),
+  _conc_mark_duration_history(new TruncatedSeq(5)),
+  _conc_uprefs_duration_history(new TruncatedSeq(5)) {}
+
+ShenandoahAdaptiveHeuristics::~ShenandoahAdaptiveHeuristics() {}
+
+void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
+                                                                         RegionData* data, size_t size,
+                                                                         size_t actual_free) {
+  size_t garbage_threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
+
+  // The logic for cset selection in adaptive is as follows:
+  //
+  //   1. We cannot get a cset larger than the available free space. Otherwise we guarantee OOME
+  //      during evacuation, and thus guarantee a full GC. In practice, we also want to let the
+  //      application allocate something. This is why we limit the CSet to some fraction of the
+  //      available space. In a non-overloaded heap, max_cset would contain all plausible candidates
+  //      over the garbage threshold.
+  //
+  //   2. We should not make the cset too small, or the free threshold would be crossed again right
+  //      after the cycle. Otherwise we get back-to-back cycles for no reason, if the heap is
+  //      too fragmented. In a non-overloaded, non-fragmented heap min_garbage would be around zero.
+  //
+  // Therefore, we start by sorting the regions by garbage. Then we unconditionally add the best candidates
+  // until we meet min_garbage. Then we add all candidates that fit the garbage threshold, until
+  // we hit max_cset. When max_cset is hit, cset selection terminates. Note that in this scheme,
+  // ShenandoahGarbageThreshold is a soft threshold which is ignored until min_garbage is hit.
+
+  size_t capacity = ShenandoahHeap::heap()->max_capacity();
+  size_t free_target = capacity / 100 * ShenandoahMinFreeThreshold;
+  size_t min_garbage = free_target > actual_free ? (free_target - actual_free) : 0;
+  size_t max_cset = (size_t)((1.0 * capacity / 100 * ShenandoahEvacReserve) / ShenandoahEvacWaste);
+
+  log_info(gc, ergo)("Adaptive CSet Selection. Target Free: " SIZE_FORMAT "%s, Actual Free: "
+                     SIZE_FORMAT "%s, Max CSet: " SIZE_FORMAT "%s, Min Garbage: " SIZE_FORMAT "%s",
+                     byte_size_in_proper_unit(free_target), proper_unit_for_byte_size(free_target),
+                     byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free),
+                     byte_size_in_proper_unit(max_cset), proper_unit_for_byte_size(max_cset),
+                     byte_size_in_proper_unit(min_garbage), proper_unit_for_byte_size(min_garbage));
+
+  // Prefer to select garbage-first regions
+  QuickSort::sort(data, (int)size, compare_by_garbage, false);
+
+  size_t cur_cset = 0;
+  size_t cur_garbage = 0;
+  _bytes_in_cset = 0;
+
+  for (size_t idx = 0; idx < size; idx++) {
+    ShenandoahHeapRegion* r = data[idx]._region;
+
+    size_t new_cset = cur_cset + r->get_live_data_bytes();
+    size_t new_garbage = cur_garbage + r->garbage();
+
+    if (new_cset > max_cset) {
+      break;
+    }
+
+    if ((new_garbage < min_garbage) || (r->garbage() > garbage_threshold)) {
+      cset->add_region(r);
+      _bytes_in_cset += r->used();
+      cur_cset = new_cset;
+      cur_garbage = new_garbage;
+    }
+  }
+}
+
+void ShenandoahAdaptiveHeuristics::record_cycle_start() {
+  ShenandoahHeuristics::record_cycle_start();
+  double last_cycle_gap = (_cycle_start - _last_cycle_end);
+  _cycle_gap_history->add(last_cycle_gap);
+}
+
+void ShenandoahAdaptiveHeuristics::record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs) {
+  if (phase == ShenandoahPhaseTimings::conc_mark) {
+    _conc_mark_duration_history->add(secs);
+  } else if (phase == ShenandoahPhaseTimings::conc_update_refs) {
+    _conc_uprefs_duration_history->add(secs);
+  } // Else ignore
+}
+
+bool ShenandoahAdaptiveHeuristics::should_start_gc() const {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  size_t capacity = heap->max_capacity();
+  size_t available = heap->free_set()->available();
+
+  // Check if we are falling below the worst limit; if so, it is time to trigger the GC,
+  // regardless of anything else.
+  size_t min_threshold = capacity / 100 * ShenandoahMinFreeThreshold;
+  if (available < min_threshold) {
+    log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)",
+                 byte_size_in_proper_unit(available), proper_unit_for_byte_size(available),
+                 byte_size_in_proper_unit(min_threshold), proper_unit_for_byte_size(min_threshold));
+    return true;
+  }
+
+  // Check if we need to learn a bit about the application
+  const size_t max_learn = ShenandoahLearningSteps;
+  if (_gc_times_learned < max_learn) {
+    size_t init_threshold = capacity / 100 * ShenandoahInitFreeThreshold;
+    if (available < init_threshold) {
+      log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "%s) is below initial threshold (" SIZE_FORMAT "%s)",
+                   _gc_times_learned + 1, max_learn,
+                   byte_size_in_proper_unit(available), proper_unit_for_byte_size(available),
+                   byte_size_in_proper_unit(init_threshold), proper_unit_for_byte_size(init_threshold));
+      return true;
+    }
+  }
+
+  // Check if allocation headroom is still okay. This also factors in:
+  //   1. Some space to absorb allocation spikes
+  //   2. Accumulated penalties from Degenerated and Full GC
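+
+  // (Illustrative sketch of the trigger math below, with made-up numbers: on a
+  // 1024 MB heap with 200 MB available and ShenandoahAllocSpikeFactor == 5, the
+  // spike headroom is ~51 MB; with no accumulated penalties, the allocation
+  // headroom is ~149 MB. At an average allocation rate of 100 MB/s that headroom
+  // lasts ~1.5 s, so any average GC time above ~1.5 s triggers a cycle.)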
+
+  size_t allocation_headroom = available;
+
+  size_t spike_headroom = capacity / 100 * ShenandoahAllocSpikeFactor;
+  size_t penalties = capacity / 100 * _gc_time_penalties;
+
+  allocation_headroom -= MIN2(allocation_headroom, spike_headroom);
+  allocation_headroom -= MIN2(allocation_headroom, penalties);
+
+  // TODO: Allocation rate is way too averaged to be useful during state changes
+
+  double average_gc = _gc_time_history->avg();
+  double time_since_last = time_since_last_gc();
+  double allocation_rate = heap->bytes_allocated_since_gc_start() / time_since_last;
+
+  if (average_gc > allocation_headroom / allocation_rate) {
+    log_info(gc)("Trigger: Average GC time (%.2f ms) is above the time for allocation rate (%.0f %sB/s) to deplete free headroom (" SIZE_FORMAT "%s)",
+                 average_gc * 1000,
+                 byte_size_in_proper_unit(allocation_rate), proper_unit_for_byte_size(allocation_rate),
+                 byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom));
+    log_info(gc, ergo)("Free headroom: " SIZE_FORMAT "%s (free) - " SIZE_FORMAT "%s (spike) - " SIZE_FORMAT "%s (penalties) = " SIZE_FORMAT "%s",
+                 byte_size_in_proper_unit(available), proper_unit_for_byte_size(available),
+                 byte_size_in_proper_unit(spike_headroom), proper_unit_for_byte_size(spike_headroom),
+                 byte_size_in_proper_unit(penalties), proper_unit_for_byte_size(penalties),
+                 byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom));
+    return true;
+  }
+
+  return ShenandoahHeuristics::should_start_gc();
+}
+
+bool ShenandoahAdaptiveHeuristics::should_start_update_refs() {
+  if (!_update_refs_adaptive) {
+    return _update_refs_early;
+  }
+
+  double cycle_gap_avg = _cycle_gap_history->avg();
+  double conc_mark_avg = _conc_mark_duration_history->avg();
+  double conc_uprefs_avg = _conc_uprefs_duration_history->avg();
+
+  if (_update_refs_early) {
+    double threshold = ShenandoahMergeUpdateRefsMinGap / 100.0;
+    if (conc_mark_avg + conc_uprefs_avg > cycle_gap_avg * threshold) {
+      _update_refs_early = false;
+    }
+  } else {
+    double threshold = ShenandoahMergeUpdateRefsMaxGap / 100.0;
+    if (conc_mark_avg + conc_uprefs_avg < cycle_gap_avg * threshold) {
+      _update_refs_early = true;
+    }
+  }
+  return _update_refs_early;
+}
+
+const char* ShenandoahAdaptiveHeuristics::name() {
+  return "adaptive";
+}
+
+bool ShenandoahAdaptiveHeuristics::is_diagnostic() {
+  return false;
+}
+
+bool ShenandoahAdaptiveHeuristics::is_experimental() {
+  return false;
+}
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp	2020-01-17 17:09:39.554132163 +0100
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHADAPTIVEHEURISTICS_HPP +#define SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHADAPTIVEHEURISTICS_HPP + +#include "gc/shenandoah/shenandoahHeuristics.hpp" +#include "gc/shenandoah/shenandoahPhaseTimings.hpp" +#include "utilities/numberSeq.hpp" + +class ShenandoahAdaptiveHeuristics : public ShenandoahHeuristics { +private: + TruncatedSeq* _cycle_gap_history; + TruncatedSeq* _conc_mark_duration_history; + TruncatedSeq* _conc_uprefs_duration_history; + +public: + ShenandoahAdaptiveHeuristics(); + + virtual ~ShenandoahAdaptiveHeuristics(); + + virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, + RegionData* data, size_t size, + size_t actual_free); + + void record_cycle_start(); + + virtual void record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs); + + virtual bool should_start_gc() const; + + virtual bool should_start_update_refs(); + + virtual const char* name(); + + virtual bool is_diagnostic(); + + virtual bool is_experimental(); +}; + +#endif // SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHADAPTIVEHEURISTICS_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp 2020-01-17 17:09:40.155132130 +0100 @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp" +#include "gc/shenandoah/shenandoahCollectionSet.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.hpp" +#include "logging/log.hpp" +#include "logging/logTag.hpp" +#include "runtime/os.hpp" + +ShenandoahAggressiveHeuristics::ShenandoahAggressiveHeuristics() : ShenandoahHeuristics() { + // Do not shortcut evacuation + SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahImmediateThreshold, 100); + + // Aggressive runs with max speed for allocation, to capture races against mutator + SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahPacing); + + // Aggressive evacuates everything, so it needs as much evac space as it can get + SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahEvacReserveOverflow); + + // If class unloading is globally enabled, aggressive does unloading even with + // concurrent cycles. 
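+  // (Illustrative, editor's note: assuming ShenandoahUnloadClassesFrequency
+  // counts the cycles between unloading passes, the override to 1 below asks
+  // for class unloading on every concurrent cycle.)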
+ if (ClassUnloading) { + SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahUnloadClassesFrequency, 1); + } + + // Final configuration checks + SHENANDOAH_CHECK_FLAG_SET(ShenandoahLoadRefBarrier); + SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier); + SHENANDOAH_CHECK_FLAG_SET(ShenandoahKeepAliveBarrier); + SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier); + SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier); +} + +void ShenandoahAggressiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, + RegionData* data, size_t size, + size_t free) { + for (size_t idx = 0; idx < size; idx++) { + ShenandoahHeapRegion* r = data[idx]._region; + if (r->garbage() > 0) { + cset->add_region(r); + } + } +} + +bool ShenandoahAggressiveHeuristics::should_start_gc() const { + log_info(gc)("Trigger: Start next cycle immediately"); + return true; +} + +bool ShenandoahAggressiveHeuristics::should_process_references() { + if (!can_process_references()) return false; + // Randomly process refs with 50% chance. + return (os::random() & 1) == 1; +} + +bool ShenandoahAggressiveHeuristics::should_unload_classes() { + if (!can_unload_classes_normal()) return false; + if (has_metaspace_oom()) return true; + // Randomly unload classes with 50% chance. + return (os::random() & 1) == 1; +} + +const char* ShenandoahAggressiveHeuristics::name() { + return "aggressive"; +} + +bool ShenandoahAggressiveHeuristics::is_diagnostic() { + return true; +} + +bool ShenandoahAggressiveHeuristics::is_experimental() { + return false; +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp 2020-01-17 17:09:40.764132096 +0100 @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHAGGRESSIVEHEURISTICS_HPP +#define SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHAGGRESSIVEHEURISTICS_HPP + +#include "gc/shenandoah/shenandoahHeuristics.hpp" + +class ShenandoahAggressiveHeuristics : public ShenandoahHeuristics { +public: + ShenandoahAggressiveHeuristics(); + + virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, + RegionData* data, size_t size, + size_t free); + + virtual bool should_start_gc() const; + + virtual bool should_process_references(); + + virtual bool should_unload_classes(); + + virtual const char* name(); + + virtual bool is_diagnostic(); + + virtual bool is_experimental(); +}; + +#endif // SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHAGGRESSIVEHEURISTICS_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp 2020-01-17 17:09:41.378132062 +0100 @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/shenandoahCollectionSet.hpp" +#include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp" +#include "gc/shenandoah/shenandoahFreeSet.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.hpp" +#include "logging/log.hpp" +#include "logging/logTag.hpp" + +ShenandoahCompactHeuristics::ShenandoahCompactHeuristics() : ShenandoahHeuristics() { + SHENANDOAH_ERGO_ENABLE_FLAG(ExplicitGCInvokesConcurrent); + SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent); + SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahUncommit); + SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahAlwaysClearSoftRefs); + SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahAllocationThreshold, 10); + SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahImmediateThreshold, 100); + SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahUncommitDelay, 1000); + SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahGuaranteedGCInterval, 30000); + SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahGarbageThreshold, 10); + + // Final configuration checks + SHENANDOAH_CHECK_FLAG_SET(ShenandoahLoadRefBarrier); + SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier); + SHENANDOAH_CHECK_FLAG_SET(ShenandoahKeepAliveBarrier); + SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier); + SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier); +} + +bool ShenandoahCompactHeuristics::should_start_gc() const { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + + size_t capacity = heap->max_capacity(); + size_t available = heap->free_set()->available(); + + size_t threshold_bytes_allocated = capacity / 100 * ShenandoahAllocationThreshold; + size_t min_threshold = capacity / 100 * ShenandoahMinFreeThreshold; + + if (available < min_threshold) { + log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)", + byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), + byte_size_in_proper_unit(min_threshold), proper_unit_for_byte_size(min_threshold)); + return true; + } + + size_t bytes_allocated = heap->bytes_allocated_since_gc_start(); + if (bytes_allocated > threshold_bytes_allocated) { + log_info(gc)("Trigger: Allocated since last cycle (" SIZE_FORMAT "%s) is larger than allocation threshold (" SIZE_FORMAT "%s)", + byte_size_in_proper_unit(bytes_allocated), proper_unit_for_byte_size(bytes_allocated), + byte_size_in_proper_unit(threshold_bytes_allocated), proper_unit_for_byte_size(threshold_bytes_allocated)); + return true; + } + + return ShenandoahHeuristics::should_start_gc(); +} + +void ShenandoahCompactHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, + RegionData* data, size_t size, + size_t actual_free) { + // Do not select too large CSet that would overflow the available free space + size_t max_cset = actual_free * 3 / 4; + + log_info(gc, ergo)("CSet Selection. 
Actual Free: " SIZE_FORMAT "%s, Max CSet: " SIZE_FORMAT "%s", + byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free), + byte_size_in_proper_unit(max_cset), proper_unit_for_byte_size(max_cset)); + + size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100; + + size_t live_cset = 0; + for (size_t idx = 0; idx < size; idx++) { + ShenandoahHeapRegion* r = data[idx]._region; + size_t new_cset = live_cset + r->get_live_data_bytes(); + if (new_cset < max_cset && r->garbage() > threshold) { + live_cset = new_cset; + cset->add_region(r); + } + } +} + +const char* ShenandoahCompactHeuristics::name() { + return "compact"; +} + +bool ShenandoahCompactHeuristics::is_diagnostic() { + return false; +} + +bool ShenandoahCompactHeuristics::is_experimental() { + return false; +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp 2020-01-17 17:09:41.981132029 +0100 @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHCOMPACTHEURISTICS_HPP +#define SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHCOMPACTHEURISTICS_HPP + +#include "gc/shenandoah/shenandoahHeuristics.hpp" + +class ShenandoahCompactHeuristics : public ShenandoahHeuristics { +public: + ShenandoahCompactHeuristics(); + + virtual bool should_start_gc() const; + + virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, + RegionData* data, size_t size, + size_t actual_free); + + virtual const char* name(); + + virtual bool is_diagnostic(); + + virtual bool is_experimental(); +}; + +#endif // SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHCOMPACTHEURISTICS_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp 2020-01-17 17:09:42.582131996 +0100 @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp"
+#include "gc/shenandoah/shenandoahCollectionSet.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "logging/log.hpp"
+#include "logging/logTag.hpp"
+
+bool ShenandoahPassiveHeuristics::should_start_gc() const {
+  // Never do concurrent GCs.
+  return false;
+}
+
+bool ShenandoahPassiveHeuristics::should_process_references() {
+  // Always process references, if we can.
+  return can_process_references();
+}
+
+bool ShenandoahPassiveHeuristics::should_unload_classes() {
+  // Always unload classes, if we can.
+  return can_unload_classes();
+}
+
+bool ShenandoahPassiveHeuristics::should_degenerate_cycle() {
+  // Always fall back to Degenerated GC, if enabled
+  return ShenandoahDegeneratedGC;
+}
+
+void ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
+                                                                        RegionData* data, size_t size,
+                                                                        size_t actual_free) {
+  assert(ShenandoahDegeneratedGC, "This path is only taken for Degenerated GC");
+
+  // Do not select a CSet so large that it would overflow the available free space.
+  // Take at least the entire evacuation reserve, and allow it to overflow into free space.
+  size_t capacity = ShenandoahHeap::heap()->max_capacity();
+  size_t available = MAX2(capacity / 100 * ShenandoahEvacReserve, actual_free);
+  size_t max_cset = (size_t)(available / ShenandoahEvacWaste);
+
+  log_info(gc, ergo)("CSet Selection. Actual Free: " SIZE_FORMAT "%s, Max CSet: " SIZE_FORMAT "%s",
+                     byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free),
+                     byte_size_in_proper_unit(max_cset), proper_unit_for_byte_size(max_cset));
+
+  size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
+
+  size_t live_cset = 0;
+  for (size_t idx = 0; idx < size; idx++) {
+    ShenandoahHeapRegion* r = data[idx]._region;
+    size_t new_cset = live_cset + r->get_live_data_bytes();
+    if (new_cset < max_cset && r->garbage() > threshold) {
+      live_cset = new_cset;
+      cset->add_region(r);
+    }
+  }
+}
+
+const char* ShenandoahPassiveHeuristics::name() {
+  return "passive";
+}
+
+bool ShenandoahPassiveHeuristics::is_diagnostic() {
+  return true;
+}
+
+bool ShenandoahPassiveHeuristics::is_experimental() {
+  return false;
+}
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp	2020-01-17 17:09:43.194131962 +0100
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHPASSIVEHEURISTICS_HPP +#define SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHPASSIVEHEURISTICS_HPP + +#include "gc/shenandoah/shenandoahHeuristics.hpp" + +class ShenandoahPassiveHeuristics : public ShenandoahHeuristics { +public: + virtual bool should_start_gc() const; + + virtual bool should_process_references(); + + virtual bool should_unload_classes(); + + virtual bool should_degenerate_cycle(); + + virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, + RegionData* data, size_t data_size, + size_t free); + + virtual const char* name(); + + virtual bool is_diagnostic(); + + virtual bool is_experimental(); +}; + +#endif // SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHPASSIVEHEURISTICS_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp 2020-01-17 17:09:43.805131929 +0100 @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp" +#include "gc/shenandoah/shenandoahCollectionSet.hpp" +#include "gc/shenandoah/shenandoahFreeSet.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.hpp" +#include "logging/log.hpp" +#include "logging/logTag.hpp" + +ShenandoahStaticHeuristics::ShenandoahStaticHeuristics() : ShenandoahHeuristics() { + // Static heuristics may degrade to continuous if live data is larger + // than free threshold. ShenandoahAllocationThreshold is supposed to break this, + // but it only works if it is non-zero. 
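+  // (Illustrative sketch, editor's assumption: these heuristics apply percentage
+  // flags as capacity / 100 * flag, so on a hypothetical 10 GB heap the override
+  // to 1 below holds the next cycle off until roughly 100 MB has been allocated
+  // since the previous one, which is enough to break back-to-back cycling.)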
+ SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahAllocationThreshold, 1); + + SHENANDOAH_ERGO_ENABLE_FLAG(ExplicitGCInvokesConcurrent); + SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent); + + // Final configuration checks + SHENANDOAH_CHECK_FLAG_SET(ShenandoahLoadRefBarrier); + SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier); + SHENANDOAH_CHECK_FLAG_SET(ShenandoahKeepAliveBarrier); + SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier); + SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier); +} + +ShenandoahStaticHeuristics::~ShenandoahStaticHeuristics() {} + +bool ShenandoahStaticHeuristics::should_start_gc() const { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + + size_t capacity = heap->max_capacity(); + size_t available = heap->free_set()->available(); + size_t threshold_available = capacity / 100 * ShenandoahFreeThreshold; + + if (available < threshold_available) { + log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is below free threshold (" SIZE_FORMAT "%s)", + byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), + byte_size_in_proper_unit(threshold_available), proper_unit_for_byte_size(threshold_available)); + return true; + } + return ShenandoahHeuristics::should_start_gc(); +} + +void ShenandoahStaticHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, + RegionData* data, size_t size, + size_t free) { + size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100; + + for (size_t idx = 0; idx < size; idx++) { + ShenandoahHeapRegion* r = data[idx]._region; + if (r->garbage() > threshold) { + cset->add_region(r); + } + } +} + +const char* ShenandoahStaticHeuristics::name() { + return "static"; +} + +bool ShenandoahStaticHeuristics::is_diagnostic() { + return false; +} + +bool ShenandoahStaticHeuristics::is_experimental() { + return false; +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp 2020-01-17 17:09:44.414131895 +0100 @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHSTATICHEURISTICS_HPP +#define SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHSTATICHEURISTICS_HPP + +#include "gc/shenandoah/shenandoahHeuristics.hpp" + +class ShenandoahStaticHeuristics : public ShenandoahHeuristics { +public: + ShenandoahStaticHeuristics(); + + virtual ~ShenandoahStaticHeuristics(); + + virtual bool should_start_gc() const; + + virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, + RegionData* data, size_t size, + size_t free); + + virtual const char* name(); + + virtual bool is_diagnostic(); + + virtual bool is_experimental(); +}; + +#endif // SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHSTATICHEURISTICS_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/heuristics/shenandoahTraversalAggressiveHeuristics.cpp 2020-01-17 17:09:45.021131862 +0100 @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/heuristics/shenandoahTraversalAggressiveHeuristics.hpp" +#include "gc/shenandoah/shenandoahCollectionSet.hpp" +#include "gc/shenandoah/shenandoahFreeSet.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeuristics.hpp" +#include "gc/shenandoah/shenandoahTraversalGC.hpp" +#include "logging/log.hpp" +#include "logging/logTag.hpp" +#include "utilities/quickSort.hpp" + +ShenandoahTraversalAggressiveHeuristics::ShenandoahTraversalAggressiveHeuristics() : ShenandoahHeuristics(), + _last_cset_select(0) { + // Do not shortcut evacuation + SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahImmediateThreshold, 100); + + // Aggressive runs with max speed for allocation, to capture races against mutator + SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahPacing); + + // Aggressive evacuates everything, so it needs as much evac space as it can get + SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahEvacReserveOverflow); + + // If class unloading is globally enabled, aggressive does unloading even with + // concurrent cycles. 
+ if (ClassUnloading) { + SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahUnloadClassesFrequency, 1); + } + +} + +bool ShenandoahTraversalAggressiveHeuristics::is_experimental() { + return false; +} + +bool ShenandoahTraversalAggressiveHeuristics::is_diagnostic() { + return true; +} + +const char* ShenandoahTraversalAggressiveHeuristics::name() { + return "traversal-aggressive"; +} + +void ShenandoahTraversalAggressiveHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + + ShenandoahTraversalGC* traversal_gc = heap->traversal_gc(); + + ShenandoahHeapRegionSet* traversal_set = traversal_gc->traversal_set(); + traversal_set->clear(); + + RegionData *data = get_region_data_cache(heap->num_regions()); + size_t cnt = 0; + + // About to choose the collection set, make sure we have pinned regions in correct state + heap->assert_pinned_region_status(); + + // Step 0. Prepare all regions + for (size_t i = 0; i < heap->num_regions(); i++) { + ShenandoahHeapRegion* r = heap->get_region(i); + if (r->used() > 0) { + if (r->is_regular()) { + data[cnt]._region = r; + data[cnt]._garbage = r->garbage(); + data[cnt]._seqnum_last_alloc = r->seqnum_last_alloc_mutator(); + cnt++; + } + traversal_set->add_region(r); + } + } + + for (size_t i = 0; i < cnt; i++) { + if (data[i]._seqnum_last_alloc > _last_cset_select) continue; + + ShenandoahHeapRegion* r = data[i]._region; + assert (r->is_regular(), "should have been filtered before"); + + if (r->garbage() > 0) { + assert(!collection_set->is_in(r), "must not yet be in cset"); + collection_set->add_region(r); + } + } + + // Clear liveness data + // TODO: Merge it with step 0, but save live data in RegionData before. + for (size_t i = 0; i < heap->num_regions(); i++) { + ShenandoahHeapRegion* r = heap->get_region(i); + if (r->used() > 0) { + r->clear_live_data(); + } + } + + collection_set->update_region_status(); + + _last_cset_select = ShenandoahHeapRegion::seqnum_current_alloc(); +} + +void ShenandoahTraversalAggressiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, + RegionData* data, size_t data_size, + size_t free) { + ShouldNotReachHere(); +} + +bool ShenandoahTraversalAggressiveHeuristics::should_start_gc() const { + log_info(gc)("Trigger: Start next cycle immediately"); + return true; +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/heuristics/shenandoahTraversalAggressiveHeuristics.hpp 2020-01-17 17:09:45.622131828 +0100 @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHTRAVERSALAGGRESSIVEHEURISTICS_HPP +#define SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHTRAVERSALAGGRESSIVEHEURISTICS_HPP + +#include "gc/shenandoah/shenandoahHeuristics.hpp" + +class ShenandoahTraversalAggressiveHeuristics : public ShenandoahHeuristics { +private: + uint64_t _last_cset_select; + +protected: + virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, + RegionData* data, size_t data_size, + size_t free); + +public: + ShenandoahTraversalAggressiveHeuristics(); + + virtual bool is_experimental(); + + virtual bool is_diagnostic(); + + virtual const char* name(); + + virtual void choose_collection_set(ShenandoahCollectionSet* collection_set); + virtual bool should_start_gc() const; +}; + +#endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHTRAVERSALAGGRESSIVEHEURISTICS_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/heuristics/shenandoahTraversalHeuristics.cpp 2020-01-17 17:09:46.222131795 +0100 @@ -0,0 +1,256 @@ +/* + * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/heuristics/shenandoahTraversalHeuristics.hpp" +#include "gc/shenandoah/shenandoahCollectionSet.hpp" +#include "gc/shenandoah/shenandoahFreeSet.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeuristics.hpp" +#include "gc/shenandoah/shenandoahTraversalGC.hpp" +#include "logging/log.hpp" +#include "logging/logTag.hpp" +#include "utilities/quickSort.hpp" + +ShenandoahTraversalHeuristics::ShenandoahTraversalHeuristics() : ShenandoahHeuristics(), + _last_cset_select(0) {} + +bool ShenandoahTraversalHeuristics::is_experimental() { + return false; +} + +bool ShenandoahTraversalHeuristics::is_diagnostic() { + return false; +} + +const char* ShenandoahTraversalHeuristics::name() { + return "traversal"; +} + +void ShenandoahTraversalHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + + ShenandoahTraversalGC* traversal_gc = heap->traversal_gc(); + + ShenandoahHeapRegionSet* traversal_set = traversal_gc->traversal_set(); + traversal_set->clear(); + + RegionData *data = get_region_data_cache(heap->num_regions()); + size_t cnt = 0; + + // About to choose the collection set, make sure we have pinned regions in correct state + heap->assert_pinned_region_status(); + + // Step 0. 
Prepare all regions
+
+  for (size_t i = 0; i < heap->num_regions(); i++) {
+    ShenandoahHeapRegion* r = heap->get_region(i);
+    if (r->used() > 0) {
+      if (r->is_regular()) {
+        data[cnt]._region = r;
+        data[cnt]._garbage = r->garbage();
+        data[cnt]._seqnum_last_alloc = r->seqnum_last_alloc_mutator();
+        cnt++;
+      }
+      traversal_set->add_region(r);
+    }
+  }
+
+  // The logic for cset selection is similar to that of adaptive:
+  //
+  //   1. We cannot get a cset larger than the available free space. Otherwise we guarantee OOME
+  //      during evacuation, and thus guarantee a full GC. In practice, we also want to let the
+  //      application allocate something. This is why we limit the CSet to some fraction of the
+  //      available space. In a non-overloaded heap, max_cset would contain all plausible candidates
+  //      over the garbage threshold.
+  //
+  //   2. We should not make the cset too small, or the free threshold would be crossed again right
+  //      after the cycle. Otherwise we get back-to-back cycles for no reason, if the heap is
+  //      too fragmented. In a non-overloaded, non-fragmented heap min_garbage would be around zero.
+  //
+  // Therefore, we start by sorting the regions by garbage. Then we unconditionally add the best candidates
+  // until we meet min_garbage. Then we add all candidates that fit the garbage threshold, until
+  // we hit max_cset. When max_cset is hit, cset selection terminates. Note that in this scheme,
+  // ShenandoahGarbageThreshold is a soft threshold which is ignored until min_garbage is hit.
+  //
+  // The significant complication is that liveness data was collected at the previous cycle, and only
+  // for those regions that were allocated before the previous cycle started.
+
+  size_t capacity = heap->max_capacity();
+  size_t actual_free = heap->free_set()->available();
+  size_t free_target = capacity / 100 * ShenandoahMinFreeThreshold;
+  size_t min_garbage = free_target > actual_free ? (free_target - actual_free) : 0;
+  size_t max_cset = (size_t)((1.0 * capacity / 100 * ShenandoahEvacReserve) / ShenandoahEvacWaste);
+
+  log_info(gc, ergo)("Adaptive CSet Selection. Target Free: " SIZE_FORMAT "%s, Actual Free: "
+                     SIZE_FORMAT "%s, Max CSet: " SIZE_FORMAT "%s, Min Garbage: " SIZE_FORMAT "%s",
+                     byte_size_in_proper_unit(free_target), proper_unit_for_byte_size(free_target),
+                     byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free),
+                     byte_size_in_proper_unit(max_cset), proper_unit_for_byte_size(max_cset),
+                     byte_size_in_proper_unit(min_garbage), proper_unit_for_byte_size(min_garbage));
+
+  // Prefer to select garbage-first regions, and then older ones
+  QuickSort::sort(data, (int) cnt, compare_by_garbage_then_alloc_seq_ascending, false);
+
+  size_t cur_cset = 0;
+  size_t cur_garbage = 0;
+
+  size_t garbage_threshold = ShenandoahHeapRegion::region_size_bytes() / 100 * ShenandoahGarbageThreshold;
+
+  // Step 1. Add trustworthy regions to the collection set.
+  //
+  // We can trust live/garbage data from regions that were fully traversed during
+  // the previous cycle. Even if actual liveness is different now, we can only have _fewer_
+  // live objects, because dead objects are not resurrected. This means we can undershoot
+  // the collection set, but not overshoot it.
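+  //
+  // (Illustrative walk of the selection loop below, with made-up numbers: say
+  // max_cset == 100 MB and min_garbage == 50 MB. A sorted candidate with
+  // 2 MB live / 30 MB garbage is taken while cumulative garbage is still short
+  // of 50 MB, or when its own garbage exceeds the per-region threshold; the
+  // loop stops as soon as cumulative live data would exceed 100 MB.)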
+
+  for (size_t i = 0; i < cnt; i++) {
+    if (data[i]._seqnum_last_alloc > _last_cset_select) continue;
+
+    ShenandoahHeapRegion* r = data[i]._region;
+    assert (r->is_regular(), "should have been filtered before");
+
+    size_t new_garbage = cur_garbage + r->garbage();
+    size_t new_cset = cur_cset + r->get_live_data_bytes();
+
+    if (new_cset > max_cset) {
+      break;
+    }
+
+    if ((new_garbage < min_garbage) || (r->garbage() > garbage_threshold)) {
+      assert(!collection_set->is_in(r), "must not yet be in cset");
+      collection_set->add_region(r);
+      cur_cset = new_cset;
+      cur_garbage = new_garbage;
+    }
+  }
+
+  // Step 2. Try to catch some recently allocated regions for the evacuation ride.
+  //
+  // Pessimistically assume we are going to evacuate the entire region. While this
+  // is very pessimistic and in most cases undershoots the collection set when regions
+  // are mostly dead, it also provides more safety against running into allocation
+  // failure when newly allocated regions are fully live.
+
+  for (size_t i = 0; i < cnt; i++) {
+    if (data[i]._seqnum_last_alloc <= _last_cset_select) continue;
+
+    ShenandoahHeapRegion* r = data[i]._region;
+    assert (r->is_regular(), "should have been filtered before");
+
+    // size_t new_garbage = cur_garbage + 0; (implied)
+    size_t new_cset = cur_cset + r->used();
+
+    if (new_cset > max_cset) {
+      break;
+    }
+
+    assert(!collection_set->is_in(r), "must not yet be in cset");
+    collection_set->add_region(r);
+    cur_cset = new_cset;
+  }
+
+  // Step 3. Clear liveness data
+  // TODO: Merge it with step 0, but save live data in RegionData before.
+  for (size_t i = 0; i < heap->num_regions(); i++) {
+    ShenandoahHeapRegion* r = heap->get_region(i);
+    if (r->used() > 0) {
+      r->clear_live_data();
+    }
+  }
+
+  collection_set->update_region_status();
+
+  _last_cset_select = ShenandoahHeapRegion::seqnum_current_alloc();
+}
+
+bool ShenandoahTraversalHeuristics::should_start_gc() const {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  assert(!heap->has_forwarded_objects(), "no forwarded objects here");
+
+  size_t capacity = heap->max_capacity();
+  size_t available = heap->free_set()->available();
+
+  // Check if we are falling below the worst limit; if so, it is time to trigger the GC,
+  // regardless of anything else.
+  size_t min_threshold = capacity / 100 * ShenandoahMinFreeThreshold;
+  if (available < min_threshold) {
+    log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)",
+                 byte_size_in_proper_unit(available), proper_unit_for_byte_size(available),
+                 byte_size_in_proper_unit(min_threshold), proper_unit_for_byte_size(min_threshold));
+    return true;
+  }
+
+  // Check if we need to learn a bit about the application
+  const size_t max_learn = ShenandoahLearningSteps;
+  if (_gc_times_learned < max_learn) {
+    size_t init_threshold = capacity / 100 * ShenandoahInitFreeThreshold;
+    if (available < init_threshold) {
+      log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "%s) is below initial threshold (" SIZE_FORMAT "%s)",
+                   _gc_times_learned + 1, max_learn,
+                   byte_size_in_proper_unit(available), proper_unit_for_byte_size(available),
+                   byte_size_in_proper_unit(init_threshold), proper_unit_for_byte_size(init_threshold));
+      return true;
+    }
+  }
+
+  // Check if allocation headroom is still okay. This also factors in:
+  //   1. Some space to absorb allocation spikes
+  //   2. Accumulated penalties from Degenerated and Full GC
+
+  size_t allocation_headroom = available;
+
+  size_t spike_headroom = capacity / 100 * ShenandoahAllocSpikeFactor;
+  size_t penalties = capacity / 100 * _gc_time_penalties;
+
+  allocation_headroom -= MIN2(allocation_headroom, spike_headroom);
+  allocation_headroom -= MIN2(allocation_headroom, penalties);
+
+  double average_gc = _gc_time_history->avg();
+  double time_since_last = time_since_last_gc();
+  double allocation_rate = heap->bytes_allocated_since_gc_start() / time_since_last;
+
+  if (average_gc > allocation_headroom / allocation_rate) {
+    log_info(gc)("Trigger: Average GC time (%.2f ms) is above the time for allocation rate (%.0f %sB/s) to deplete free headroom (" SIZE_FORMAT "%s)",
+                 average_gc * 1000,
+                 byte_size_in_proper_unit(allocation_rate), proper_unit_for_byte_size(allocation_rate),
+                 byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom));
+    log_info(gc, ergo)("Free headroom: " SIZE_FORMAT "%s (free) - " SIZE_FORMAT "%s (spike) - " SIZE_FORMAT "%s (penalties) = " SIZE_FORMAT "%s",
+                 byte_size_in_proper_unit(available), proper_unit_for_byte_size(available),
+                 byte_size_in_proper_unit(spike_headroom), proper_unit_for_byte_size(spike_headroom),
+                 byte_size_in_proper_unit(penalties), proper_unit_for_byte_size(penalties),
+                 byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom));
+    return true;
+  } else if (ShenandoahHeuristics::should_start_gc()) {
+    return true;
+  }
+
+  return false;
+}
+
+void ShenandoahTraversalHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
+                                                                          RegionData* data, size_t data_size,
+                                                                          size_t free) {
+  ShouldNotReachHere();
+}
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/heuristics/shenandoahTraversalHeuristics.hpp	2020-01-17 17:09:46.832131762 +0100
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHTRAVERSALHEURISTICS_HPP +#define SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHTRAVERSALHEURISTICS_HPP + +#include "gc/shenandoah/shenandoahHeuristics.hpp" + +class ShenandoahTraversalHeuristics : public ShenandoahHeuristics { +private: + uint64_t _last_cset_select; + +protected: + virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, + RegionData* data, size_t data_size, + size_t free); + +public: + ShenandoahTraversalHeuristics(); + + virtual bool is_experimental(); + + virtual bool is_diagnostic(); + + virtual const char* name(); + + virtual void choose_collection_set(ShenandoahCollectionSet* collection_set); + + virtual bool should_start_gc() const; +}; + +#endif // SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHTRAVERSALHEURISTICS_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahAllocRequest.hpp 2020-01-17 17:09:47.438131728 +0100 @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHALLOCREQUEST_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHALLOCREQUEST_HPP + +#include "memory/allocation.hpp" + +class ShenandoahAllocRequest : StackObj { +public: + enum Type { + _alloc_shared, // Allocate common, outside of TLAB + _alloc_shared_gc, // Allocate common, outside of GCLAB + _alloc_tlab, // Allocate TLAB + _alloc_gclab, // Allocate GCLAB + _ALLOC_LIMIT + }; + + static const char* alloc_type_to_string(Type type) { + switch (type) { + case _alloc_shared: + return "Shared"; + case _alloc_shared_gc: + return "Shared GC"; + case _alloc_tlab: + return "TLAB"; + case _alloc_gclab: + return "GCLAB"; + default: + ShouldNotReachHere(); + return ""; + } + } + +private: + size_t _min_size; + size_t _requested_size; + size_t _actual_size; + Type _alloc_type; +#ifdef ASSERT + bool _actual_size_set; +#endif + + ShenandoahAllocRequest(size_t _min_size, size_t _requested_size, Type _alloc_type) : + _min_size(_min_size), _requested_size(_requested_size), + _actual_size(0), _alloc_type(_alloc_type) +#ifdef ASSERT + , _actual_size_set(false) +#endif + {} + +public: + static inline ShenandoahAllocRequest for_tlab(size_t min_size, size_t requested_size) { + return ShenandoahAllocRequest(min_size, requested_size, _alloc_tlab); + } + + static inline ShenandoahAllocRequest for_gclab(size_t min_size, size_t requested_size) { + return ShenandoahAllocRequest(min_size, requested_size, _alloc_gclab); + } + + static inline ShenandoahAllocRequest for_shared_gc(size_t requested_size) { + return ShenandoahAllocRequest(0, requested_size, _alloc_shared_gc); + } + + static inline ShenandoahAllocRequest for_shared(size_t requested_size) { + return ShenandoahAllocRequest(0, requested_size, _alloc_shared); + } + + inline size_t size() { + return _requested_size; + } + + inline Type type() { + return _alloc_type; + } + + inline size_t min_size() { + assert (is_lab_alloc(), "Only access for LAB allocs"); + return _min_size; + } + + inline size_t actual_size() { + assert (_actual_size_set, "Should be set"); + return _actual_size; + } + + inline void set_actual_size(size_t v) { +#ifdef ASSERT + assert (!_actual_size_set, "Should not be set"); + _actual_size_set = true; +#endif + _actual_size = v; + } + + inline bool is_mutator_alloc() { + switch (_alloc_type) { + case _alloc_tlab: + case _alloc_shared: + return true; + case _alloc_gclab: + case _alloc_shared_gc: + return false; + default: + ShouldNotReachHere(); + return false; + } + } + + inline bool is_gc_alloc() { + switch (_alloc_type) { + case _alloc_tlab: + case _alloc_shared: + return false; + case _alloc_gclab: + case _alloc_shared_gc: + return true; + default: + ShouldNotReachHere(); + return false; + } + } + + inline bool is_lab_alloc() { + switch (_alloc_type) { + case _alloc_tlab: + case _alloc_gclab: + return true; + case _alloc_shared: + case _alloc_shared_gc: + return false; + default: + ShouldNotReachHere(); + return false; + } + } +}; + +#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHALLOCREQUEST_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahAllocTracker.cpp 2020-01-17 17:09:48.044131695 +0100 @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/shenandoahAllocTracker.hpp" +#include "utilities/ostream.hpp" + +void ShenandoahAllocTracker::print_on(outputStream* out) const { + out->print_cr("ALLOCATION TRACING"); + out->print_cr(" These are the slow-path allocations, including TLAB/GCLAB refills, and out-of-TLAB allocations."); + out->print_cr(" In-TLAB/GCLAB allocations happen orders of magnitude more frequently, and without delays."); + out->cr(); + + out->print("%22s", ""); + for (size_t t = 0; t < ShenandoahAllocRequest::_ALLOC_LIMIT; t++) { + out->print("%12s", ShenandoahAllocRequest::alloc_type_to_string(ShenandoahAllocRequest::Type(t))); + } + out->cr(); + + out->print_cr("Counts:"); + out->print("%22s", "#"); + for (size_t t = 0; t < ShenandoahAllocRequest::_ALLOC_LIMIT; t++) { + out->print(SIZE_FORMAT_W(12), _alloc_size[t].num()); + } + out->cr(); + out->cr(); + + // Figure out max and min levels + int lat_min_level = +1000; + int lat_max_level = -1000; + int size_min_level = +1000; + int size_max_level = -1000; + for (size_t t = 0; t < ShenandoahAllocRequest::_ALLOC_LIMIT; t++) { + lat_min_level = MIN2(lat_min_level, _alloc_latency[t].min_level()); + lat_max_level = MAX2(lat_max_level, _alloc_latency[t].max_level()); + size_min_level = MIN2(size_min_level, _alloc_size[t].min_level()); + size_max_level = MAX2(size_max_level, _alloc_size[t].max_level()); + } + + out->print_cr("Latency summary:"); + out->print("%22s", "sum, ms:"); + for (size_t t = 0; t < ShenandoahAllocRequest::_ALLOC_LIMIT; t++) { + out->print(SIZE_FORMAT_W(12), _alloc_latency[t].sum() / K); + } + out->cr(); + out->cr(); + + out->print_cr("Sizes summary:"); + out->print("%22s", "sum, M:"); + for (size_t t = 0; t < ShenandoahAllocRequest::_ALLOC_LIMIT; t++) { + out->print(SIZE_FORMAT_W(12), _alloc_size[t].sum() * HeapWordSize / M); + } + out->cr(); + out->cr(); + + out->print_cr("Latency histogram (time in microseconds):"); + for (int c = lat_min_level; c <= lat_max_level; c++) { + out->print("%9d - %9d:", (c == 0) ? 0 : 1 << (c - 1), 1 << c); + for (size_t t = 0; t < ShenandoahAllocRequest::_ALLOC_LIMIT; t++) { + out->print(SIZE_FORMAT_W(12), _alloc_latency[t].level(c)); + } + out->cr(); + } + out->cr(); + + out->print_cr("Sizes histogram (size in bytes):"); + for (int c = size_min_level; c <= size_max_level; c++) { + int l = (c == 0) ? 
0 : 1 << (c - 1);
+    int r = 1 << c;
+    out->print("%9d - %9d:", l * HeapWordSize, r * HeapWordSize);
+    for (size_t t = 0; t < ShenandoahAllocRequest::_ALLOC_LIMIT; t++) {
+      out->print(SIZE_FORMAT_W(12), _alloc_size[t].level(c));
+    }
+    out->cr();
+  }
+  out->cr();
+}
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahAllocTracker.hpp 2020-01-17 17:09:48.653131661 +0100
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHALLOCTRACKER_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHALLOCTRACKER_HPP
+
+#include "gc/shenandoah/shenandoahAllocRequest.hpp"
+#include "gc/shenandoah/shenandoahNumberSeq.hpp"
+#include "memory/allocation.hpp"
+#include "utilities/ostream.hpp"
+
+class ShenandoahAllocTracker : public CHeapObj<mtGC> {
+private:
+  BinaryMagnitudeSeq _alloc_size[ShenandoahAllocRequest::_ALLOC_LIMIT];
+  BinaryMagnitudeSeq _alloc_latency[ShenandoahAllocRequest::_ALLOC_LIMIT];
+
+public:
+  void record_alloc_latency(size_t words_size,
+                            ShenandoahAllocRequest::Type _alloc_type,
+                            double latency_us) {
+    _alloc_size[_alloc_type].add(words_size);
+    _alloc_latency[_alloc_type].add((size_t)latency_us);
+  }
+
+  void print_on(outputStream* out) const;
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHALLOCTRACKER_HPP
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp 2020-01-17 17:09:49.263131628 +0100
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +#include "precompiled.hpp" +#include "gc/shared/gcArguments.inline.hpp" +#include "gc/shenandoah/shenandoahArguments.hpp" +#include "gc/shenandoah/shenandoahCollectorPolicy.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.hpp" +#include "utilities/defaultStream.hpp" + +void ShenandoahArguments::initialize() { +#if !(defined AARCH64 || defined AMD64 || defined IA32) + vm_exit_during_initialization("Shenandoah GC is not supported on this platform."); +#endif + +#if 0 // leave this block as stepping stone for future platforms + log_warning(gc)("Shenandoah GC is not fully supported on this platform:"); + log_warning(gc)(" concurrent modes are not supported, only STW cycles are enabled;"); + log_warning(gc)(" arch-specific barrier code is not implemented, disabling barriers;"); + + FLAG_SET_DEFAULT(ShenandoahGCHeuristics, "passive"); + + FLAG_SET_DEFAULT(ShenandoahSATBBarrier, false); + FLAG_SET_DEFAULT(ShenandoahLoadRefBarrier, false); + FLAG_SET_DEFAULT(ShenandoahKeepAliveBarrier, false); + FLAG_SET_DEFAULT(ShenandoahStoreValEnqueueBarrier, false); + FLAG_SET_DEFAULT(ShenandoahCASBarrier, false); + FLAG_SET_DEFAULT(ShenandoahCloneBarrier, false); + + FLAG_SET_DEFAULT(ShenandoahVerifyOptoBarriers, false); +#endif + + if (UseLargePages && (MaxHeapSize / os::large_page_size()) < ShenandoahHeapRegion::MIN_NUM_REGIONS) { + warning("Large pages size (" SIZE_FORMAT "K) is too large to afford page-sized regions, disabling uncommit", + os::large_page_size() / K); + FLAG_SET_DEFAULT(ShenandoahUncommit, false); + } + + // Enable NUMA by default. While Shenandoah is not NUMA-aware, enabling NUMA makes + // storage allocation code NUMA-aware. + if (FLAG_IS_DEFAULT(UseNUMA)) { + FLAG_SET_DEFAULT(UseNUMA, true); + } + + // Set up default number of concurrent threads. We want to have cycles complete fast + // enough, but we also do not want to steal too much CPU from the concurrently running + // application. Using 1/4 of available threads for concurrent GC seems a good + // compromise here. + bool ergo_conc = FLAG_IS_DEFAULT(ConcGCThreads); + if (ergo_conc) { + FLAG_SET_DEFAULT(ConcGCThreads, MAX2(1, os::processor_count() / 4)); + } + + if (ConcGCThreads == 0) { + vm_exit_during_initialization("Shenandoah expects ConcGCThreads > 0, check -XX:ConcGCThreads=#"); + } + + // Set up default number of parallel threads. We want to have decent pauses performance + // which would use parallel threads, but we also do not want to do too many threads + // that will overwhelm the OS scheduler. Using 1/2 of available threads seems to be a fair + // compromise here. Due to implementation constraints, it should not be lower than + // the number of concurrent threads. + bool ergo_parallel = FLAG_IS_DEFAULT(ParallelGCThreads); + if (ergo_parallel) { + FLAG_SET_DEFAULT(ParallelGCThreads, MAX2(1, os::processor_count() / 2)); + } + + if (ParallelGCThreads == 0) { + vm_exit_during_initialization("Shenandoah expects ParallelGCThreads > 0, check -XX:ParallelGCThreads=#"); + } + + // Make sure ergonomic decisions do not break the thread count invariants. + // This may happen when user overrides one of the flags, but not the other. + // When that happens, we want to adjust the setting that was set ergonomically. 
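+  // For illustration (numbers assumed, not from this change): on a 16-CPU machine with
+  // only -XX:ParallelGCThreads=2 given, ergonomics above would pick ConcGCThreads = 4;
+  // since only ConcGCThreads was set ergonomically, the first branch below lowers it
+  // to 2 instead of rejecting the user's ParallelGCThreads setting.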
+  if (ParallelGCThreads < ConcGCThreads) {
+    if (ergo_conc && !ergo_parallel) {
+      FLAG_SET_DEFAULT(ConcGCThreads, ParallelGCThreads);
+    } else if (!ergo_conc && ergo_parallel) {
+      FLAG_SET_DEFAULT(ParallelGCThreads, ConcGCThreads);
+    } else if (ergo_conc && ergo_parallel) {
+      // Should not happen, check the ergonomic computation above. Fail with relevant error.
+      vm_exit_during_initialization("Shenandoah thread count ergonomic error");
+    } else {
+      // User settings error, report and ask user to rectify.
+      vm_exit_during_initialization("Shenandoah expects ConcGCThreads <= ParallelGCThreads, check -XX:ParallelGCThreads, -XX:ConcGCThreads");
+    }
+  }
+
+  if (FLAG_IS_DEFAULT(ParallelRefProcEnabled)) {
+    FLAG_SET_DEFAULT(ParallelRefProcEnabled, true);
+  }
+
+  if (ShenandoahRegionSampling && FLAG_IS_DEFAULT(PerfDataMemorySize)) {
+    // When sampling is enabled, max out the PerfData memory to get more
+    // Shenandoah data in, including Matrix.
+    FLAG_SET_DEFAULT(PerfDataMemorySize, 2048*K);
+  }
+
+#ifdef COMPILER2
+  // Shenandoah cares more about pause times than raw throughput.
+  if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
+    FLAG_SET_DEFAULT(UseCountedLoopSafepoints, true);
+    if (FLAG_IS_DEFAULT(LoopStripMiningIter)) {
+      FLAG_SET_DEFAULT(LoopStripMiningIter, 1000);
+    }
+  }
+#ifdef ASSERT
+  // C2 barrier verification is only reliable when all default barriers are enabled
+  if (ShenandoahVerifyOptoBarriers &&
+      (!FLAG_IS_DEFAULT(ShenandoahSATBBarrier) ||
+       !FLAG_IS_DEFAULT(ShenandoahLoadRefBarrier) ||
+       !FLAG_IS_DEFAULT(ShenandoahKeepAliveBarrier) ||
+       !FLAG_IS_DEFAULT(ShenandoahStoreValEnqueueBarrier) ||
+       !FLAG_IS_DEFAULT(ShenandoahCASBarrier) ||
+       !FLAG_IS_DEFAULT(ShenandoahCloneBarrier)
+      )) {
+    warning("Unusual barrier configuration, disabling C2 barrier verification");
+    FLAG_SET_DEFAULT(ShenandoahVerifyOptoBarriers, false);
+  }
+#else
+  guarantee(!ShenandoahVerifyOptoBarriers, "Should be disabled");
+#endif // ASSERT
+#endif // COMPILER2
+
+  if (AlwaysPreTouch) {
+    // Shenandoah handles pre-touch on its own. It does not let the
+    // generic storage code do the pre-touch before Shenandoah has
+    // a chance to do it on its own.
+    FLAG_SET_DEFAULT(AlwaysPreTouch, false);
+    FLAG_SET_DEFAULT(ShenandoahAlwaysPreTouch, true);
+  }
+
+  // Shenandoah C2 optimizations apparently dislike the shape of thread-local handshakes.
+  // Disable it by default, unless we enable it specifically for debugging.
+  if (FLAG_IS_DEFAULT(ThreadLocalHandshakes)) {
+    if (ThreadLocalHandshakes) {
+      FLAG_SET_DEFAULT(ThreadLocalHandshakes, false);
+    }
+  } else {
+    if (ThreadLocalHandshakes) {
+      warning("Thread-local handshakes are not working correctly with Shenandoah at the moment. Enable at your own risk.");
+    }
+  }
+
+  // Record more information about previous cycles for improved debugging pleasure
+  if (FLAG_IS_DEFAULT(LogEventsBufferEntries)) {
+    FLAG_SET_DEFAULT(LogEventsBufferEntries, 250);
+  }
+
+  if (ShenandoahAlwaysPreTouch) {
+    if (!FLAG_IS_DEFAULT(ShenandoahUncommit)) {
+      warning("AlwaysPreTouch is enabled, disabling ShenandoahUncommit");
+    }
+    FLAG_SET_DEFAULT(ShenandoahUncommit, false);
+  }
+
+  if ((InitialHeapSize == MaxHeapSize) && ShenandoahUncommit) {
+    log_info(gc)("Min heap equals max heap, disabling ShenandoahUncommit");
+    FLAG_SET_DEFAULT(ShenandoahUncommit, false);
+  }
+
+  // If class unloading is disabled, no unloading for concurrent cycles as well.
+  // If class unloading is enabled, users should opt-in for unloading during
+  // concurrent cycles.
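+  // Concretely (illustrative): with default -XX:+ClassUnloading and no explicit
+  // ClassUnloadingWithConcurrentMark on the command line, the hint below is logged and
+  // concurrent-mark unloading stays disabled; passing -XX:+ClassUnloadingWithConcurrentMark
+  // explicitly makes FLAG_IS_CMDLINE() true, so the user's choice is left alone.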
+  if (!ClassUnloading || !FLAG_IS_CMDLINE(ClassUnloadingWithConcurrentMark)) {
+    log_info(gc)("Consider -XX:+ClassUnloadingWithConcurrentMark if large pause times "
+                 "are observed on class-unloading sensitive workloads");
+    FLAG_SET_DEFAULT(ClassUnloadingWithConcurrentMark, false);
+  }
+
+  // AOT is not supported yet
+  if (UseAOT) {
+    if (!FLAG_IS_DEFAULT(UseAOT)) {
+      warning("Shenandoah does not support AOT at this moment, disabling UseAOT");
+    }
+    FLAG_SET_DEFAULT(UseAOT, false);
+  }
+
+  // TLAB sizing policy makes resizing decisions before each GC cycle. It averages
+  // historical data, assigning more recent data the weight according to TLABAllocationWeight.
+  // Current default is good for generational collectors that run frequent young GCs.
+  // With Shenandoah, GC cycles are much less frequent, so we need the sizing policy
+  // to converge faster over a smaller number of resizing decisions.
+  if (FLAG_IS_DEFAULT(TLABAllocationWeight)) {
+    FLAG_SET_DEFAULT(TLABAllocationWeight, 90);
+  }
+
+  // Shenandoah needs more C2 nodes to compile some methods with lots of barriers.
+  // NodeLimitFudgeFactor needs to stay the same relative to MaxNodeLimit.
+#ifdef COMPILER2
+  if (FLAG_IS_DEFAULT(MaxNodeLimit)) {
+    FLAG_SET_DEFAULT(MaxNodeLimit, MaxNodeLimit * 3);
+    FLAG_SET_DEFAULT(NodeLimitFudgeFactor, NodeLimitFudgeFactor * 3);
+  }
+#endif
+
+  // Make sure safepoint deadlocks are failing predictably. This sets up the VM to report
+  // a fatal error after 10 seconds of waiting for safepoint synchronization (not the VM
+  // operation itself). There is no good reason why Shenandoah would spend that
+  // much time synchronizing.
+#ifdef ASSERT
+  FLAG_SET_DEFAULT(SafepointTimeout, true);
+  FLAG_SET_DEFAULT(SafepointTimeoutDelay, 10000);
+  FLAG_SET_DEFAULT(AbortVMOnSafepointTimeout, true);
+#endif
+}
+
+size_t ShenandoahArguments::conservative_max_heap_alignment() {
+  size_t align = ShenandoahMaxRegionSize;
+  if (UseLargePages) {
+    align = MAX2(align, os::large_page_size());
+  }
+  return align;
+}
+
+CollectedHeap* ShenandoahArguments::create_heap() {
+  return create_heap_with_policy<ShenandoahHeap, ShenandoahCollectorPolicy>();
+}
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahArguments.hpp 2020-01-17 17:09:49.865131595 +0100
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHARGUMENTS_HPP +#define SHARE_GC_SHENANDOAH_SHENANDOAHARGUMENTS_HPP + +#include "gc/shared/gcArguments.hpp" + +class CollectedHeap; + +class ShenandoahArguments : public GCArguments { +public: + virtual void initialize(); + + virtual size_t conservative_max_heap_alignment(); + + virtual CollectedHeap* create_heap(); +}; + +#endif // SHARE_GC_SHENANDOAH_SHENANDOAHARGUMENTS_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp 2020-01-17 17:09:50.471131561 +0100 @@ -0,0 +1,377 @@ +/* + * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/shenandoahAsserts.hpp" +#include "gc/shenandoah/shenandoahForwarding.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp" +#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp" +#include "gc/shenandoah/shenandoahTraversalGC.hpp" +#include "gc/shenandoah/shenandoahUtils.hpp" +#include "memory/resourceArea.hpp" + +void print_raw_memory(ShenandoahMessageBuffer &msg, void* loc) { + // Be extra safe. Only access data that is guaranteed to be safe: + // should be in heap, in known committed region, within that region. + + ShenandoahHeap* heap = ShenandoahHeap::heap(); + if (!heap->is_in(loc)) return; + + ShenandoahHeapRegion* r = heap->heap_region_containing(loc); + if (r != NULL && r->is_committed()) { + address start = MAX2((address) r->bottom(), (address) loc - 32); + address end = MIN2((address) r->end(), (address) loc + 128); + if (start >= end) return; + + stringStream ss; + os::print_hex_dump(&ss, start, end, 4); + msg.append("\n"); + msg.append("Raw heap memory:\n%s", ss.as_string()); + } +} + +void ShenandoahAsserts::print_obj(ShenandoahMessageBuffer& msg, oop obj) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahHeapRegion *r = heap->heap_region_containing(obj); + + ResourceMark rm; + stringStream ss; + r->print_on(&ss); + + stringStream mw_ss; + obj->mark()->print_on(&mw_ss); + + ShenandoahMarkingContext* const ctx = heap->marking_context(); + + msg.append(" " PTR_FORMAT " - klass " PTR_FORMAT " %s\n", p2i(obj), p2i(obj->klass()), obj->klass()->external_name()); + msg.append(" %3s allocated after mark start\n", ctx->allocated_after_mark_start((HeapWord *) obj) ? "" : "not"); + msg.append(" %3s marked \n", ctx->is_marked(obj) ? "" : "not"); + msg.append(" %3s in collection set\n", heap->in_collection_set(obj) ? 
"" : "not"); + if (heap->traversal_gc() != NULL) { + msg.append(" %3s in traversal set\n", heap->traversal_gc()->traversal_set()->is_in((HeapWord*) obj) ? "" : "not"); + } + msg.append(" mark:%s\n", mw_ss.as_string()); + msg.append(" region: %s", ss.as_string()); +} + +void ShenandoahAsserts::print_non_obj(ShenandoahMessageBuffer& msg, void* loc) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + if (heap->is_in(loc)) { + msg.append(" inside Java heap\n"); + ShenandoahHeapRegion *r = heap->heap_region_containing(loc); + stringStream ss; + r->print_on(&ss); + + msg.append(" %3s in collection set\n", heap->in_collection_set(loc) ? "" : "not"); + msg.append(" region: %s", ss.as_string()); + } else { + msg.append(" outside of Java heap\n"); + stringStream ss; + os::print_location(&ss, (intptr_t) loc, false); + msg.append(" %s", ss.as_string()); + } +} + +void ShenandoahAsserts::print_obj_safe(ShenandoahMessageBuffer& msg, void* loc) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + msg.append(" " PTR_FORMAT " - safe print, no details\n", p2i(loc)); + if (heap->is_in(loc)) { + ShenandoahHeapRegion* r = heap->heap_region_containing(loc); + if (r != NULL) { + stringStream ss; + r->print_on(&ss); + msg.append(" region: %s", ss.as_string()); + print_raw_memory(msg, loc); + } + } +} + +void ShenandoahAsserts::print_failure(SafeLevel level, oop obj, void* interior_loc, oop loc, + const char* phase, const char* label, + const char* file, int line) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ResourceMark rm; + + bool loc_in_heap = (loc != NULL && heap->is_in(loc)); + + ShenandoahMessageBuffer msg("%s; %s\n\n", phase, label); + + msg.append("Referenced from:\n"); + if (interior_loc != NULL) { + msg.append(" interior location: " PTR_FORMAT "\n", p2i(interior_loc)); + if (loc_in_heap) { + print_obj(msg, loc); + } else { + print_non_obj(msg, interior_loc); + } + } else { + msg.append(" no interior location recorded (probably a plain heap scan, or detached oop)\n"); + } + msg.append("\n"); + + msg.append("Object:\n"); + if (level >= _safe_oop) { + print_obj(msg, obj); + } else { + print_obj_safe(msg, obj); + } + msg.append("\n"); + + if (level >= _safe_oop) { + oop fwd = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(obj); + msg.append("Forwardee:\n"); + if (obj != fwd) { + if (level >= _safe_oop_fwd) { + print_obj(msg, fwd); + } else { + print_obj_safe(msg, fwd); + } + } else { + msg.append(" (the object itself)"); + } + msg.append("\n"); + } + + if (level >= _safe_oop_fwd) { + oop fwd = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(obj); + oop fwd2 = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(fwd); + if (fwd != fwd2) { + msg.append("Second forwardee:\n"); + print_obj_safe(msg, fwd2); + msg.append("\n"); + } + } + + report_vm_error(file, line, msg.buffer()); +} + +void ShenandoahAsserts::assert_in_heap(void* interior_loc, oop obj, const char *file, int line) { + ShenandoahHeap* heap = ShenandoahHeap::heap_no_check(); + + if (!heap->is_in(obj)) { + print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_in_heap failed", + "oop must point to a heap address", + file, line); + } +} + +void ShenandoahAsserts::assert_correct(void* interior_loc, oop obj, const char* file, int line) { + ShenandoahHeap* heap = ShenandoahHeap::heap_no_check(); + + // Step 1. Check that obj is correct. + // After this step, it is safe to call heap_region_containing(). 
+  if (!heap->is_in(obj)) {
+    print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
+                  "oop must point to a heap address",
+                  file, line);
+  }
+
+  Klass* obj_klass = obj->klass_or_null();
+  if (obj_klass == NULL) {
+    print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
+                  "Object klass pointer should not be NULL",
+                  file, line);
+  }
+
+  if (!Metaspace::contains(obj_klass)) {
+    print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
+                  "Object klass pointer must go to metaspace",
+                  file, line);
+  }
+
+  oop fwd = oop(ShenandoahForwarding::get_forwardee_raw_unchecked(obj));
+
+  if (obj != fwd) {
+    // When Full GC moves the objects, we cannot trust fwdptrs. If we got here, it means something
+    // tries fwdptr manipulation when Full GC is running. The only exception is using the fwdptr
+    // that still points to the object itself.
+    if (heap->is_full_gc_move_in_progress()) {
+      print_failure(_safe_oop, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
+                    "Non-trivial forwarding pointer during Full GC moves, probable bug.",
+                    file, line);
+    }
+
+    // Step 2. Check that forwardee is correct
+    if (!heap->is_in(fwd)) {
+      print_failure(_safe_oop, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
+                    "Forwardee must point to a heap address",
+                    file, line);
+    }
+
+    if (obj_klass != fwd->klass()) {
+      print_failure(_safe_oop, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
+                    "Forwardee klass disagrees with object class",
+                    file, line);
+    }
+
+    // Step 3. Check that forwardee points to correct region
+    if (heap->heap_region_index_containing(fwd) == heap->heap_region_index_containing(obj)) {
+      print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
+                    "Non-trivial forwardee should be in another region",
+                    file, line);
+    }
+
+    // Step 4. Check for multiple forwardings
+    oop fwd2 = oop(ShenandoahForwarding::get_forwardee_raw_unchecked(fwd));
+    if (fwd != fwd2) {
+      print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
+                    "Multiple forwardings",
+                    file, line);
+    }
+  }
+}
+
+void ShenandoahAsserts::assert_in_correct_region(void* interior_loc, oop obj, const char* file, int line) {
+  assert_correct(interior_loc, obj, file, line);
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap_no_check();
+  ShenandoahHeapRegion* r = heap->heap_region_containing(obj);
+  if (!r->is_active()) {
+    print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_in_correct_region failed",
+                  "Object must reside in active region",
+                  file, line);
+  }
+
+  size_t alloc_size = obj->size();
+  if (alloc_size > ShenandoahHeapRegion::humongous_threshold_words()) {
+    size_t idx = r->region_number();
+    size_t num_regions = ShenandoahHeapRegion::required_regions(alloc_size * HeapWordSize);
+    for (size_t i = idx; i < idx + num_regions; i++) {
+      ShenandoahHeapRegion* chain_reg = heap->get_region(i);
+      if (i == idx && !chain_reg->is_humongous_start()) {
+        print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_in_correct_region failed",
+                      "Object must reside in humongous start",
+                      file, line);
+      }
+      if (i != idx && !chain_reg->is_humongous_continuation()) {
+        print_failure(_safe_oop, obj, interior_loc, NULL, "Shenandoah assert_in_correct_region failed",
+                      "Humongous continuation should be of proper size",
+                      file, line);
+      }
+    }
+  }
+}
+
+void ShenandoahAsserts::assert_forwarded(void* interior_loc, oop obj, const char* file, int line) {
+  assert_correct(interior_loc, obj, file, line);
+  oop fwd = oop(ShenandoahForwarding::get_forwardee_raw_unchecked(obj));
+
+  if (obj == fwd) {
+    print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_forwarded failed",
+                  "Object should be forwarded",
+                  file, line);
+  }
+}
+
+void ShenandoahAsserts::assert_not_forwarded(void* interior_loc, oop obj, const char* file, int line) {
+  assert_correct(interior_loc, obj, file, line);
+  oop fwd = oop(ShenandoahForwarding::get_forwardee_raw_unchecked(obj));
+
+  if (obj != fwd) {
+    print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_not_forwarded failed",
+                  "Object should not be forwarded",
+                  file, line);
+  }
+}
+
+void ShenandoahAsserts::assert_marked(void *interior_loc, oop obj, const char *file, int line) {
+  assert_correct(interior_loc, obj, file, line);
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap_no_check();
+  if (!heap->marking_context()->is_marked(obj)) {
+    print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_marked failed",
+                  "Object should be marked",
+                  file, line);
+  }
+}
+
+void ShenandoahAsserts::assert_in_cset(void* interior_loc, oop obj, const char* file, int line) {
+  assert_correct(interior_loc, obj, file, line);
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap_no_check();
+  if (!heap->in_collection_set(obj)) {
+    print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_in_cset failed",
+                  "Object should be in collection set",
+                  file, line);
+  }
+}
+
+void ShenandoahAsserts::assert_not_in_cset(void* interior_loc, oop obj, const char* file, int line) {
+  assert_correct(interior_loc, obj, file, line);
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap_no_check();
+  if (heap->in_collection_set(obj)) {
+    print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_not_in_cset failed",
+                  "Object should not be in collection set",
+                  file, line);
+  }
+}
+
+void ShenandoahAsserts::assert_not_in_cset_loc(void* interior_loc, const char* file, int line) {
+  ShenandoahHeap* heap = ShenandoahHeap::heap_no_check();
+  if (heap->in_collection_set(interior_loc)) {
+    print_failure(_safe_unknown, NULL, interior_loc, NULL, "Shenandoah assert_not_in_cset_loc failed",
+                  "Interior location should not be in collection set",
+                  file, line);
+  }
+}
+
+void ShenandoahAsserts::print_rp_failure(const char *label, BoolObjectClosure* actual,
+                                         const char *file, int line) {
+  ShenandoahMessageBuffer msg("%s\n", label);
+  msg.append(" Actual: " PTR_FORMAT "\n", p2i(actual));
+  report_vm_error(file, line, msg.buffer());
+}
+
+void ShenandoahAsserts::assert_rp_isalive_not_installed(const char *file, int line) {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  ReferenceProcessor* rp = heap->ref_processor();
+  if (rp->is_alive_non_header() != NULL) {
+    print_rp_failure("Shenandoah assert_rp_isalive_not_installed failed", rp->is_alive_non_header(),
+                     file, line);
+  }
+}
+
+void ShenandoahAsserts::assert_rp_isalive_installed(const char *file, int line) {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  ReferenceProcessor* rp = heap->ref_processor();
+  if (rp->is_alive_non_header() == NULL) {
+    print_rp_failure("Shenandoah assert_rp_isalive_installed failed", rp->is_alive_non_header(),
+                     file, line);
+  }
+}
+
+void ShenandoahAsserts::assert_locked_or_shenandoah_safepoint(const Monitor* lock, const char* file, int line) {
+  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
+    return;
+  }
+
+  if (lock->owned_by_self()) {
+    return;
+  }
+
+  ShenandoahMessageBuffer msg("Must be at a Shenandoah safepoint or hold %s lock", lock->name());
+  report_vm_error(file, line, msg.buffer());
+}
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahAsserts.hpp 2020-01-17 17:09:51.076131528 +0100
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHASSERTS_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHASSERTS_HPP + +#include "memory/iterator.hpp" +#include "runtime/mutex.hpp" +#include "utilities/formatBuffer.hpp" + +typedef FormatBuffer<8192> ShenandoahMessageBuffer; + +class ShenandoahAsserts { +public: + enum SafeLevel { + _safe_unknown, + _safe_oop, + _safe_oop_fwd, + _safe_all + }; + + static void print_obj(ShenandoahMessageBuffer &msg, oop obj); + + static void print_non_obj(ShenandoahMessageBuffer &msg, void *loc); + + static void print_obj_safe(ShenandoahMessageBuffer &msg, void *loc); + + static void print_failure(SafeLevel level, oop obj, void *interior_loc, oop loc, + const char *phase, const char *label, + const char *file, int line); + + static void print_rp_failure(const char *label, BoolObjectClosure* actual, + const char *file, int line); + + static void assert_in_heap(void* interior_loc, oop obj, const char* file, int line); + static void assert_in_correct_region(void* interior_loc, oop obj, const char* file, int line); + + static void assert_correct(void* interior_loc, oop obj, const char* file, int line); + static void assert_forwarded(void* interior_loc, oop obj, const char* file, int line); + static void assert_not_forwarded(void* interior_loc, oop obj, const char* file, int line); + static void assert_marked(void* interior_loc, oop obj, const char* file, int line); + static void assert_in_cset(void* interior_loc, oop obj, const char* file, int line); + static void assert_not_in_cset(void* interior_loc, oop obj, const char* file, int line); + static void assert_not_in_cset_loc(void* interior_loc, const char* file, int line); + + static void assert_rp_isalive_not_installed(const char *file, int line); + static void assert_rp_isalive_installed(const char *file, int line); + + static void assert_locked_or_shenandoah_safepoint(const Monitor* lock, const char*file, int line); + +#ifdef ASSERT +#define shenandoah_assert_in_heap(interior_loc, obj) \ + ShenandoahAsserts::assert_in_heap(interior_loc, obj, __FILE__, __LINE__); +#define shenandoah_assert_in_correct_region(interior_loc, obj) \ + ShenandoahAsserts::assert_in_correct_region(interior_loc, obj, __FILE__, __LINE__); + +#define shenandoah_assert_correct_if(interior_loc, obj, condition) \ + if (condition) ShenandoahAsserts::assert_correct(interior_loc, obj, __FILE__, __LINE__); +#define shenandoah_assert_correct_except(interior_loc, obj, exception) \ + if (!(exception)) ShenandoahAsserts::assert_correct(interior_loc, obj, __FILE__, __LINE__); +#define shenandoah_assert_correct(interior_loc, obj) \ + ShenandoahAsserts::assert_correct(interior_loc, obj, __FILE__, __LINE__); + +#define shenandoah_assert_forwarded_if(interior_loc, obj, condition) \ + if (condition) ShenandoahAsserts::assert_forwarded(interior_loc, obj, __FILE__, __LINE__); +#define shenandoah_assert_forwarded_except(interior_loc, obj, exception) \ + if (!(exception)) ShenandoahAsserts::assert_forwarded(interior_loc, obj, __FILE__, __LINE__); +#define shenandoah_assert_forwarded(interior_loc, obj) \ + ShenandoahAsserts::assert_forwarded(interior_loc, obj, __FILE__, __LINE__); + +#define shenandoah_assert_not_forwarded_if(interior_loc, obj, condition) \ + if (condition) ShenandoahAsserts::assert_not_forwarded(interior_loc, obj, __FILE__, __LINE__); +#define shenandoah_assert_not_forwarded_except(interior_loc, obj, exception) \ + if (!(exception)) ShenandoahAsserts::assert_not_forwarded(interior_loc, obj, __FILE__, __LINE__); +#define 
shenandoah_assert_not_forwarded(interior_loc, obj) \ + ShenandoahAsserts::assert_not_forwarded(interior_loc, obj, __FILE__, __LINE__); + +#define shenandoah_assert_marked_if(interior_loc, obj, condition) \ + if (condition) ShenandoahAsserts::assert_marked(interior_loc, obj, __FILE__, __LINE__); +#define shenandoah_assert_marked_except(interior_loc, obj, exception) \ + if (!(exception)) ShenandoahAsserts::assert_marked(interior_loc, obj, __FILE__, __LINE__); +#define shenandoah_assert_marked(interior_loc, obj) \ + ShenandoahAsserts::assert_marked(interior_loc, obj, __FILE__, __LINE__); + +#define shenandoah_assert_in_cset_if(interior_loc, obj, condition) \ + if (condition) ShenandoahAsserts::assert_in_cset(interior_loc, obj, __FILE__, __LINE__); +#define shenandoah_assert_in_cset_except(interior_loc, obj, exception) \ + if (!(exception)) ShenandoahAsserts::assert_in_cset(interior_loc, obj, __FILE__, __LINE__); +#define shenandoah_assert_in_cset(interior_loc, obj) \ + ShenandoahAsserts::assert_in_cset(interior_loc, obj, __FILE__, __LINE__); + +#define shenandoah_assert_not_in_cset_if(interior_loc, obj, condition) \ + if (condition) ShenandoahAsserts::assert_not_in_cset(interior_loc, obj, __FILE__, __LINE__); +#define shenandoah_assert_not_in_cset_except(interior_loc, obj, exception) \ + if (!(exception)) ShenandoahAsserts::assert_not_in_cset(interior_loc, obj, __FILE__, __LINE__); +#define shenandoah_assert_not_in_cset(interior_loc, obj) \ + ShenandoahAsserts::assert_not_in_cset(interior_loc, obj, __FILE__, __LINE__); + +#define shenandoah_assert_not_in_cset_loc_if(interior_loc, condition) \ + if (condition) ShenandoahAsserts::assert_not_in_cset_loc(interior_loc, __FILE__, __LINE__); +#define shenandoah_assert_not_in_cset_loc_except(interior_loc, exception) \ + if (!(exception)) ShenandoahAsserts::assert_not_in_cset_loc(interior_loc, __FILE__, __LINE__); +#define shenandoah_assert_not_in_cset_loc(interior_loc) \ + ShenandoahAsserts::assert_not_in_cset_loc(interior_loc, __FILE__, __LINE__); + +#define shenandoah_assert_rp_isalive_installed() \ + ShenandoahAsserts::assert_rp_isalive_installed(__FILE__, __LINE__); +#define shenandoah_assert_rp_isalive_not_installed() \ + ShenandoahAsserts::assert_rp_isalive_not_installed(__FILE__, __LINE__); + +#define shenandoah_assert_safepoint() \ + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at Shenandoah Safepoints"); + +#define shenandoah_assert_locked_or_safepoint(lock) \ + ShenandoahAsserts::assert_locked_or_shenandoah_safepoint(lock, __FILE__, __LINE__); +#else +#define shenandoah_assert_in_heap(interior_loc, obj) +#define shenandoah_assert_in_correct_region(interior_loc, obj) + +#define shenandoah_assert_correct_if(interior_loc, obj, condition) +#define shenandoah_assert_correct_except(interior_loc, obj, exception) +#define shenandoah_assert_correct(interior_loc, obj) + +#define shenandoah_assert_forwarded_if(interior_loc, obj, condition) +#define shenandoah_assert_forwarded_except(interior_loc, obj, exception) +#define shenandoah_assert_forwarded(interior_loc, obj) + +#define shenandoah_assert_not_forwarded_if(interior_loc, obj, condition) +#define shenandoah_assert_not_forwarded_except(interior_loc, obj, exception) +#define shenandoah_assert_not_forwarded(interior_loc, obj) + +#define shenandoah_assert_marked_if(interior_loc, obj, condition) +#define shenandoah_assert_marked_except(interior_loc, obj, exception) +#define shenandoah_assert_marked(interior_loc, obj) + +#define shenandoah_assert_in_cset_if(interior_loc, obj, 
condition) +#define shenandoah_assert_in_cset_except(interior_loc, obj, exception) +#define shenandoah_assert_in_cset(interior_loc, obj) + +#define shenandoah_assert_not_in_cset_if(interior_loc, obj, condition) +#define shenandoah_assert_not_in_cset_except(interior_loc, obj, exception) +#define shenandoah_assert_not_in_cset(interior_loc, obj) + +#define shenandoah_assert_not_in_cset_loc_if(interior_loc, condition) +#define shenandoah_assert_not_in_cset_loc_except(interior_loc, exception) +#define shenandoah_assert_not_in_cset_loc(interior_loc) + +#define shenandoah_assert_rp_isalive_installed() +#define shenandoah_assert_rp_isalive_not_installed() + +#define shenandoah_assert_safepoint() +#define shenandoah_assert_locked_or_safepoint(lock) + +#endif + +#define shenandoah_not_implemented \ + { fatal("Deliberately not implemented."); } +#define shenandoah_not_implemented_return(v) \ + { fatal("Deliberately not implemented."); return v; } + +}; + +#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHASSERTS_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp 2020-01-17 17:09:51.679131495 +0100 @@ -0,0 +1,224 @@ +/* + * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shenandoah/shenandoahAsserts.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/shenandoahBarrierSetClone.inline.hpp"
+#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
+#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/shenandoahTraversalGC.hpp"
+#include "memory/iterator.inline.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+#ifdef COMPILER1
+#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
+#endif
+#ifdef COMPILER2
+#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+#endif
+
+class ShenandoahBarrierSetC1;
+class ShenandoahBarrierSetC2;
+
+ShenandoahSATBMarkQueueSet ShenandoahBarrierSet::_satb_mark_queue_set;
+
+ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) :
+  BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
+             make_barrier_set_c1<ShenandoahBarrierSetC1>(),
+             make_barrier_set_c2<ShenandoahBarrierSetC2>(),
+             BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet)),
+  _heap(heap)
+{
+}
+
+ShenandoahBarrierSetAssembler* ShenandoahBarrierSet::assembler() {
+  BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler();
+  return reinterpret_cast<ShenandoahBarrierSetAssembler*>(bsa);
+}
+
+void ShenandoahBarrierSet::print_on(outputStream* st) const {
+  st->print("ShenandoahBarrierSet");
+}
+
+bool ShenandoahBarrierSet::is_a(BarrierSet::Name bsn) {
+  return bsn == BarrierSet::ShenandoahBarrierSet;
+}
+
+bool ShenandoahBarrierSet::is_aligned(HeapWord* hw) {
+  return true;
+}
+
+bool ShenandoahBarrierSet::need_load_reference_barrier(DecoratorSet decorators, BasicType type) {
+  if (!ShenandoahLoadRefBarrier) return false;
+  // Only needed for references
+  return is_reference_type(type);
+}
+
+bool ShenandoahBarrierSet::need_keep_alive_barrier(DecoratorSet decorators, BasicType type) {
+  if (!ShenandoahKeepAliveBarrier) return false;
+  // Only needed for references
+  if (!is_reference_type(type)) return false;
+
+  bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
+  bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
+  bool is_traversal_mode = ShenandoahHeap::heap()->is_traversal_mode();
+  bool on_weak_ref = (decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF)) != 0;
+  return (on_weak_ref || unknown) && (keep_alive || is_traversal_mode);
+}
+
+oop ShenandoahBarrierSet::load_reference_barrier_not_null(oop obj) {
+  if (ShenandoahLoadRefBarrier && _heap->has_forwarded_objects()) {
+    return load_reference_barrier_impl(obj);
+  } else {
+    return obj;
+  }
+}
+
+oop ShenandoahBarrierSet::load_reference_barrier(oop obj) {
+  if (obj != NULL) {
+    return load_reference_barrier_not_null(obj);
+  } else {
+    return obj;
+  }
+}
+
+oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj, oop* load_addr) {
+  return load_reference_barrier_mutator_work(obj, load_addr);
+}
+
+oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj, narrowOop* load_addr) {
+  return load_reference_barrier_mutator_work(obj, load_addr);
+}
+
+template <class T>
+oop ShenandoahBarrierSet::load_reference_barrier_mutator_work(oop obj, T* load_addr) {
+  assert(ShenandoahLoadRefBarrier, "should be enabled");
+  shenandoah_assert_in_cset(load_addr, obj);
+
+  oop fwd = resolve_forwarded_not_null(obj);
+  if (obj == fwd) {
+    assert(_heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL),
+           "evac should be in progress");
+
+    ShenandoahEvacOOMScope oom_evac_scope;
+
+    Thread* thread = Thread::current();
+    oop res_oop = _heap->evacuate_object(obj, thread);
+
+    // Since we are already here and paid the price of getting through runtime call adapters
+    // and acquiring oom-scope, it makes sense to try and evacuate more adjacent objects,
+    // thus amortizing the overhead. For sparsely live heaps, scan costs easily dominate
+    // total assist costs, and can introduce a lot of evacuation latency. This is why we
+    // only scan for _nearest_ N objects, regardless if they are eligible for evac or not.
+    // The scan itself should also avoid touching the non-marked objects below TAMS, because
+    // their metadata (notably, klasses) may be incorrect already.
+
+    size_t max = ShenandoahEvacAssist;
+    if (max > 0) {
+      // Traversal is special: it uses incomplete marking context, because it coalesces evac with mark.
+      // Other code uses complete marking context, because evac happens after the mark.
+      ShenandoahMarkingContext* ctx = _heap->is_concurrent_traversal_in_progress() ?
+                                      _heap->marking_context() : _heap->complete_marking_context();
+
+      ShenandoahHeapRegion* r = _heap->heap_region_containing(obj);
+      assert(r->is_cset(), "sanity");
+
+      HeapWord* cur = (HeapWord*)obj + obj->size();
+
+      size_t count = 0;
+      while ((cur < r->top()) && ctx->is_marked(oop(cur)) && (count++ < max)) {
+        oop cur_oop = oop(cur);
+        if (cur_oop == resolve_forwarded_not_null(cur_oop)) {
+          _heap->evacuate_object(cur_oop, thread);
+        }
+        cur = cur + cur_oop->size();
+      }
+    }
+
+    fwd = res_oop;
+  }
+
+  if (load_addr != NULL && fwd != obj) {
+    // Since we are here and we know the load address, update the reference.
+    ShenandoahHeap::cas_oop(fwd, load_addr, obj);
+  }
+
+  return fwd;
+}
+
+oop ShenandoahBarrierSet::load_reference_barrier_impl(oop obj) {
+  assert(ShenandoahLoadRefBarrier, "should be enabled");
+  if (!CompressedOops::is_null(obj)) {
+    bool evac_in_progress = _heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
+    oop fwd = resolve_forwarded_not_null(obj);
+    if (evac_in_progress &&
+        _heap->in_collection_set(obj) &&
+        obj == fwd) {
+      Thread *t = Thread::current();
+      ShenandoahEvacOOMScope oom_evac_scope;
+      return _heap->evacuate_object(obj, t);
+    } else {
+      return fwd;
+    }
+  } else {
+    return obj;
+  }
+}
+
+void ShenandoahBarrierSet::on_thread_create(Thread* thread) {
+  // Create thread local data
+  ShenandoahThreadLocalData::create(thread);
+}
+
+void ShenandoahBarrierSet::on_thread_destroy(Thread* thread) {
+  // Destroy thread local data
+  ShenandoahThreadLocalData::destroy(thread);
+}
+
+void ShenandoahBarrierSet::on_thread_attach(JavaThread* thread) {
+  assert(!SafepointSynchronize::is_at_safepoint(), "We should not be at a safepoint");
+  assert(!ShenandoahThreadLocalData::satb_mark_queue(thread).is_active(), "SATB queue should not be active");
+  assert(ShenandoahThreadLocalData::satb_mark_queue(thread).is_empty(), "SATB queue should be empty");
+  if (ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
+    ShenandoahThreadLocalData::satb_mark_queue(thread).set_active(true);
+  }
+  ShenandoahThreadLocalData::set_gc_state(thread, _heap->gc_state());
+  ShenandoahThreadLocalData::initialize_gclab(thread);
+}
+
+void ShenandoahBarrierSet::on_thread_detach(JavaThread* thread) {
+  ShenandoahThreadLocalData::satb_mark_queue(thread).flush();
+  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
+  if (gclab != NULL) {
+    gclab->retire();
+  }
+}
+
+void ShenandoahBarrierSet::clone_barrier_runtime(oop src) {
+  if (_heap->has_forwarded_objects()) {
+    clone_barrier(src);
+  }
+}
+
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp 2020-01-17 17:09:52.273131462 +0100
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHBARRIERSET_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHBARRIERSET_HPP
+
+#include "gc/shared/accessBarrierSupport.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahSATBMarkQueue.hpp"
+
+class ShenandoahBarrierSetAssembler;
+
+class ShenandoahBarrierSet: public BarrierSet {
+public:
+  enum ArrayCopyStoreValMode {
+    NONE,
+    RESOLVE_BARRIER,
+    EVAC_BARRIER
+  };
+private:
+
+  static ShenandoahSATBMarkQueueSet _satb_mark_queue_set;
+
+  ShenandoahHeap* _heap;
+
+public:
+  ShenandoahBarrierSet(ShenandoahHeap* heap);
+
+  static ShenandoahBarrierSetAssembler* assembler();
+
+  inline static ShenandoahBarrierSet* barrier_set() {
+    return barrier_set_cast<ShenandoahBarrierSet>(BarrierSet::barrier_set());
+  }
+
+  static ShenandoahSATBMarkQueueSet& satb_mark_queue_set() {
+    return _satb_mark_queue_set;
+  }
+
+  static bool need_load_reference_barrier(DecoratorSet decorators, BasicType type);
+  static bool need_keep_alive_barrier(DecoratorSet decorators, BasicType type);
+
+  void print_on(outputStream* st) const;
+
+  bool is_a(BarrierSet::Name bsn);
+
+  bool is_aligned(HeapWord* hw);
+
+  template <class T> void
+  write_ref_array_pre_work(T* src, T* dst, size_t count, bool dest_uninitialized);
+
+  inline void arraycopy_pre(oop* src, oop* dst, size_t count);
+  inline void arraycopy_pre(narrowOop* src, narrowOop* dst, size_t count);
+  inline void arraycopy_update(oop* src, size_t count);
+  inline void arraycopy_update(narrowOop* src, size_t count);
+  inline void clone_barrier(oop src);
+  void clone_barrier_runtime(oop src);
+
+  virtual void on_thread_create(Thread* thread);
+  virtual void on_thread_destroy(Thread* thread);
+  virtual void on_thread_attach(JavaThread* thread);
+  virtual void on_thread_detach(JavaThread* thread);
+
+  static inline oop resolve_forwarded_not_null(oop p);
+  static inline oop resolve_forwarded(oop p);
+
+  template <DecoratorSet decorators, typename T>
+  inline void satb_barrier(T* field);
+  inline void satb_enqueue(oop value);
+  inline void storeval_barrier(oop obj);
+
+  template <DecoratorSet decorators>
+  inline void keep_alive_if_weak(oop value);
+  inline void keep_alive_if_weak(DecoratorSet decorators, oop value);
+  inline void keep_alive_barrier(oop value);
+
+  inline void enqueue(oop obj);
+
+  oop load_reference_barrier(oop obj);
+  oop load_reference_barrier_not_null(oop obj);
+
+  oop load_reference_barrier_mutator(oop obj, oop* load_addr);
+  oop load_reference_barrier_mutator(oop obj, narrowOop* load_addr);
+
+  template <class T>
+  oop load_reference_barrier_mutator_work(oop obj, T* load_addr);
+
+private:
+  template <class T>
+  inline void arraycopy_pre_work(T* src, T* dst, size_t count);
+  template <class T, bool HAS_FWD, bool EVAC, bool ENQUEUE>
+  inline void arraycopy_work(T* src, size_t count);
+  template <class T>
+  inline void arraycopy_update_impl(T* src, size_t count);
+
+  oop load_reference_barrier_impl(oop obj);
+
+public:
+  // Callbacks for runtime accesses.
+  template <DecoratorSet decorators, typename BarrierSetT>
+  class AccessBarrier: public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
+    typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Raw;
+
+  public:
+    // Heap oop accesses. These accessors get resolved when
+    // IN_HEAP is set (e.g. when using the HeapAccess API), it is
+    // an oop_* overload, and the barrier strength is AS_NORMAL.
+    template <typename T>
+    static oop oop_load_in_heap(T* addr);
+    static oop oop_load_in_heap_at(oop base, ptrdiff_t offset);
+
+    template <typename T>
+    static void oop_store_in_heap(T* addr, oop value);
+    static void oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value);
+
+    template <typename T>
+    static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value);
+    static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value);
+
+    template <typename T>
+    static oop oop_atomic_xchg_in_heap(oop new_value, T* addr);
+    static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset);
+
+    template <typename T>
+    static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
+                                      arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
+                                      size_t length);
+
+    // Clone barrier support
+    static void clone_in_heap(oop src, oop dst, size_t size);
+
+    // Needed for loads on non-heap weak references
+    template <typename T>
+    static oop oop_load_not_in_heap(T* addr);
+
+    // Used for catching bad stores
+    template <typename T>
+    static void oop_store_not_in_heap(T* addr, oop value);
+
+    template <typename T>
+    static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value);
+
+    template <typename T>
+    static oop oop_atomic_xchg_not_in_heap(oop new_value, T* addr);
+
+  };
+
+};
+
+template<>
+struct BarrierSet::GetName<ShenandoahBarrierSet> {
+  static const BarrierSet::Name value = BarrierSet::ShenandoahBarrierSet;
+};
+
+template<>
+struct BarrierSet::GetType<BarrierSet::ShenandoahBarrierSet> {
+  typedef ::ShenandoahBarrierSet type;
+};
+
+#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHBARRIERSET_HPP
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp 2020-01-17 17:09:52.882131428 +0100
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHBARRIERSET_INLINE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHBARRIERSET_INLINE_HPP
+
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
+#include "gc/shenandoah/shenandoahForwarding.inline.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
+#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
+#include "memory/iterator.inline.hpp"
+#include "oops/oop.inline.hpp"
+
+inline oop ShenandoahBarrierSet::resolve_forwarded_not_null(oop p) {
+  return ShenandoahForwarding::get_forwardee(p);
+}
+
+inline oop ShenandoahBarrierSet::resolve_forwarded(oop p) {
+  if (((HeapWord*) p) != NULL) {
+    return resolve_forwarded_not_null(p);
+  } else {
+    return p;
+  }
+}
+
+inline void ShenandoahBarrierSet::enqueue(oop obj) {
+  shenandoah_assert_not_forwarded_if(NULL, obj, _heap->is_concurrent_traversal_in_progress());
+  assert(_satb_mark_queue_set.is_active(), "only get here when SATB active");
+
+  // Filter marked objects before hitting the SATB queues. The same predicate would
+  // be used by SATBMQ::filter to eliminate already marked objects downstream, but
+  // filtering here helps to avoid wasteful SATB queueing work to begin with.
+  if (!_heap->requires_marking(obj)) return;
+
+  Thread* thr = Thread::current();
+  if (thr->is_Java_thread()) {
+    ShenandoahThreadLocalData::satb_mark_queue(thr).enqueue_known_active(obj);
+  } else {
+    MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag);
+    _satb_mark_queue_set.shared_satb_queue()->enqueue_known_active(obj);
+  }
+}
+
+template <DecoratorSet decorators, typename T>
+inline void ShenandoahBarrierSet::satb_barrier(T *field) {
+  if (HasDecorator<decorators, IS_DEST_UNINITIALIZED>::value ||
+      HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
+    return;
+  }
+  if (ShenandoahSATBBarrier && _heap->is_concurrent_mark_in_progress()) {
+    T heap_oop = RawAccess<>::oop_load(field);
+    if (!CompressedOops::is_null(heap_oop)) {
+      enqueue(CompressedOops::decode(heap_oop));
+    }
+  }
+}
+
+inline void ShenandoahBarrierSet::satb_enqueue(oop value) {
+  assert(value != NULL, "checked before");
+  if (ShenandoahSATBBarrier && _heap->is_concurrent_mark_in_progress()) {
+    enqueue(value);
+  }
+}
+
+inline void ShenandoahBarrierSet::storeval_barrier(oop obj) {
+  if (obj != NULL && ShenandoahStoreValEnqueueBarrier && _heap->is_concurrent_traversal_in_progress()) {
+    enqueue(obj);
+  }
+}
+
+inline void ShenandoahBarrierSet::keep_alive_barrier(oop value) {
+  assert(value != NULL, "checked before");
+  if (ShenandoahKeepAliveBarrier && _heap->is_concurrent_mark_in_progress()) {
+    enqueue(value);
+  }
+}
+
+inline void ShenandoahBarrierSet::keep_alive_if_weak(DecoratorSet decorators, oop value) {
+  assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
+  const bool on_strong_oop_ref = (decorators & ON_STRONG_OOP_REF) != 0;
+  const bool peek = (decorators & AS_NO_KEEPALIVE) != 0;
+  if (!peek && !on_strong_oop_ref) {
+    keep_alive_barrier(value);
+  }
+}
+
+template <DecoratorSet decorators>
+inline void ShenandoahBarrierSet::keep_alive_if_weak(oop value) {
+  assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
+  if (!HasDecorator<decorators, ON_STRONG_OOP_REF>::value &&
+      !HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
+    keep_alive_barrier(value);
+  }
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_not_in_heap(T* addr) {
+  oop value = Raw::oop_load_not_in_heap(addr);
+  if (value != NULL) {
+    ShenandoahBarrierSet *const bs = ShenandoahBarrierSet::barrier_set();
+    value = bs->load_reference_barrier_not_null(value);
+    bs->keep_alive_if_weak<decorators>(value);
+  }
+  return value;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap(T* addr) {
+  oop value = Raw::oop_load_in_heap(addr);
+  if (value != NULL) {
+    ShenandoahBarrierSet *const bs = ShenandoahBarrierSet::barrier_set();
+    value = bs->load_reference_barrier_not_null(value);
+    bs->keep_alive_if_weak<decorators>(value);
+  }
+  return value;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap_at(oop base, ptrdiff_t offset) {
+  oop value = Raw::oop_load_in_heap_at(base, offset);
+  if (value != NULL) {
+    ShenandoahBarrierSet *const bs = ShenandoahBarrierSet::barrier_set();
+    value = bs->load_reference_barrier_not_null(value);
+    bs->keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset),
+                           value);
+  }
+  return value;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_store_not_in_heap(T* addr, oop value) {
+  shenandoah_assert_marked_if(NULL, value, !CompressedOops::is_null(value) && ShenandoahHeap::heap()->is_evacuation_in_progress());
+  ShenandoahBarrierSet* const bs = ShenandoahBarrierSet::barrier_set();
+  bs->storeval_barrier(value);
+  bs->satb_barrier<decorators>(addr);
+  Raw::oop_store(addr, value);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_store_in_heap(T* addr, oop value) {
+  shenandoah_assert_not_in_cset_loc_except(addr, ShenandoahHeap::heap()->cancelled_gc());
+  shenandoah_assert_not_forwarded_except  (addr, value, value == NULL || ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahHeap::heap()->is_concurrent_mark_in_progress());
+  shenandoah_assert_not_in_cset_except    (addr, value, value == NULL || ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahHeap::heap()->is_concurrent_mark_in_progress());
+
+  oop_store_not_in_heap(addr, value);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+inline void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value) {
+  oop_store_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), value);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value) {
+  ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set();
+  bs->storeval_barrier(new_value);
+
+  oop res;
+  oop expected = compare_value;
+  do {
+    compare_value = expected;
+    res = Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
+    expected = res;
+  } while (compare_value != expected && resolve_forwarded(compare_value) == resolve_forwarded(expected));
+
+  // Note: We don't need a keep-alive-barrier here. We already enqueue any loaded reference for SATB anyway,
+  // because it must be the previous value.
+  if (res != NULL) {
+    res = ShenandoahBarrierSet::barrier_set()->load_reference_barrier_not_null(res);
+    bs->satb_enqueue(res);
+  }
+  return res;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
+  return oop_atomic_cmpxchg_not_in_heap(new_value, addr, compare_value);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
+  return oop_atomic_cmpxchg_in_heap(new_value, AccessInternal::oop_field_addr<decorators>(base, offset), compare_value);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_not_in_heap(oop new_value, T* addr) {
+  ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set();
+  bs->storeval_barrier(new_value);
+
+  oop previous = Raw::oop_atomic_xchg(new_value, addr);
+
+  // Note: We don't need a keep-alive-barrier here. We already enqueue any loaded reference for SATB anyway,
+  // because it must be the previous value.
+  if (previous != NULL) {
+    previous = ShenandoahBarrierSet::barrier_set()->load_reference_barrier_not_null(previous);
+    bs->satb_enqueue(previous);
+  }
+  return previous;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap(oop new_value, T* addr) {
+  return oop_atomic_xchg_in_heap_impl(new_value, addr);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
+  return oop_atomic_xchg_in_heap_impl(new_value, AccessInternal::oop_field_addr<decorators>(base, offset));
+}
+
+// Clone barrier support
+template <DecoratorSet decorators, typename BarrierSetT>
+void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::clone_in_heap(oop src, oop dst, size_t size) {
+  if (ShenandoahCloneBarrier) {
+    ShenandoahBarrierSet::barrier_set()->clone_barrier_runtime(src);
+  }
+  Raw::clone(src, dst, size);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+bool ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
+                                                                                         arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
+                                                                                         size_t length) {
+  ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set();
+  bs->arraycopy_pre(arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw),
+                    arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw),
+                    length);
+  return Raw::oop_arraycopy_in_heap(src_obj, src_offset_in_bytes, src_raw, dst_obj, dst_offset_in_bytes, dst_raw, length);
}

+template <typename T, bool HAS_FWD, bool EVAC, bool ENQUEUE>
+void ShenandoahBarrierSet::arraycopy_work(T* src, size_t count) {
+  Thread* thread = Thread::current();
+  ShenandoahSATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
+  ShenandoahMarkingContext* ctx = _heap->marking_context();
+  const ShenandoahCollectionSet* const cset = _heap->collection_set();
+  T* end = src + count;
+  for (T* elem_ptr = src; elem_ptr < end; elem_ptr++) {
+    T o = RawAccess<>::oop_load(elem_ptr);
+    if (!CompressedOops::is_null(o)) {
+      oop obj = CompressedOops::decode_not_null(o);
+      if (HAS_FWD && cset->is_in((HeapWord *) obj)) {
+        assert(_heap->has_forwarded_objects(), "only get here with forwarded objects");
+        oop fwd = resolve_forwarded_not_null(obj);
+        if (EVAC && obj == fwd) {
+          fwd = _heap->evacuate_object(obj, thread);
+        }
+        assert(obj != fwd || _heap->cancelled_gc(), "must be forwarded");
+        oop witness = ShenandoahHeap::cas_oop(fwd, elem_ptr, o);
+        obj = fwd;
+      }
+      if (ENQUEUE && !ctx->is_marked(obj)) {
+        queue.enqueue_known_active(obj);
+      }
+    }
+  }
+}
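// ---------------------------------------------------------------------------
// Aside (illustrative sketch, not part of this patch): arraycopy_work() above
// relies on a resolve-forwardee-then-CAS update of each array slot. The
// standalone program below models that idiom with std::atomic standing in for
// ShenandoahHeap::cas_oop(); every name in it is hypothetical, not HotSpot API.

#include <atomic>
#include <cstdio>

struct Obj {
  std::atomic<Obj*> fwd;                 // forwarding pointer; self until evacuated
  Obj() : fwd(this) {}
};

static Obj* resolve(Obj* o) {            // analogue of resolve_forwarded_not_null()
  return o->fwd.load(std::memory_order_acquire);
}

// Update one slot to the to-space copy. A failed CAS is benign here: the
// witness value was installed by a racing thread and is at least as current.
static void update_slot(std::atomic<Obj*>* slot) {
  Obj* obj = slot->load(std::memory_order_relaxed);
  if (obj == nullptr) return;
  Obj* fwd = resolve(obj);
  if (fwd != obj) {
    slot->compare_exchange_strong(obj, fwd);
  }
}

int main() {
  Obj from;                              // from-space original
  Obj to;                                // to-space copy
  from.fwd.store(&to);                   // "evacuation" forwards from -> to
  std::atomic<Obj*> slot(&from);         // heap slot still holding the stale oop
  update_slot(&slot);
  std::printf("slot updated to to-space copy: %s\n", slot.load() == &to ? "yes" : "no");
  return 0;
}
// ---------------------------------------------------------------------------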
+template <typename T>
+void ShenandoahBarrierSet::arraycopy_pre_work(T* src, T* dst, size_t count) {
+  if (_heap->is_concurrent_mark_in_progress()) {
+    if (_heap->has_forwarded_objects()) {
+      arraycopy_work<T, true, false, true>(dst, count);
+    } else {
+      arraycopy_work<T, false, false, true>(dst, count);
+    }
+  }
+
+  arraycopy_update_impl(src, count);
+}
+
+void ShenandoahBarrierSet::arraycopy_pre(oop* src, oop* dst, size_t count) {
+  arraycopy_pre_work(src, dst, count);
+}
+
+void ShenandoahBarrierSet::arraycopy_pre(narrowOop* src, narrowOop* dst, size_t count) {
+  arraycopy_pre_work(src, dst, count);
+}
+
+template <typename T>
+void ShenandoahBarrierSet::arraycopy_update_impl(T* src, size_t count) {
+  if (_heap->is_evacuation_in_progress()) {
+    ShenandoahEvacOOMScope oom_evac;
+    arraycopy_work<T, true, true, false>(src, count);
+  } else if (_heap->is_concurrent_traversal_in_progress()) {
+    ShenandoahEvacOOMScope oom_evac;
+    arraycopy_work<T, true, true, true>(src, count);
+  } else if (_heap->has_forwarded_objects()) {
+    arraycopy_work<T, true, false, false>(src, count);
+  }
+}
+
+void ShenandoahBarrierSet::arraycopy_update(oop* src, size_t count) {
+  arraycopy_update_impl(src, count);
+}
+
+void ShenandoahBarrierSet::arraycopy_update(narrowOop* src, size_t count) {
+  arraycopy_update_impl(src, count);
+}
+
+#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHBARRIERSET_INLINE_HPP
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetAssembler.hpp	2020-01-17 17:09:53.486131395 +0100
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_HPP
+#define SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_HPP
+
+#include "utilities/macros.hpp"
+
+#include CPU_HEADER(gc/shenandoah/shenandoahBarrierSetAssembler)
+
+#endif // SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_HPP
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetClone.inline.hpp	2020-01-17 17:09:54.087131362 +0100
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSETCLONE_INLINE_HPP
+#define SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSETCLONE_INLINE_HPP
+
+#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
+#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
+#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "memory/iterator.hpp"
+#include "oops/access.hpp"
+
+template <bool EVAC, bool ENQUEUE>
+class ShenandoahUpdateRefsForOopClosure: public BasicOopIterateClosure {
+private:
+  ShenandoahHeap* const _heap;
+  ShenandoahBarrierSet* const _bs;
+  const ShenandoahCollectionSet* const _cset;
+  Thread* const _thread;
+
+  template <class T>
+  inline void do_oop_work(T* p) {
+    T o = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(o)) {
+      oop obj = CompressedOops::decode_not_null(o);
+      if (_cset->is_in((HeapWord *)obj)) {
+        oop fwd = _bs->resolve_forwarded_not_null(obj);
+        if (EVAC && obj == fwd) {
+          fwd = _heap->evacuate_object(obj, _thread);
+        }
+        if (ENQUEUE) {
+          _bs->enqueue(fwd);
+        }
+        assert(obj != fwd || _heap->cancelled_gc(), "must be forwarded");
+        ShenandoahHeap::cas_oop(fwd, p, o);
+      }
+    }
+  }
+public:
+  ShenandoahUpdateRefsForOopClosure() :
+    _heap(ShenandoahHeap::heap()),
+    _bs(ShenandoahBarrierSet::barrier_set()),
+    _cset(_heap->collection_set()),
+    _thread(Thread::current()) {
+  }
+
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+};
+
+void ShenandoahBarrierSet::clone_barrier(oop obj) {
+  assert(ShenandoahCloneBarrier, "only get here with clone barriers enabled");
+  assert(_heap->has_forwarded_objects(), "only when heap is unstable");
+
+  // This is called for cloning an object (see jvm.cpp) after the clone
+  // has been made. We are not interested in any 'previous value' because
+  // it would be NULL in any case. But we *are* interested in any oop*
+  // that potentially need to be updated.
+
+  shenandoah_assert_correct(NULL, obj);
+  if (_heap->is_evacuation_in_progress()) {
+    ShenandoahEvacOOMScope evac_scope;
+    ShenandoahUpdateRefsForOopClosure<true, false> cl;
+    obj->oop_iterate(&cl);
+  } else if (_heap->is_concurrent_traversal_in_progress()) {
+    ShenandoahEvacOOMScope evac_scope;
+    ShenandoahUpdateRefsForOopClosure<true, true> cl;
+    obj->oop_iterate(&cl);
+  } else {
+    ShenandoahUpdateRefsForOopClosure<false, false> cl;
+    obj->oop_iterate(&cl);
+  }
+}
+
+#endif // SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSETCLONE_INLINE_HPP
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahClosures.hpp	2020-01-17 17:09:54.685131329 +0100
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_HPP +#define SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_HPP + +#include "memory/iterator.hpp" + +class ShenandoahHeap; +class ShenandoahMarkingContext; +class Thread; + +class ShenandoahForwardedIsAliveClosure: public BoolObjectClosure { +private: + ShenandoahMarkingContext* const _mark_context; +public: + inline ShenandoahForwardedIsAliveClosure(); + inline bool do_object_b(oop obj); +}; + +class ShenandoahIsAliveClosure: public BoolObjectClosure { +private: + ShenandoahMarkingContext* const _mark_context; +public: + inline ShenandoahIsAliveClosure(); + inline bool do_object_b(oop obj); +}; + +class ShenandoahIsAliveSelector : public StackObj { +private: + ShenandoahIsAliveClosure _alive_cl; + ShenandoahForwardedIsAliveClosure _fwd_alive_cl; +public: + inline BoolObjectClosure* is_alive_closure(); +}; + +class ShenandoahUpdateRefsClosure: public OopClosure { +private: + ShenandoahHeap* _heap; +public: + inline ShenandoahUpdateRefsClosure(); + inline void do_oop(oop* p); + inline void do_oop(narrowOop* p); +private: + template + inline void do_oop_work(T* p); +}; + +class ShenandoahEvacuateUpdateRootsClosure: public BasicOopIterateClosure { +private: + ShenandoahHeap* _heap; + Thread* _thread; +public: + inline ShenandoahEvacuateUpdateRootsClosure(); + inline void do_oop(oop* p); + inline void do_oop(narrowOop* p); + +private: + template + inline void do_oop_work(T* p); +}; + +#ifdef ASSERT +class ShenandoahAssertNotForwardedClosure : public OopClosure { +private: + template + inline void do_oop_work(T* p); + +public: + inline void do_oop(narrowOop* p); + inline void do_oop(oop* p); +}; +#endif // ASSERT + +#endif // SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp 2020-01-17 17:09:55.293131296 +0100 @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_INLINE_HPP +#define SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_INLINE_HPP + +#include "gc/shenandoah/shenandoahAsserts.hpp" +#include "gc/shenandoah/shenandoahClosures.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "oops/compressedOops.inline.hpp" +#include "runtime/thread.hpp" + +ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() : + _mark_context(ShenandoahHeap::heap()->marking_context()) { +} + +bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) { + if (CompressedOops::is_null(obj)) { + return false; + } + obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); + shenandoah_assert_not_forwarded_if(NULL, obj, + (ShenandoahHeap::heap()->is_concurrent_mark_in_progress() || + ShenandoahHeap::heap()->is_concurrent_traversal_in_progress())); + return _mark_context->is_marked(obj); +} + +ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() : + _mark_context(ShenandoahHeap::heap()->marking_context()) { +} + +bool ShenandoahIsAliveClosure::do_object_b(oop obj) { + if (CompressedOops::is_null(obj)) { + return false; + } + shenandoah_assert_not_forwarded(NULL, obj); + return _mark_context->is_marked(obj); +} + +BoolObjectClosure* ShenandoahIsAliveSelector::is_alive_closure() { + return ShenandoahHeap::heap()->has_forwarded_objects() ? + reinterpret_cast(&_fwd_alive_cl) : + reinterpret_cast(&_alive_cl); +} + +ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : + _heap(ShenandoahHeap::heap()) { +} + +template +void ShenandoahUpdateRefsClosure::do_oop_work(T* p) { + T o = RawAccess<>::oop_load(p); + if (!CompressedOops::is_null(o)) { + oop obj = CompressedOops::decode_not_null(o); + _heap->update_with_forwarded_not_null(p, obj); + } +} + +void ShenandoahUpdateRefsClosure::do_oop(oop* p) { do_oop_work(p); } +void ShenandoahUpdateRefsClosure::do_oop(narrowOop* p) { do_oop_work(p); } + +ShenandoahEvacuateUpdateRootsClosure::ShenandoahEvacuateUpdateRootsClosure() : + _heap(ShenandoahHeap::heap()), _thread(Thread::current()) { +} + +template +void ShenandoahEvacuateUpdateRootsClosure::do_oop_work(T* p) { + assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress"); + + T o = RawAccess<>::oop_load(p); + if (! CompressedOops::is_null(o)) { + oop obj = CompressedOops::decode_not_null(o); + if (_heap->in_collection_set(obj)) { + shenandoah_assert_marked(p, obj); + oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); + if (resolved == obj) { + resolved = _heap->evacuate_object(obj, _thread); + } + RawAccess::oop_store(p, resolved); + } + } +} + +void ShenandoahEvacuateUpdateRootsClosure::do_oop(oop* p) { + do_oop_work(p); +} + +void ShenandoahEvacuateUpdateRootsClosure::do_oop(narrowOop* p) { + do_oop_work(p); +} + +#ifdef ASSERT +template +void ShenandoahAssertNotForwardedClosure::do_oop_work(T* p) { + T o = RawAccess<>::oop_load(p); + if (!CompressedOops::is_null(o)) { + oop obj = CompressedOops::decode_not_null(o); + shenandoah_assert_not_forwarded(p, obj); + } +} + +void ShenandoahAssertNotForwardedClosure::do_oop(narrowOop* p) { do_oop_work(p); } +void ShenandoahAssertNotForwardedClosure::do_oop(oop* p) { do_oop_work(p); } +#endif + +#endif // SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp 2020-01-17 17:09:55.901131262 +0100 @@ -0,0 +1,332 @@ +/* + * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "code/codeCache.hpp" +#include "code/nmethod.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahCodeRoots.hpp" +#include "gc/shenandoah/shenandoahUtils.hpp" +#include "memory/resourceArea.hpp" + +ShenandoahParallelCodeCacheIterator::ShenandoahParallelCodeCacheIterator(const GrowableArray* heaps) { + _length = heaps->length(); + _iters = NEW_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _length, mtGC); + for (int h = 0; h < _length; h++) { + _iters[h] = ShenandoahParallelCodeHeapIterator(heaps->at(h)); + } +} + +ShenandoahParallelCodeCacheIterator::~ShenandoahParallelCodeCacheIterator() { + FREE_C_HEAP_ARRAY(ParallelCodeHeapIterator, _iters); +} + +void ShenandoahParallelCodeCacheIterator::parallel_blobs_do(CodeBlobClosure* f) { + for (int c = 0; c < _length; c++) { + _iters[c].parallel_blobs_do(f); + } +} + +ShenandoahParallelCodeHeapIterator::ShenandoahParallelCodeHeapIterator(CodeHeap* heap) : + _heap(heap), _claimed_idx(0), _finished(false) { +} + +void ShenandoahParallelCodeHeapIterator::parallel_blobs_do(CodeBlobClosure* f) { + assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint"); + + /* + * Parallel code heap walk. + * + * This code makes all threads scan all code heaps, but only one thread would execute the + * closure on given blob. This is achieved by recording the "claimed" blocks: if a thread + * had claimed the block, it can process all blobs in it. Others have to fast-forward to + * next attempt without processing. + * + * Late threads would return immediately if iterator is finished. + */ + + if (_finished) { + return; + } + + int stride = 256; // educated guess + int stride_mask = stride - 1; + assert (is_power_of_2(stride), "sanity"); + + int count = 0; + bool process_block = true; + + for (CodeBlob *cb = CodeCache::first_blob(_heap); cb != NULL; cb = CodeCache::next_blob(_heap, cb)) { + int current = count++; + if ((current & stride_mask) == 0) { + process_block = (current >= _claimed_idx) && + (Atomic::cmpxchg(current + stride, &_claimed_idx, current) == current); + } + if (process_block) { + if (cb->is_alive()) { + f->do_code_blob(cb); +#ifdef ASSERT + if (cb->is_nmethod()) + Universe::heap()->verify_nmethod((nmethod*)cb); +#endif + } + } + } + + _finished = true; +} + +class ShenandoahNMethodOopDetector : public OopClosure { +private: + ResourceMark rm; // For growable array allocation below. 
+ GrowableArray _oops; + +public: + ShenandoahNMethodOopDetector() : _oops(10) {}; + + void do_oop(oop* o) { + _oops.append(o); + } + void do_oop(narrowOop* o) { + fatal("NMethods should not have compressed oops embedded."); + } + + GrowableArray* oops() { + return &_oops; + } + + bool has_oops() { + return !_oops.is_empty(); + } +}; + +GrowableArray* ShenandoahCodeRoots::_recorded_nms; +ShenandoahLock ShenandoahCodeRoots::_recorded_nms_lock; + +void ShenandoahCodeRoots::initialize() { + _recorded_nms = new (ResourceObj::C_HEAP, mtGC) GrowableArray(100, true, mtGC); +} + +void ShenandoahCodeRoots::add_nmethod(nmethod* nm) { + switch (ShenandoahCodeRootsStyle) { + case 0: + case 1: + break; + case 2: { + assert_locked_or_safepoint(CodeCache_lock); + ShenandoahLocker locker(CodeCache_lock->owned_by_self() ? NULL : &_recorded_nms_lock); + + ShenandoahNMethodOopDetector detector; + nm->oops_do(&detector); + + if (detector.has_oops()) { + ShenandoahNMethod* nmr = new ShenandoahNMethod(nm, detector.oops()); + nmr->assert_alive_and_correct(); + int idx = _recorded_nms->find(nm, ShenandoahNMethod::find_with_nmethod); + if (idx != -1) { + ShenandoahNMethod* old = _recorded_nms->at(idx); + _recorded_nms->at_put(idx, nmr); + delete old; + } else { + _recorded_nms->append(nmr); + } + } + break; + } + default: + ShouldNotReachHere(); + } +}; + +void ShenandoahCodeRoots::remove_nmethod(nmethod* nm) { + switch (ShenandoahCodeRootsStyle) { + case 0: + case 1: { + break; + } + case 2: { + assert_locked_or_safepoint(CodeCache_lock); + ShenandoahLocker locker(CodeCache_lock->owned_by_self() ? NULL : &_recorded_nms_lock); + + ShenandoahNMethodOopDetector detector; + nm->oops_do(&detector, /* allow_zombie = */ true); + + if (detector.has_oops()) { + int idx = _recorded_nms->find(nm, ShenandoahNMethod::find_with_nmethod); + assert(idx != -1, "nmethod " PTR_FORMAT " should be registered", p2i(nm)); + ShenandoahNMethod* old = _recorded_nms->at(idx); + old->assert_same_oops(detector.oops()); + _recorded_nms->delete_at(idx); + delete old; + } + break; + } + default: + ShouldNotReachHere(); + } +} + +ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() : + _heap(ShenandoahHeap::heap()), + _par_iterator(CodeCache::heaps()), + _claimed(0) { + assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint"); + assert(!Thread::current()->is_Worker_thread(), "Should not be acquired by workers"); + switch (ShenandoahCodeRootsStyle) { + case 0: + case 1: { + // No need to do anything here + break; + } + case 2: { + CodeCache_lock->lock_without_safepoint_check(); + break; + } + default: + ShouldNotReachHere(); + } +} + +ShenandoahCodeRootsIterator::~ShenandoahCodeRootsIterator() { + switch (ShenandoahCodeRootsStyle) { + case 0: + case 1: { + // No need to do anything here + break; + } + case 2: { + CodeCache_lock->unlock(); + break; + } + default: + ShouldNotReachHere(); + } +} + +template +void ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do(CodeBlobClosure *f) { + switch (ShenandoahCodeRootsStyle) { + case 0: { + if (_seq_claimed.try_set()) { + CodeCache::blobs_do(f); + } + break; + } + case 1: { + _par_iterator.parallel_blobs_do(f); + break; + } + case 2: { + ShenandoahCodeRootsIterator::fast_parallel_blobs_do(f); + break; + } + default: + ShouldNotReachHere(); + } +} + +void ShenandoahAllCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) { + ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do(f); +} + +void 
ShenandoahCsetCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) { + ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do(f); +} + +template +void ShenandoahCodeRootsIterator::fast_parallel_blobs_do(CodeBlobClosure *f) { + assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint"); + + size_t stride = 256; // educated guess + + GrowableArray* list = ShenandoahCodeRoots::_recorded_nms; + + size_t max = (size_t)list->length(); + while (_claimed < max) { + size_t cur = Atomic::add(stride, &_claimed) - stride; + size_t start = cur; + size_t end = MIN2(cur + stride, max); + if (start >= max) break; + + for (size_t idx = start; idx < end; idx++) { + ShenandoahNMethod* nmr = list->at((int) idx); + nmr->assert_alive_and_correct(); + + if (CSET_FILTER && !nmr->has_cset_oops(_heap)) { + continue; + } + + f->do_code_blob(nmr->nm()); + } + } +} + +ShenandoahNMethod::ShenandoahNMethod(nmethod* nm, GrowableArray* oops) { + _nm = nm; + _oops = NEW_C_HEAP_ARRAY(oop*, oops->length(), mtGC); + _oops_count = oops->length(); + for (int c = 0; c < _oops_count; c++) { + _oops[c] = oops->at(c); + } +} + +ShenandoahNMethod::~ShenandoahNMethod() { + if (_oops != NULL) { + FREE_C_HEAP_ARRAY(oop*, _oops); + } +} + +bool ShenandoahNMethod::has_cset_oops(ShenandoahHeap *heap) { + for (int c = 0; c < _oops_count; c++) { + oop o = RawAccess<>::oop_load(_oops[c]); + if (heap->in_collection_set(o)) { + return true; + } + } + return false; +} + +#ifdef ASSERT +void ShenandoahNMethod::assert_alive_and_correct() { + assert(_nm->is_alive(), "only alive nmethods here"); + assert(_oops_count > 0, "should have filtered nmethods without oops before"); + ShenandoahHeap* heap = ShenandoahHeap::heap(); + for (int c = 0; c < _oops_count; c++) { + oop *loc = _oops[c]; + assert(_nm->code_contains((address) loc) || _nm->oops_contains(loc), "nmethod should contain the oop*"); + oop o = RawAccess<>::oop_load(loc); + shenandoah_assert_correct_except(loc, o, + o == NULL || + heap->is_full_gc_move_in_progress() || + (VMThread::vm_operation() != NULL) && (VMThread::vm_operation()->type() == VM_Operation::VMOp_HeapWalkOperation) + ); + } +} + +void ShenandoahNMethod::assert_same_oops(GrowableArray* oops) { + assert(_oops_count == oops->length(), "should have the same number of oop*"); + for (int c = 0; c < _oops_count; c++) { + assert(_oops[c] == oops->at(c), "should be the same oop*"); + } +} +#endif --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.hpp 2020-01-17 17:09:56.503131229 +0100 @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCODEROOTS_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCODEROOTS_HPP + +#include "code/codeCache.hpp" +#include "gc/shenandoah/shenandoahSharedVariables.hpp" +#include "gc/shenandoah/shenandoahLock.hpp" +#include "memory/allocation.hpp" +#include "memory/iterator.hpp" + +class ShenandoahHeap; +class ShenandoahHeapRegion; + +class ShenandoahParallelCodeHeapIterator { + friend class CodeCache; +private: + CodeHeap* _heap; + DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int)); + volatile int _claimed_idx; + volatile bool _finished; + DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0); +public: + ShenandoahParallelCodeHeapIterator(CodeHeap* heap); + void parallel_blobs_do(CodeBlobClosure* f); +}; + +class ShenandoahParallelCodeCacheIterator { + friend class CodeCache; +private: + ShenandoahParallelCodeHeapIterator* _iters; + int _length; + +private: + // Noncopyable. + ShenandoahParallelCodeCacheIterator(const ShenandoahParallelCodeCacheIterator& o); + ShenandoahParallelCodeCacheIterator& operator=(const ShenandoahParallelCodeCacheIterator& o); +public: + ShenandoahParallelCodeCacheIterator(const GrowableArray* heaps); + ~ShenandoahParallelCodeCacheIterator(); + void parallel_blobs_do(CodeBlobClosure* f); +}; + +// ShenandoahNMethod tuple records the internal locations of oop slots within the nmethod. +// This allows us to quickly scan the oops without doing the nmethod-internal scans, that +// sometimes involves parsing the machine code. Note it does not record the oops themselves, +// because it would then require handling these tuples as the new class of roots. 
+class ShenandoahNMethod : public CHeapObj { +private: + nmethod* _nm; + oop** _oops; + int _oops_count; + +public: + ShenandoahNMethod(nmethod *nm, GrowableArray* oops); + ~ShenandoahNMethod(); + + nmethod* nm() { + return _nm; + } + + bool has_cset_oops(ShenandoahHeap* heap); + + void assert_alive_and_correct() NOT_DEBUG_RETURN; + void assert_same_oops(GrowableArray* oops) NOT_DEBUG_RETURN; + + static bool find_with_nmethod(void* nm, ShenandoahNMethod* other) { + return other->_nm == nm; + } +}; + +class ShenandoahCodeRootsIterator { + friend class ShenandoahCodeRoots; +protected: + ShenandoahHeap* _heap; + ShenandoahParallelCodeCacheIterator _par_iterator; + ShenandoahSharedFlag _seq_claimed; + DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t)); + volatile size_t _claimed; + DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0); +protected: + ShenandoahCodeRootsIterator(); + ~ShenandoahCodeRootsIterator(); + + template + void dispatch_parallel_blobs_do(CodeBlobClosure *f); + + template + void fast_parallel_blobs_do(CodeBlobClosure *f); +}; + +class ShenandoahAllCodeRootsIterator : public ShenandoahCodeRootsIterator { +public: + ShenandoahAllCodeRootsIterator() : ShenandoahCodeRootsIterator() {}; + void possibly_parallel_blobs_do(CodeBlobClosure *f); +}; + +class ShenandoahCsetCodeRootsIterator : public ShenandoahCodeRootsIterator { +public: + ShenandoahCsetCodeRootsIterator() : ShenandoahCodeRootsIterator() {}; + void possibly_parallel_blobs_do(CodeBlobClosure* f); +}; + +class ShenandoahCodeRoots : public AllStatic { + friend class ShenandoahHeap; + friend class ShenandoahCodeRootsIterator; + +public: + static void initialize(); + static void add_nmethod(nmethod* nm); + static void remove_nmethod(nmethod* nm); + +private: + static GrowableArray* _recorded_nms; + static ShenandoahLock _recorded_nms_lock; +}; + +#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHCODEROOTS_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp 2020-01-17 17:09:57.102131196 +0100 @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/shenandoahCollectionSet.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeapRegionSet.hpp" +#include "gc/shenandoah/shenandoahUtils.hpp" +#include "runtime/atomic.hpp" +#include "services/memTracker.hpp" +#include "utilities/copy.hpp" + +ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, char* heap_base, size_t size) : + _map_size(heap->num_regions()), + _region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()), + _map_space(align_up(((uintx)heap_base + size) >> _region_size_bytes_shift, os::vm_allocation_granularity())), + _cset_map(_map_space.base() + ((uintx)heap_base >> _region_size_bytes_shift)), + _biased_cset_map(_map_space.base()), + _heap(heap), + _garbage(0), + _live_data(0), + _used(0), + _region_count(0), + _current_index(0) { + + // The collection set map is reserved to cover the entire heap *and* zero addresses. + // This is needed to accept in-cset checks for both heap oops and NULLs, freeing + // high-performance code from checking for NULL first. + // + // Since heap_base can be far away, committing the entire map would waste memory. + // Therefore, we only commit the parts that are needed to operate: the heap view, + // and the zero page. + // + // Note: we could instead commit the entire map, and piggyback on OS virtual memory + // subsystem for mapping not-yet-written-to pages to a single physical backing page, + // but this is not guaranteed, and would confuse NMT and other memory accounting tools. + + MemTracker::record_virtual_memory_type(_map_space.base(), mtGC); + + size_t page_size = (size_t)os::vm_page_size(); + + if (!_map_space.special()) { + // Commit entire pages that cover the heap cset map. + char* bot_addr = align_down(_cset_map, page_size); + char* top_addr = align_up(_cset_map + _map_size, page_size); + os::commit_memory_or_exit(bot_addr, pointer_delta(top_addr, bot_addr, 1), false, + "Unable to commit collection set bitmap: heap"); + + // Commit the zero page, if not yet covered by heap cset map. 
+ if (bot_addr != _biased_cset_map) { + os::commit_memory_or_exit(_biased_cset_map, page_size, false, + "Unable to commit collection set bitmap: zero page"); + } + } + + Copy::zero_to_bytes(_cset_map, _map_size); + Copy::zero_to_bytes(_biased_cset_map, page_size); +} + +void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); + assert(Thread::current()->is_VM_thread(), "Must be VMThread"); + assert(!is_in(r), "Already in collection set"); + _cset_map[r->region_number()] = 1; + _region_count ++; + _garbage += r->garbage(); + _live_data += r->get_live_data_bytes(); + _used += r->used(); +} + +bool ShenandoahCollectionSet::add_region_check_for_duplicates(ShenandoahHeapRegion* r) { + if (!is_in(r)) { + add_region(r); + return true; + } else { + return false; + } +} + +void ShenandoahCollectionSet::remove_region(ShenandoahHeapRegion* r) { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); + assert(Thread::current()->is_VM_thread(), "Must be VMThread"); + assert(is_in(r), "Not in collection set"); + _cset_map[r->region_number()] = 0; + _region_count --; +} + +void ShenandoahCollectionSet::update_region_status() { + for (size_t index = 0; index < _heap->num_regions(); index ++) { + ShenandoahHeapRegion* r = _heap->get_region(index); + if (is_in(r)) { + r->make_cset(); + } else { + assert (!r->is_cset(), "should not be cset"); + } + } +} + +void ShenandoahCollectionSet::clear() { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); + Copy::zero_to_bytes(_cset_map, _map_size); + +#ifdef ASSERT + for (size_t index = 0; index < _heap->num_regions(); index ++) { + assert (!_heap->get_region(index)->is_cset(), "should have been cleared before"); + } +#endif + + _garbage = 0; + _live_data = 0; + _used = 0; + + _region_count = 0; + _current_index = 0; +} + +ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() { + size_t num_regions = _heap->num_regions(); + if (_current_index >= (jint)num_regions) { + return NULL; + } + + jint saved_current = _current_index; + size_t index = (size_t)saved_current; + + while(index < num_regions) { + if (is_in(index)) { + jint cur = Atomic::cmpxchg((jint)(index + 1), &_current_index, saved_current); + assert(cur >= (jint)saved_current, "Must move forward"); + if (cur == saved_current) { + assert(is_in(index), "Invariant"); + return _heap->get_region(index); + } else { + index = (size_t)cur; + saved_current = cur; + } + } else { + index ++; + } + } + return NULL; +} + +ShenandoahHeapRegion* ShenandoahCollectionSet::next() { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); + assert(Thread::current()->is_VM_thread(), "Must be VMThread"); + size_t num_regions = _heap->num_regions(); + for (size_t index = (size_t)_current_index; index < num_regions; index ++) { + if (is_in(index)) { + _current_index = (jint)(index + 1); + return _heap->get_region(index); + } + } + + return NULL; +} + +void ShenandoahCollectionSet::print_on(outputStream* out) const { + out->print_cr("Collection Set : " SIZE_FORMAT "", count()); + + debug_only(size_t regions = 0;) + for (size_t index = 0; index < _heap->num_regions(); index ++) { + if (is_in(index)) { + _heap->get_region(index)->print_on(out); + debug_only(regions ++;) + } + } + assert(regions == count(), "Must match"); +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ 
new/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp 2020-01-17 17:09:57.712131162 +0100 @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSET_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSET_HPP + +#include "memory/allocation.hpp" +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.hpp" + +class ShenandoahCollectionSet : public CHeapObj { + friend class ShenandoahHeap; +private: + size_t const _map_size; + size_t const _region_size_bytes_shift; + ReservedSpace _map_space; + char* const _cset_map; + // Bias cset map's base address for fast test if an oop is in cset + char* const _biased_cset_map; + + ShenandoahHeap* const _heap; + + size_t _garbage; + size_t _live_data; + size_t _used; + size_t _region_count; + + DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t)); + volatile jint _current_index; + DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0); + +public: + ShenandoahCollectionSet(ShenandoahHeap* heap, char* heap_base, size_t size); + + // Add region to collection set + void add_region(ShenandoahHeapRegion* r); + bool add_region_check_for_duplicates(ShenandoahHeapRegion* r); + + // Bring per-region statuses to consistency with this collection. + // TODO: This is a transitional interface that bridges the gap between + // region statuses and this collection. Should go away after we merge them. 
+ void update_region_status(); + + // Remove region from collection set + void remove_region(ShenandoahHeapRegion* r); + + // MT version + ShenandoahHeapRegion* claim_next(); + + // Single-thread version + ShenandoahHeapRegion* next(); + + size_t count() const { return _region_count; } + bool is_empty() const { return _region_count == 0; } + + void clear_current_index() { + _current_index = 0; + } + + inline bool is_in(ShenandoahHeapRegion* r) const; + inline bool is_in(size_t region_number) const; + inline bool is_in(HeapWord* p) const; + + void print_on(outputStream* out) const; + + size_t used() const { return _used; } + size_t live_data() const { return _live_data; } + size_t garbage() const { return _garbage; } + void clear(); + +private: + char* map_address() const { + return _cset_map; + } + char* biased_map_address() const { + return _biased_cset_map; + } +}; + +#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSET_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.inline.hpp 2020-01-17 17:09:58.319131129 +0100 @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSET_INLINE_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSET_INLINE_HPP + +#include "gc/shenandoah/shenandoahCollectionSet.hpp" +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.hpp" + +bool ShenandoahCollectionSet::is_in(size_t region_number) const { + assert(region_number < _heap->num_regions(), "Sanity"); + return _cset_map[region_number] == 1; +} + +bool ShenandoahCollectionSet::is_in(ShenandoahHeapRegion* r) const { + return is_in(r->region_number()); +} + +bool ShenandoahCollectionSet::is_in(HeapWord* p) const { + assert(_heap->is_in(p), "Must be in the heap"); + uintx index = ((uintx) p) >> _region_size_bytes_shift; + // no need to subtract the bottom of the heap from p, + // _biased_cset_map is biased + return _biased_cset_map[index] == 1; +} + +#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSET_INLINE_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp 2020-01-17 17:09:58.910131096 +0100 @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/shenandoahCollectorPolicy.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "runtime/os.hpp" + +ShenandoahCollectorPolicy::ShenandoahCollectorPolicy() : + _success_concurrent_gcs(0), + _success_degenerated_gcs(0), + _success_full_gcs(0), + _alloc_failure_degenerated(0), + _alloc_failure_degenerated_upgrade_to_full(0), + _alloc_failure_full(0), + _explicit_concurrent(0), + _explicit_full(0), + _implicit_concurrent(0), + _implicit_full(0), + _cycle_counter(0) { + + Copy::zero_to_bytes(_degen_points, sizeof(size_t) * ShenandoahHeap::_DEGENERATED_LIMIT); + + ShenandoahHeapRegion::setup_sizes(max_heap_byte_size()); + + initialize_all(); + + _tracer = new (ResourceObj::C_HEAP, mtGC) ShenandoahTracer(); + +} + +void ShenandoahCollectorPolicy::initialize_alignments() { + // This is expected by our algorithm for ShenandoahHeap::heap_region_containing(). + size_t align = ShenandoahHeapRegion::region_size_bytes(); + if (UseLargePages) { + align = MAX2(align, os::large_page_size()); + } + _space_alignment = align; + _heap_alignment = align; +} + +void ShenandoahCollectorPolicy::record_explicit_to_concurrent() { + _explicit_concurrent++; +} + +void ShenandoahCollectorPolicy::record_explicit_to_full() { + _explicit_full++; +} + +void ShenandoahCollectorPolicy::record_implicit_to_concurrent() { + _implicit_concurrent++; +} + +void ShenandoahCollectorPolicy::record_implicit_to_full() { + _implicit_full++; +} + +void ShenandoahCollectorPolicy::record_alloc_failure_to_full() { + _alloc_failure_full++; +} + +void ShenandoahCollectorPolicy::record_alloc_failure_to_degenerated(ShenandoahHeap::ShenandoahDegenPoint point) { + assert(point < ShenandoahHeap::_DEGENERATED_LIMIT, "sanity"); + _alloc_failure_degenerated++; + _degen_points[point]++; +} + +void ShenandoahCollectorPolicy::record_degenerated_upgrade_to_full() { + _alloc_failure_degenerated_upgrade_to_full++; +} + +void ShenandoahCollectorPolicy::record_success_concurrent() { + _success_concurrent_gcs++; +} + +void ShenandoahCollectorPolicy::record_success_degenerated() { + _success_degenerated_gcs++; +} + +void ShenandoahCollectorPolicy::record_success_full() { + _success_full_gcs++; +} + +size_t ShenandoahCollectorPolicy::cycle_counter() const { + return _cycle_counter; +} + +void ShenandoahCollectorPolicy::record_cycle_start() { + _cycle_counter++; +} + +void ShenandoahCollectorPolicy::record_shutdown() { + _in_shutdown.set(); +} + +bool ShenandoahCollectorPolicy::is_at_shutdown() { + return _in_shutdown.is_set(); +} + +void ShenandoahCollectorPolicy::print_gc_stats(outputStream* out) const { + out->print_cr("Under allocation pressure, concurrent cycles may cancel, and either continue cycle"); + out->print_cr("under stop-the-world pause or result in 
stop-the-world Full GC. Increase heap size,"); + out->print_cr("tune GC heuristics, set more aggressive pacing delay, or lower allocation rate"); + out->print_cr("to avoid Degenerated and Full GC cycles."); + out->cr(); + + out->print_cr(SIZE_FORMAT_W(5) " successful concurrent GCs", _success_concurrent_gcs); + out->print_cr(" " SIZE_FORMAT_W(5) " invoked explicitly", _explicit_concurrent); + out->print_cr(" " SIZE_FORMAT_W(5) " invoked implicitly", _implicit_concurrent); + out->cr(); + + out->print_cr(SIZE_FORMAT_W(5) " Degenerated GCs", _success_degenerated_gcs); + out->print_cr(" " SIZE_FORMAT_W(5) " caused by allocation failure", _alloc_failure_degenerated); + for (int c = 0; c < ShenandoahHeap::_DEGENERATED_LIMIT; c++) { + if (_degen_points[c] > 0) { + const char* desc = ShenandoahHeap::degen_point_to_string((ShenandoahHeap::ShenandoahDegenPoint)c); + out->print_cr(" " SIZE_FORMAT_W(5) " happened at %s", _degen_points[c], desc); + } + } + out->print_cr(" " SIZE_FORMAT_W(5) " upgraded to Full GC", _alloc_failure_degenerated_upgrade_to_full); + out->cr(); + + out->print_cr(SIZE_FORMAT_W(5) " Full GCs", _success_full_gcs + _alloc_failure_degenerated_upgrade_to_full); + out->print_cr(" " SIZE_FORMAT_W(5) " invoked explicitly", _explicit_full); + out->print_cr(" " SIZE_FORMAT_W(5) " invoked implicitly", _implicit_full); + out->print_cr(" " SIZE_FORMAT_W(5) " caused by allocation failure", _alloc_failure_full); + out->print_cr(" " SIZE_FORMAT_W(5) " upgraded from Degenerated GC", _alloc_failure_degenerated_upgrade_to_full); +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp 2020-01-17 17:09:59.515131063 +0100 @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTORPOLICY_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTORPOLICY_HPP + +#include "gc/shared/collectorPolicy.hpp" +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahTracer.hpp" +#include "utilities/ostream.hpp" + +class ShenandoahCollectorPolicy: public CollectorPolicy { +private: + size_t _success_concurrent_gcs; + size_t _success_degenerated_gcs; + size_t _success_full_gcs; + size_t _alloc_failure_degenerated; + size_t _alloc_failure_degenerated_upgrade_to_full; + size_t _alloc_failure_full; + size_t _explicit_concurrent; + size_t _explicit_full; + size_t _implicit_concurrent; + size_t _implicit_full; + size_t _degen_points[ShenandoahHeap::_DEGENERATED_LIMIT]; + + ShenandoahSharedFlag _in_shutdown; + + ShenandoahTracer* _tracer; + + size_t _cycle_counter; + +public: + ShenandoahCollectorPolicy(); + + void initialize_alignments(); + + // TODO: This is different from gc_end: that one encompasses one VM operation. + // These two encompass the entire cycle. + void record_cycle_start(); + + void record_success_concurrent(); + void record_success_degenerated(); + void record_success_full(); + void record_alloc_failure_to_degenerated(ShenandoahHeap::ShenandoahDegenPoint point); + void record_alloc_failure_to_full(); + void record_degenerated_upgrade_to_full(); + void record_explicit_to_concurrent(); + void record_explicit_to_full(); + void record_implicit_to_concurrent(); + void record_implicit_to_full(); + + void record_shutdown(); + bool is_at_shutdown(); + + ShenandoahTracer* tracer() {return _tracer;} + + size_t cycle_counter() const; + + void print_gc_stats(outputStream* out) const; +}; + +#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTORPOLICY_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp 2020-01-17 17:10:00.104131030 +0100 @@ -0,0 +1,1021 @@ +/* + * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "code/codeCache.hpp"
+
+#include "gc/shared/weakProcessor.hpp"
+#include "gc/shared/gcTimer.hpp"
+#include "gc/shared/referenceProcessor.hpp"
+#include "gc/shared/referenceProcessorPhaseTimes.hpp"
+
+#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
+#include "gc/shenandoah/shenandoahClosures.inline.hpp"
+#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
+#include "gc/shenandoah/shenandoahMarkCompact.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
+#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
+#include "gc/shenandoah/shenandoahTimingTracker.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+
+#include "memory/iterator.inline.hpp"
+#include "memory/metaspace.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/oop.inline.hpp"
+
+template <UpdateRefsMode UPDATE_REFS>
+class ShenandoahInitMarkRootsClosure : public OopClosure {
+private:
+  ShenandoahObjToScanQueue* _queue;
+  ShenandoahHeap* _heap;
+  ShenandoahMarkingContext* const _mark_context;
+
+  template <class T>
+  inline void do_oop_work(T* p) {
+    ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, NO_DEDUP>(p, _heap, _queue, _mark_context);
+  }
+
+public:
+  ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
+    _queue(q),
+    _heap(ShenandoahHeap::heap()),
+    _mark_context(_heap->marking_context()) {};
+
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+  void do_oop(oop* p)       { do_oop_work(p); }
+};
+
+ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+  MetadataVisitingOopIterateClosure(rp),
+  _queue(q),
+  _heap(ShenandoahHeap::heap()),
+  _mark_context(_heap->marking_context())
+{ }
+
+template <UpdateRefsMode UPDATE_REFS>
+class ShenandoahInitMarkRootsTask : public AbstractGangTask {
+private:
+  ShenandoahAllRootScanner* _rp;
+  bool _process_refs;
+public:
+  ShenandoahInitMarkRootsTask(ShenandoahAllRootScanner* rp, bool process_refs) :
+    AbstractGangTask("Shenandoah init mark roots task"),
+    _rp(rp),
+    _process_refs(process_refs) {
+  }
+
+  void work(uint worker_id) {
+    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+    ShenandoahParallelWorkerSession worker_session(worker_id);
+
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    ShenandoahObjToScanQueueSet* queues = heap->concurrent_mark()->task_queues();
+    assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %d", worker_id);
+
+    ShenandoahObjToScanQueue* q = queues->queue(worker_id);
+
+    ShenandoahInitMarkRootsClosure<UPDATE_REFS> mark_cl(q);
+    do_work(heap, &mark_cl, worker_id);
+  }
+
+private:
+  void do_work(ShenandoahHeap* heap, OopClosure* oops, uint worker_id) {
+    // The rationale for selecting the roots to scan is as follows:
+    // a. With unload_classes = true, we only want to scan the actual strong roots from the
+    //    code cache. This will allow us to identify the dead classes, unload them, *and*
+    //    invalidate the relevant code cache blobs. This can only be done together with
+    //    class unloading.
+    // b. With unload_classes = false, we have to nominally retain all the references from code
+    //    cache, because there could be the case of embedded class/oop in the generated code,
+    //    which we will never visit during mark.
Without code cache invalidation, as in (a), + // we risk executing that code cache blob, and crashing. + if (heap->unload_classes()) { + _rp->strong_roots_do(worker_id, oops); + } else { + _rp->roots_do(worker_id, oops); + } + } +}; + +class ShenandoahUpdateRootsTask : public AbstractGangTask { +private: + ShenandoahRootUpdater* _root_updater; +public: + ShenandoahUpdateRootsTask(ShenandoahRootUpdater* root_updater) : + AbstractGangTask("Shenandoah update roots task"), + _root_updater(root_updater) { + } + + void work(uint worker_id) { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); + ShenandoahParallelWorkerSession worker_session(worker_id); + + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahUpdateRefsClosure cl; + AlwaysTrueClosure always_true; + _root_updater->roots_do(worker_id, &always_true, &cl); + } +}; + +class ShenandoahConcurrentMarkingTask : public AbstractGangTask { +private: + ShenandoahConcurrentMark* _cm; + ShenandoahTaskTerminator* _terminator; + +public: + ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator) : + AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator) { + } + + void work(uint worker_id) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahConcurrentWorkerSession worker_session(worker_id); + ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers); + ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id); + ReferenceProcessor* rp; + if (heap->process_references()) { + rp = heap->ref_processor(); + shenandoah_assert_rp_isalive_installed(); + } else { + rp = NULL; + } + + _cm->concurrent_scan_code_roots(worker_id, rp); + _cm->mark_loop(worker_id, _terminator, rp, + true, // cancellable + ShenandoahStringDedup::is_enabled()); // perform string dedup + } +}; + +class ShenandoahSATBThreadsClosure : public ThreadClosure { +private: + ShenandoahConcMarkSATBBufferClosure* _satb_cl; + int _thread_parity; + +public: + ShenandoahSATBThreadsClosure(ShenandoahConcMarkSATBBufferClosure* satb_cl) : + _satb_cl(satb_cl), + _thread_parity(Threads::thread_claim_parity()) {} + + void do_thread(Thread* thread) { + if (thread->is_Java_thread()) { + if (thread->claim_oops_do(true, _thread_parity)) { + JavaThread* jt = (JavaThread*)thread; + ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl); + } + } else if (thread->is_VM_thread()) { + if (thread->claim_oops_do(true, _thread_parity)) { + ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl); + } + } + } +}; + +class ShenandoahFinalMarkingTask : public AbstractGangTask { +private: + ShenandoahConcurrentMark* _cm; + ShenandoahTaskTerminator* _terminator; + bool _dedup_string; + +public: + ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator, bool dedup_string) : + AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) { + } + + void work(uint worker_id) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + + ShenandoahParallelWorkerSession worker_session(worker_id); + // First drain remaining SATB buffers. + // Notice that this is not strictly necessary for mark-compact. But since + // it requires a StrongRootsScope around the task, we need to claim the + // threads, and performance-wise it doesn't really matter. Adds about 1ms to + // full-gc. 
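+    // SATB ("snapshot-at-the-beginning") is what keeps concurrent marking correct:
+    // the pre-write barrier records the old value of every overwritten reference,
+    // e.g. for "obj.f = x" the previous obj.f is pushed to a thread-local buffer
+    // before the store. Everything live in the marking snapshot is therefore either
+    // reached by tracing or parked in one of these buffers; draining them here
+    // closes that window.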
+ { + ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id); + ShenandoahConcMarkSATBBufferClosure cl(q); + ShenandoahSATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set(); + while (satb_mq_set.apply_closure_to_completed_buffer(&cl)); + ShenandoahSATBThreadsClosure tc(&cl); + Threads::threads_do(&tc); + } + + ReferenceProcessor* rp; + if (heap->process_references()) { + rp = heap->ref_processor(); + shenandoah_assert_rp_isalive_installed(); + } else { + rp = NULL; + } + + if (heap->is_degenerated_gc_in_progress()) { + // Degenerated cycle may bypass concurrent cycle, so code roots might not be scanned, + // let's check here. + _cm->concurrent_scan_code_roots(worker_id, rp); + } + + _cm->mark_loop(worker_id, _terminator, rp, + false, // not cancellable + _dedup_string); + + assert(_cm->task_queues()->is_empty(), "Should be empty"); + } +}; + +void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) { + assert(Thread::current()->is_VM_thread(), "can only do this in VMThread"); + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); + + ShenandoahHeap* heap = ShenandoahHeap::heap(); + + ShenandoahGCPhase phase(root_phase); + + WorkGang* workers = heap->workers(); + uint nworkers = workers->active_workers(); + + assert(nworkers <= task_queues()->size(), "Just check"); + + ShenandoahAllRootScanner root_proc(nworkers, root_phase); + TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats()); + task_queues()->reserve(nworkers); + + if (heap->has_forwarded_objects()) { + ShenandoahInitMarkRootsTask mark_roots(&root_proc, _heap->process_references()); + workers->run_task(&mark_roots); + } else { + // No need to update references, which means the heap is stable. + // Can save time not walking through forwarding pointers. 
+ ShenandoahInitMarkRootsTask mark_roots(&root_proc, _heap->process_references()); + workers->run_task(&mark_roots); + } + + if (ShenandoahConcurrentScanCodeRoots) { + clear_claim_codecache(); + } +} + +void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); + + bool update_code_cache = true; // initialize to safer value + switch (root_phase) { + case ShenandoahPhaseTimings::update_roots: + case ShenandoahPhaseTimings::final_update_refs_roots: + update_code_cache = false; + break; + case ShenandoahPhaseTimings::full_gc_roots: + case ShenandoahPhaseTimings::degen_gc_update_roots: + update_code_cache = true; + break; + default: + ShouldNotReachHere(); + } + + ShenandoahGCPhase phase(root_phase); + +#if COMPILER2_OR_JVMCI + DerivedPointerTable::clear(); +#endif + + uint nworkers = _heap->workers()->active_workers(); + + ShenandoahRootUpdater root_updater(nworkers, root_phase, update_code_cache); + ShenandoahUpdateRootsTask update_roots(&root_updater); + _heap->workers()->run_task(&update_roots); + +#if COMPILER2_OR_JVMCI + DerivedPointerTable::update_pointers(); +#endif +} + +class ShenandoahUpdateThreadRootsTask : public AbstractGangTask { +private: + ShenandoahThreadRoots _thread_roots; + ShenandoahPhaseTimings::Phase _phase; +public: + ShenandoahUpdateThreadRootsTask(bool is_par, ShenandoahPhaseTimings::Phase phase) : + AbstractGangTask("Shenandoah Update Thread Roots"), + _thread_roots(is_par), + _phase(phase) { + ShenandoahHeap::heap()->phase_timings()->record_workers_start(_phase); + } + + ~ShenandoahUpdateThreadRootsTask() { + ShenandoahHeap::heap()->phase_timings()->record_workers_end(_phase); + } + void work(uint worker_id) { + ShenandoahUpdateRefsClosure cl; + _thread_roots.oops_do(&cl, NULL, worker_id); + } +}; + +void ShenandoahConcurrentMark::update_thread_roots(ShenandoahPhaseTimings::Phase root_phase) { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); + + ShenandoahGCPhase phase(root_phase); + +#if COMPILER2_OR_JVMCI + DerivedPointerTable::clear(); +#endif + + WorkGang* workers = _heap->workers(); + bool is_par = workers->active_workers() > 1; + + ShenandoahUpdateThreadRootsTask task(is_par, root_phase); + workers->run_task(&task); + +#if COMPILER2_OR_JVMCI + DerivedPointerTable::update_pointers(); +#endif +} + +void ShenandoahConcurrentMark::initialize(uint workers) { + _heap = ShenandoahHeap::heap(); + + uint num_queues = MAX2(workers, 1U); + + _task_queues = new ShenandoahObjToScanQueueSet((int) num_queues); + + for (uint i = 0; i < num_queues; ++i) { + ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue(); + task_queue->initialize(); + _task_queues->register_queue(i, task_queue); + } + + ShenandoahBarrierSet::satb_mark_queue_set().set_buffer_size(ShenandoahSATBBufferSize); +} + +void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) { + if (ShenandoahConcurrentScanCodeRoots && claim_codecache()) { + ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id); + if (!_heap->unload_classes()) { + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + // TODO: We can not honor StringDeduplication here, due to lock ranking + // inversion. So, we may miss some deduplication candidates. 
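+      // If a previous cycle left forwarded objects behind (e.g. we are finishing a
+      // degenerated cycle), oops embedded in code blobs may still point at from-space
+      // copies. The "resolve" flavor of the closure below follows the forwardee before
+      // marking, while the plain marking closure suffices for a clean heap.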
+ if (_heap->has_forwarded_objects()) { + ShenandoahMarkResolveRefsClosure cl(q, rp); + CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations); + CodeCache::blobs_do(&blobs); + } else { + ShenandoahMarkRefsClosure cl(q, rp); + CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations); + CodeCache::blobs_do(&blobs); + } + } + } +} + +void ShenandoahConcurrentMark::mark_from_roots() { + WorkGang* workers = _heap->workers(); + uint nworkers = workers->active_workers(); + + ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark); + + if (_heap->process_references()) { + ReferenceProcessor* rp = _heap->ref_processor(); + rp->set_active_mt_degree(nworkers); + + // enable ("weak") refs discovery + rp->enable_discovery(true /*verify_no_refs*/); + rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs()); + } + + shenandoah_assert_rp_isalive_not_installed(); + ShenandoahIsAliveSelector is_alive; + ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure()); + + task_queues()->reserve(nworkers); + + { + ShenandoahTerminationTracker term(ShenandoahPhaseTimings::conc_termination); + ShenandoahTaskTerminator terminator(nworkers, task_queues()); + ShenandoahConcurrentMarkingTask task(this, &terminator); + workers->run_task(&task); + } + + assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled"); +} + +void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); + + uint nworkers = _heap->workers()->active_workers(); + + // Finally mark everything else we've got in our queues during the previous steps. + // It does two different things for concurrent vs. mark-compact GC: + // - For concurrent GC, it starts with empty task queues, drains the remaining + // SATB buffers, and then completes the marking closure. + // - For mark-compact GC, it starts out with the task queues seeded by initial + // root scan, and completes the closure, thus marking through all live objects + // The implementation is the same, so it's shared here. + { + ShenandoahGCPhase phase(full_gc ? + ShenandoahPhaseTimings::full_gc_mark_finish_queues : + ShenandoahPhaseTimings::finish_queues); + task_queues()->reserve(nworkers); + + shenandoah_assert_rp_isalive_not_installed(); + ShenandoahIsAliveSelector is_alive; + ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure()); + + ShenandoahTerminationTracker termination_tracker(full_gc ? + ShenandoahPhaseTimings::full_gc_mark_termination : + ShenandoahPhaseTimings::termination); + + StrongRootsScope scope(nworkers); + ShenandoahTaskTerminator terminator(nworkers, task_queues()); + ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled()); + _heap->workers()->run_task(&task); + } + + assert(task_queues()->is_empty(), "Should be empty"); + + // When we're done marking everything, we process weak references. 
+ if (_heap->process_references()) { + weak_refs_work(full_gc); + } + + weak_roots_work(); + + // And finally finish class unloading + if (_heap->unload_classes()) { + _heap->unload_classes_and_cleanup_tables(full_gc); + } else { + ShenandoahIsAliveSelector alive; + StringTable::unlink(alive.is_alive_closure()); + } + if (ShenandoahStringDedup::is_enabled()) { + ShenandoahIsAliveSelector alive; + BoolObjectClosure* is_alive = alive.is_alive_closure(); + ShenandoahStringDedup::unlink_or_oops_do(is_alive, NULL, false); + } + assert(task_queues()->is_empty(), "Should be empty"); + TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats()); + TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats()); + + // Resize Metaspace + MetaspaceGC::compute_new_size(); +} + +// Weak Reference Closures +class ShenandoahCMDrainMarkingStackClosure: public VoidClosure { + uint _worker_id; + ShenandoahTaskTerminator* _terminator; + bool _reset_terminator; + +public: + ShenandoahCMDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false): + _worker_id(worker_id), + _terminator(t), + _reset_terminator(reset_terminator) { + } + + void do_void() { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); + + ShenandoahHeap* sh = ShenandoahHeap::heap(); + ShenandoahConcurrentMark* scm = sh->concurrent_mark(); + assert(sh->process_references(), "why else would we be here?"); + ReferenceProcessor* rp = sh->ref_processor(); + + shenandoah_assert_rp_isalive_installed(); + + scm->mark_loop(_worker_id, _terminator, rp, + false, // not cancellable + false); // do not do strdedup + + if (_reset_terminator) { + _terminator->reset_for_reuse(); + } + } +}; + +class ShenandoahCMKeepAliveClosure : public OopClosure { +private: + ShenandoahObjToScanQueue* _queue; + ShenandoahHeap* _heap; + ShenandoahMarkingContext* const _mark_context; + + template + inline void do_oop_work(T* p) { + ShenandoahConcurrentMark::mark_through_ref(p, _heap, _queue, _mark_context); + } + +public: + ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) : + _queue(q), + _heap(ShenandoahHeap::heap()), + _mark_context(_heap->marking_context()) {} + + void do_oop(narrowOop* p) { do_oop_work(p); } + void do_oop(oop* p) { do_oop_work(p); } +}; + +class ShenandoahCMKeepAliveUpdateClosure : public OopClosure { +private: + ShenandoahObjToScanQueue* _queue; + ShenandoahHeap* _heap; + ShenandoahMarkingContext* const _mark_context; + + template + inline void do_oop_work(T* p) { + ShenandoahConcurrentMark::mark_through_ref(p, _heap, _queue, _mark_context); + } + +public: + ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) : + _queue(q), + _heap(ShenandoahHeap::heap()), + _mark_context(_heap->marking_context()) {} + + void do_oop(narrowOop* p) { do_oop_work(p); } + void do_oop(oop* p) { do_oop_work(p); } +}; + +class ShenandoahWeakUpdateClosure : public OopClosure { +private: + ShenandoahHeap* const _heap; + + template + inline void do_oop_work(T* p) { + oop o = _heap->maybe_update_with_forwarded(p); + shenandoah_assert_marked_except(p, o, o == NULL); + } + +public: + ShenandoahWeakUpdateClosure() : _heap(ShenandoahHeap::heap()) {} + + void do_oop(narrowOop* p) { do_oop_work(p); } + void do_oop(oop* p) { do_oop_work(p); } +}; + +class ShenandoahWeakAssertNotForwardedClosure : public OopClosure { +private: + template + inline void do_oop_work(T* p) { +#ifdef ASSERT + T o = RawAccess<>::oop_load(p); + if (!CompressedOops::is_null(o)) { + oop obj = 
CompressedOops::decode_not_null(o); + shenandoah_assert_not_forwarded(p, obj); + } +#endif + } + +public: + ShenandoahWeakAssertNotForwardedClosure() {} + + void do_oop(narrowOop* p) { do_oop_work(p); } + void do_oop(oop* p) { do_oop_work(p); } +}; + +class ShenandoahRefProcTaskProxy : public AbstractGangTask { +private: + AbstractRefProcTaskExecutor::ProcessTask& _proc_task; + ShenandoahTaskTerminator* _terminator; + +public: + ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task, + ShenandoahTaskTerminator* t) : + AbstractGangTask("Process reference objects in parallel"), + _proc_task(proc_task), + _terminator(t) { + } + + void work(uint worker_id) { + ResourceMark rm; + HandleMark hm; + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator); + if (heap->has_forwarded_objects()) { + ShenandoahForwardedIsAliveClosure is_alive; + ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id)); + _proc_task.work(worker_id, is_alive, keep_alive, complete_gc); + } else { + ShenandoahIsAliveClosure is_alive; + ShenandoahCMKeepAliveClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id)); + _proc_task.work(worker_id, is_alive, keep_alive, complete_gc); + } + } +}; + +class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor { +private: + WorkGang* _workers; + +public: + ShenandoahRefProcTaskExecutor(WorkGang* workers) : + _workers(workers) { + } + + // Executes a task using worker threads. + void execute(ProcessTask& task, uint ergo_workers) { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); + + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahConcurrentMark* cm = heap->concurrent_mark(); + ShenandoahPushWorkerQueuesScope scope(_workers, cm->task_queues(), + ergo_workers, + /* do_check = */ false); + uint nworkers = _workers->active_workers(); + cm->task_queues()->reserve(nworkers); + ShenandoahTaskTerminator terminator(nworkers, cm->task_queues()); + ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator); + _workers->run_task(&proc_task_proxy); + } +}; + +void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) { + assert(_heap->process_references(), "sanity"); + + ShenandoahPhaseTimings::Phase phase_root = + full_gc ? + ShenandoahPhaseTimings::full_gc_weakrefs : + ShenandoahPhaseTimings::weakrefs; + + ShenandoahGCPhase phase(phase_root); + + ReferenceProcessor* rp = _heap->ref_processor(); + + // NOTE: We cannot shortcut on has_discovered_references() here, because + // we will miss marking JNI Weak refs then, see implementation in + // ReferenceProcessor::process_discovered_references. + weak_refs_work_doit(full_gc); + + rp->verify_no_references_recorded(); + assert(!rp->discovery_enabled(), "Post condition"); + +} + +// Process leftover weak oops: update them, if needed or assert they do not +// need updating otherwise. +// Weak processor API requires us to visit the oops, even if we are not doing +// anything to them. 
+void ShenandoahConcurrentMark::weak_roots_work() { + OopClosure* keep_alive = &do_nothing_cl; +#ifdef ASSERT + ShenandoahWeakAssertNotForwardedClosure verify_cl; + keep_alive = &verify_cl; +#endif + ShenandoahIsAliveClosure is_alive; + WeakProcessor::weak_oops_do(&is_alive, keep_alive); +} + +void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) { + ReferenceProcessor* rp = _heap->ref_processor(); + + ShenandoahPhaseTimings::Phase phase_process = + full_gc ? + ShenandoahPhaseTimings::full_gc_weakrefs_process : + ShenandoahPhaseTimings::weakrefs_process; + + ShenandoahPhaseTimings::Phase phase_process_termination = + full_gc ? + ShenandoahPhaseTimings::full_gc_weakrefs_termination : + ShenandoahPhaseTimings::weakrefs_termination; + + shenandoah_assert_rp_isalive_not_installed(); + ShenandoahIsAliveSelector is_alive; + ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure()); + + WorkGang* workers = _heap->workers(); + uint nworkers = workers->active_workers(); + + rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs()); + rp->set_active_mt_degree(nworkers); + + assert(task_queues()->is_empty(), "Should be empty"); + + // complete_gc and keep_alive closures instantiated here are only needed for + // single-threaded path in RP. They share the queue 0 for tracking work, which + // simplifies implementation. Since RP may decide to call complete_gc several + // times, we need to be able to reuse the terminator. + uint serial_worker_id = 0; + ShenandoahTaskTerminator terminator(1, task_queues()); + ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true); + + ShenandoahRefProcTaskExecutor executor(workers); + + ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues()); + + { + ShenandoahGCPhase phase(phase_process); + ShenandoahTerminationTracker phase_term(phase_process_termination); + + if (_heap->has_forwarded_objects()) { + ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id)); + rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive, + &complete_gc, &executor, + &pt); + + } else { + ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id)); + rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive, + &complete_gc, &executor, + &pt); + + } + + pt.print_all_references(); + + assert(task_queues()->is_empty(), "Should be empty"); + } +} + +class ShenandoahCancelledGCYieldClosure : public YieldClosure { +private: + ShenandoahHeap* const _heap; +public: + ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {}; + virtual bool should_return() { return _heap->cancelled_gc(); } +}; + +class ShenandoahPrecleanCompleteGCClosure : public VoidClosure { +public: + void do_void() { + ShenandoahHeap* sh = ShenandoahHeap::heap(); + ShenandoahConcurrentMark* scm = sh->concurrent_mark(); + assert(sh->process_references(), "why else would we be here?"); + ShenandoahTaskTerminator terminator(1, scm->task_queues()); + + ReferenceProcessor* rp = sh->ref_processor(); + shenandoah_assert_rp_isalive_installed(); + + scm->mark_loop(0, &terminator, rp, + false, // not cancellable + false); // do not do strdedup + } +}; + +class ShenandoahPrecleanKeepAliveUpdateClosure : public OopClosure { +private: + ShenandoahObjToScanQueue* _queue; + ShenandoahHeap* _heap; + ShenandoahMarkingContext* const _mark_context; + + template + inline void do_oop_work(T* p) { + ShenandoahConcurrentMark::mark_through_ref(p, _heap, _queue, 
_mark_context); + } + +public: + ShenandoahPrecleanKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) : + _queue(q), + _heap(ShenandoahHeap::heap()), + _mark_context(_heap->marking_context()) {} + + void do_oop(narrowOop* p) { do_oop_work(p); } + void do_oop(oop* p) { do_oop_work(p); } +}; + +class ShenandoahPrecleanTask : public AbstractGangTask { +private: + ReferenceProcessor* _rp; + +public: + ShenandoahPrecleanTask(ReferenceProcessor* rp) : + AbstractGangTask("Precleaning task"), + _rp(rp) {} + + void work(uint worker_id) { + assert(worker_id == 0, "The code below is single-threaded, only one worker is expected"); + ShenandoahParallelWorkerSession worker_session(worker_id); + + ShenandoahHeap* sh = ShenandoahHeap::heap(); + + ShenandoahObjToScanQueue* q = sh->concurrent_mark()->get_queue(worker_id); + + ShenandoahCancelledGCYieldClosure yield; + ShenandoahPrecleanCompleteGCClosure complete_gc; + + if (sh->has_forwarded_objects()) { + ShenandoahForwardedIsAliveClosure is_alive; + ShenandoahPrecleanKeepAliveUpdateClosure keep_alive(q); + ResourceMark rm; + _rp->preclean_discovered_references(&is_alive, &keep_alive, + &complete_gc, &yield, + NULL); + } else { + ShenandoahIsAliveClosure is_alive; + ShenandoahCMKeepAliveClosure keep_alive(q); + ResourceMark rm; + _rp->preclean_discovered_references(&is_alive, &keep_alive, + &complete_gc, &yield, + NULL); + } + } +}; + +void ShenandoahConcurrentMark::preclean_weak_refs() { + // Pre-cleaning weak references before diving into STW makes sense at the + // end of concurrent mark. This will filter out the references which referents + // are alive. Note that ReferenceProcessor already filters out these on reference + // discovery, and the bulk of work is done here. This phase processes leftovers + // that missed the initial filtering, i.e. when referent was marked alive after + // reference was discovered by RP. + + assert(_heap->process_references(), "sanity"); + + // Shortcut if no references were discovered to avoid winding up threads. + ReferenceProcessor* rp = _heap->ref_processor(); + if (!rp->has_discovered_references()) { + return; + } + + assert(task_queues()->is_empty(), "Should be empty"); + + ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false); + + shenandoah_assert_rp_isalive_not_installed(); + ShenandoahIsAliveSelector is_alive; + ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure()); + + // Execute precleaning in the worker thread: it will give us GCLABs, String dedup + // queues and other goodies. When upstream ReferenceProcessor starts supporting + // parallel precleans, we can extend this to more threads. + WorkGang* workers = _heap->workers(); + uint nworkers = workers->active_workers(); + assert(nworkers == 1, "This code uses only a single worker"); + task_queues()->reserve(nworkers); + + ShenandoahPrecleanTask task(rp); + workers->run_task(&task); + + assert(task_queues()->is_empty(), "Should be empty"); +} + +void ShenandoahConcurrentMark::cancel() { + // Clean up marking stacks. + ShenandoahObjToScanQueueSet* queues = task_queues(); + queues->clear(); + + // Cancel SATB buffers. 
+ ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking(); +} + +ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) { + assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %d", worker_id); + return _task_queues->queue(worker_id); +} + +template +void ShenandoahConcurrentMark::mark_loop_prework(uint w, ShenandoahTaskTerminator *t, ReferenceProcessor *rp, + bool strdedup) { + ShenandoahObjToScanQueue* q = get_queue(w); + + jushort* ld = _heap->get_liveness_cache(w); + + // TODO: We can clean up this if we figure out how to do templated oop closures that + // play nice with specialized_oop_iterators. + if (_heap->unload_classes()) { + if (_heap->has_forwarded_objects()) { + if (strdedup) { + ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, rp); + mark_loop_work(&cl, ld, w, t); + } else { + ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp); + mark_loop_work(&cl, ld, w, t); + } + } else { + if (strdedup) { + ShenandoahMarkRefsMetadataDedupClosure cl(q, rp); + mark_loop_work(&cl, ld, w, t); + } else { + ShenandoahMarkRefsMetadataClosure cl(q, rp); + mark_loop_work(&cl, ld, w, t); + } + } + } else { + if (_heap->has_forwarded_objects()) { + if (strdedup) { + ShenandoahMarkUpdateRefsDedupClosure cl(q, rp); + mark_loop_work(&cl, ld, w, t); + } else { + ShenandoahMarkUpdateRefsClosure cl(q, rp); + mark_loop_work(&cl, ld, w, t); + } + } else { + if (strdedup) { + ShenandoahMarkRefsDedupClosure cl(q, rp); + mark_loop_work(&cl, ld, w, t); + } else { + ShenandoahMarkRefsClosure cl(q, rp); + mark_loop_work(&cl, ld, w, t); + } + } + } + + _heap->flush_liveness_cache(w); +} + +template +void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator *terminator) { + int seed = 17; + uintx stride = ShenandoahMarkLoopStride; + + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahObjToScanQueueSet* queues = task_queues(); + ShenandoahObjToScanQueue* q; + ShenandoahMarkTask t; + + /* + * Process outstanding queues, if any. + * + * There can be more queues than workers. To deal with the imbalance, we claim + * extra queues first. Since marking can push new tasks into the queue associated + * with this worker id, we come back to process this queue in the normal loop. + */ + assert(queues->get_reserved() == heap->workers()->active_workers(), + "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers()); + + q = queues->claim_next(); + while (q != NULL) { + if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) { + return; + } + + for (uint i = 0; i < stride; i++) { + if (q->pop(t)) { + do_task(q, cl, live_data, &t); + } else { + assert(q->is_empty(), "Must be empty"); + q = queues->claim_next(); + break; + } + } + } + q = get_queue(worker_id); + + ShenandoahConcMarkSATBBufferClosure drain_satb(q); + ShenandoahSATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set(); + + /* + * Normal marking loop: + */ + while (true) { + if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) { + return; + } + + while (satb_mq_set.completed_buffers_num() > 0) { + satb_mq_set.apply_closure_to_completed_buffer(&drain_satb); + } + + uint work = 0; + for (uint i = 0; i < stride; i++) { + if (q->pop(t) || + queues->steal(worker_id, &seed, t)) { + do_task(q, cl, live_data, &t); + work++; + } else { + break; + } + } + + if (work == 0) { + // No work encountered in current stride, try to terminate. 
+ // Need to leave the STS here otherwise it might block safepoints. + ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers); + ShenandoahTerminationTimingsTracker term_tracker(worker_id); + ShenandoahTerminatorTerminator tt(heap); + if (terminator->offer_termination(&tt)) return; + } + } +} + +bool ShenandoahConcurrentMark::claim_codecache() { + assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise"); + return _claimed_codecache.try_set(); +} + +void ShenandoahConcurrentMark::clear_claim_codecache() { + assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise"); + _claimed_codecache.unset(); +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.hpp 2020-01-17 17:10:00.706130997 +0100 @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP
+
+#include "gc/shared/taskqueue.hpp"
+#include "gc/shenandoah/shenandoahOopClosures.hpp"
+#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
+
+class ShenandoahStrDedupQueue;
+
+class ShenandoahConcurrentMark: public CHeapObj<mtGC> {
+  friend class ShenandoahTraversalGC;
+private:
+  ShenandoahHeap* _heap;
+  ShenandoahObjToScanQueueSet* _task_queues;
+
+public:
+  void initialize(uint workers);
+  void cancel();
+
+// ---------- Marking loop and tasks
+//
+private:
+  template <class T>
+  inline void do_task(ShenandoahObjToScanQueue* q, T* cl, jushort* live_data, ShenandoahMarkTask* task);
+
+  template <class T>
+  inline void do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop array);
+
+  template <class T>
+  inline void do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop array, int chunk, int pow);
+
+  inline void count_liveness(jushort* live_data, oop obj);
+
+  template <class T, bool CANCELLABLE>
+  void mark_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator *t);
+
+  template <bool CANCELLABLE>
+  void mark_loop_prework(uint worker_id, ShenandoahTaskTerminator *terminator, ReferenceProcessor *rp, bool strdedup);
+
+public:
+  void mark_loop(uint worker_id, ShenandoahTaskTerminator* terminator, ReferenceProcessor *rp,
+                 bool cancellable, bool strdedup) {
+    if (cancellable) {
+      mark_loop_prework<true>(worker_id, terminator, rp, strdedup);
+    } else {
+      mark_loop_prework<false>(worker_id, terminator, rp, strdedup);
+    }
+  }
+
+  template <class T, UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP>
+  static inline void mark_through_ref(T* p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context);
+
+  void mark_from_roots();
+  void finish_mark_from_roots(bool full_gc);
+
+  void mark_roots(ShenandoahPhaseTimings::Phase root_phase);
+  void update_roots(ShenandoahPhaseTimings::Phase root_phase);
+  void update_thread_roots(ShenandoahPhaseTimings::Phase root_phase);
+
+// ---------- Weak references
+//
+private:
+  void weak_refs_work(bool full_gc);
+  void weak_refs_work_doit(bool full_gc);
+
+  void weak_roots_work();
+
+public:
+  void preclean_weak_refs();
+
+// ---------- Concurrent code cache
+//
+private:
+  ShenandoahSharedFlag _claimed_codecache;
+
+public:
+  void concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp);
+  bool claim_codecache();
+  void clear_claim_codecache();
+
+// ---------- Helpers
+// Used from closures, need to be public
+//
+public:
+  ShenandoahObjToScanQueue* get_queue(uint worker_id);
+  ShenandoahObjToScanQueueSet* task_queues() { return _task_queues; }
+
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.inline.hpp	2020-01-17 17:10:01.316130964 +0100
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP + +#include "gc/shenandoah/shenandoahAsserts.hpp" +#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp" +#include "gc/shenandoah/shenandoahConcurrentMark.hpp" +#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp" +#include "gc/shenandoah/shenandoahStringDedup.inline.hpp" +#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp" +#include "memory/iterator.inline.hpp" +#include "oops/oop.inline.hpp" +#include "runtime/prefetch.inline.hpp" + +template +void ShenandoahConcurrentMark::do_task(ShenandoahObjToScanQueue* q, T* cl, jushort* live_data, ShenandoahMarkTask* task) { + oop obj = task->obj(); + + shenandoah_assert_not_forwarded_except(NULL, obj, _heap->is_concurrent_traversal_in_progress() && _heap->cancelled_gc()); + shenandoah_assert_marked(NULL, obj); + shenandoah_assert_not_in_cset_except(NULL, obj, _heap->cancelled_gc()); + + if (task->is_not_chunked()) { + if (obj->is_instance()) { + // Case 1: Normal oop, process as usual. + obj->oop_iterate(cl); + } else if (obj->is_objArray()) { + // Case 2: Object array instance and no chunk is set. Must be the first + // time we visit it, start the chunked processing. + do_chunked_array_start(q, cl, obj); + } else { + // Case 3: Primitive array. Do nothing, no oops there. We use the same + // performance tweak TypeArrayKlass::oop_oop_iterate_impl is using: + // We skip iterating over the klass pointer since we know that + // Universe::TypeArrayKlass never moves. + assert (obj->is_typeArray(), "should be type array"); + } + // Count liveness the last: push the outstanding work to the queues first + count_liveness(live_data, obj); + } else { + // Case 4: Array chunk, has sensible chunk id. Process it. 
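+    // The (chunk, pow) pair encodes the index range: for example (chunk == 3,
+    // pow == 11) denotes elements [2 * 2048, 3 * 2048), i.e. from
+    // (chunk - 1) << pow up to chunk << pow, possibly split further in
+    // do_chunked_array before scanning.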
+ do_chunked_array(q, cl, obj, task->chunk(), task->pow()); + } +} + +inline void ShenandoahConcurrentMark::count_liveness(jushort* live_data, oop obj) { + size_t region_idx = _heap->heap_region_index_containing(obj); + ShenandoahHeapRegion* region = _heap->get_region(region_idx); + size_t size = obj->size(); + + if (!region->is_humongous_start()) { + assert(!region->is_humongous(), "Cannot have continuations here"); + size_t max = (1 << (sizeof(jushort) * 8)) - 1; + if (size >= max) { + // too big, add to region data directly + region->increase_live_data_gc_words(size); + } else { + jushort cur = live_data[region_idx]; + size_t new_val = cur + size; + if (new_val >= max) { + // overflow, flush to region data + region->increase_live_data_gc_words(new_val); + live_data[region_idx] = 0; + } else { + // still good, remember in locals + live_data[region_idx] = (jushort) new_val; + } + } + } else { + shenandoah_assert_in_correct_region(NULL, obj); + size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize); + + for (size_t i = region_idx; i < region_idx + num_regions; i++) { + ShenandoahHeapRegion* chain_reg = _heap->get_region(i); + assert(chain_reg->is_humongous(), "Expecting a humongous region"); + chain_reg->increase_live_data_gc_words(chain_reg->used() >> LogHeapWordSize); + } + } +} + +template +inline void ShenandoahConcurrentMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj) { + assert(obj->is_objArray(), "expect object array"); + objArrayOop array = objArrayOop(obj); + int len = array->length(); + + if (len <= (int) ObjArrayMarkingStride*2) { + // A few slices only, process directly + array->oop_iterate_range(cl, 0, len); + } else { + int bits = log2_long(len); + // Compensate for non-power-of-two arrays, cover the array in excess: + if (len != (1 << bits)) bits++; + + // Only allow full chunks on the queue. This frees do_chunked_array() from checking from/to + // boundaries against array->length(), touching the array header on every chunk. + // + // To do this, we cut the prefix in full-sized chunks, and submit them on the queue. + // If the array is not divided in chunk sizes, then there would be an irregular tail, + // which we will process separately. + + int last_idx = 0; + + int chunk = 1; + int pow = bits; + + // Handle overflow + if (pow >= 31) { + assert (pow == 31, "sanity"); + pow--; + chunk = 2; + last_idx = (1 << pow); + bool pushed = q->push(ShenandoahMarkTask(array, 1, pow)); + assert(pushed, "overflow queue should always succeed pushing"); + } + + // Split out tasks, as suggested in ObjArrayChunkedTask docs. Record the last + // successful right boundary to figure out the irregular tail. 
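+    // Worked example, assuming ObjArrayMarkingStride == 2048: for len == 5000,
+    // bits == 13 (5000 is not a power of two, so 12 gets bumped).
+    //   pow 13 -> 12: left chunk 1 ends at 1 * 4096 = 4096 < 5000, so push
+    //                 (chunk = 1, pow = 12) covering [0, 4096); last_idx = 4096.
+    //   pow 12 -> 11: left chunk 3 ends at 3 * 2048 = 6144 >= 5000, so keep the
+    //                 right boundary and continue with chunk = 3.
+    //   pow 11 stops the loop (2048 is not > stride); the tail [4096, 5000) is
+    //   iterated right here, leaving the pushed chunk for other workers to steal.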
+ while ((1 << pow) > (int)ObjArrayMarkingStride && + (chunk*2 < ShenandoahMarkTask::chunk_size())) { + pow--; + int left_chunk = chunk*2 - 1; + int right_chunk = chunk*2; + int left_chunk_end = left_chunk * (1 << pow); + if (left_chunk_end < len) { + bool pushed = q->push(ShenandoahMarkTask(array, left_chunk, pow)); + assert(pushed, "overflow queue should always succeed pushing"); + chunk = right_chunk; + last_idx = left_chunk_end; + } else { + chunk = left_chunk; + } + } + + // Process the irregular tail, if present + int from = last_idx; + if (from < len) { + array->oop_iterate_range(cl, from, len); + } + } +} + +template +inline void ShenandoahConcurrentMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop obj, int chunk, int pow) { + assert(obj->is_objArray(), "expect object array"); + objArrayOop array = objArrayOop(obj); + + assert (ObjArrayMarkingStride > 0, "sanity"); + + // Split out tasks, as suggested in ObjArrayChunkedTask docs. Avoid pushing tasks that + // are known to start beyond the array. + while ((1 << pow) > (int)ObjArrayMarkingStride && (chunk*2 < ShenandoahMarkTask::chunk_size())) { + pow--; + chunk *= 2; + bool pushed = q->push(ShenandoahMarkTask(array, chunk - 1, pow)); + assert(pushed, "overflow queue should always succeed pushing"); + } + + int chunk_size = 1 << pow; + + int from = (chunk - 1) * chunk_size; + int to = chunk * chunk_size; + +#ifdef ASSERT + int len = array->length(); + assert (0 <= from && from < len, "from is sane: %d/%d", from, len); + assert (0 < to && to <= len, "to is sane: %d/%d", to, len); +#endif + + array->oop_iterate_range(cl, from, to); +} + +class ShenandoahConcMarkSATBBufferClosure : public ShenandoahSATBBufferClosure { +private: + ShenandoahObjToScanQueue* _queue; + ShenandoahHeap* _heap; + ShenandoahMarkingContext* const _mark_context; +public: + ShenandoahConcMarkSATBBufferClosure(ShenandoahObjToScanQueue* q) : + _queue(q), + _heap(ShenandoahHeap::heap()), + _mark_context(_heap->marking_context()) + { + } + + void do_buffer(void **buffer, size_t size) { + if (_heap->has_forwarded_objects()) { + if (ShenandoahStringDedup::is_enabled()) { + do_buffer_impl(buffer, size); + } else { + do_buffer_impl(buffer, size); + } + } else { + if (ShenandoahStringDedup::is_enabled()) { + do_buffer_impl(buffer, size); + } else { + do_buffer_impl(buffer, size); + } + } + } + + template + void do_buffer_impl(void **buffer, size_t size) { + for (size_t i = 0; i < size; ++i) { + oop *p = (oop *) &buffer[i]; + ShenandoahConcurrentMark::mark_through_ref(p, _heap, _queue, _mark_context); + } + } +}; + +template +inline void ShenandoahConcurrentMark::mark_through_ref(T *p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context) { + T o = RawAccess<>::oop_load(p); + if (!CompressedOops::is_null(o)) { + oop obj = CompressedOops::decode_not_null(o); + switch (UPDATE_REFS) { + case NONE: + break; + case RESOLVE: + obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); + break; + case SIMPLE: + // We piggy-back reference updating to the marking tasks. + obj = heap->update_with_forwarded_not_null(p, obj); + break; + case CONCURRENT: + obj = heap->maybe_update_with_forwarded_not_null(p, obj); + break; + default: + ShouldNotReachHere(); + } + + // Note: Only when concurrently updating references can obj be different + // (that is, really different, not just different from-/to-space copies of the same) + // from the one we originally loaded. 
Mutator thread can beat us by writing something + // else into the location. In that case, we would mark through that updated value, + // on the off-chance it is not handled by other means (e.g. via SATB). However, + // if that write was NULL, we don't need to do anything else. + if (UPDATE_REFS != CONCURRENT || !CompressedOops::is_null(obj)) { + shenandoah_assert_not_forwarded(p, obj); + shenandoah_assert_not_in_cset_except(p, obj, heap->cancelled_gc()); + + if (mark_context->mark(obj)) { + bool pushed = q->push(ShenandoahMarkTask(obj)); + assert(pushed, "overflow queue should always succeed pushing"); + + if ((STRING_DEDUP == ENQUEUE_DEDUP) && ShenandoahStringDedup::is_candidate(obj)) { + assert(ShenandoahStringDedup::is_enabled(), "Must be enabled"); + ShenandoahStringDedup::enqueue_candidate(obj); + } + } + + shenandoah_assert_marked(p, obj); + } + } +} + +#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp 2020-01-17 17:10:01.920130930 +0100 @@ -0,0 +1,620 @@ +/* + * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp" +#include "gc/shenandoah/shenandoahCollectorPolicy.hpp" +#include "gc/shenandoah/shenandoahFreeSet.hpp" +#include "gc/shenandoah/shenandoahPhaseTimings.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeuristics.hpp" +#include "gc/shenandoah/shenandoahMonitoringSupport.hpp" +#include "gc/shenandoah/shenandoahControlThread.hpp" +#include "gc/shenandoah/shenandoahTraversalGC.hpp" +#include "gc/shenandoah/shenandoahUtils.hpp" +#include "gc/shenandoah/shenandoahVMOperations.hpp" +#include "gc/shenandoah/shenandoahWorkerPolicy.hpp" +#include "memory/iterator.hpp" +#include "memory/universe.hpp" + +ShenandoahControlThread::ShenandoahControlThread() : + ConcurrentGCThread(), + _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true, Monitor::_safepoint_check_always), + _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", true, Monitor::_safepoint_check_always), + _periodic_task(this), + _requested_gc_cause(GCCause::_no_cause_specified), + _degen_point(ShenandoahHeap::_degenerated_outside_cycle), + _allocs_seen(0) { + + create_and_start(ShenandoahCriticalControlThreadPriority ? 
CriticalPriority : NearMaxPriority);
+  _periodic_task.enroll();
+  _periodic_satb_flush_task.enroll();
+}
+
+ShenandoahControlThread::~ShenandoahControlThread() {
+  // This is here so that super is called.
+}
+
+void ShenandoahPeriodicTask::task() {
+  _thread->handle_force_counters_update();
+  _thread->handle_counters_update();
+}
+
+void ShenandoahPeriodicSATBFlushTask::task() {
+  ShenandoahHeap::heap()->force_satb_flush_all_threads();
+}
+
+void ShenandoahControlThread::run_service() {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  GCMode default_mode = heap->is_traversal_mode() ?
+                        concurrent_traversal : concurrent_normal;
+  GCCause::Cause default_cause = heap->is_traversal_mode() ?
+                        GCCause::_shenandoah_traversal_gc : GCCause::_shenandoah_concurrent_gc;
+  int sleep = ShenandoahControlIntervalMin;
+
+  double last_shrink_time = os::elapsedTime();
+  double last_sleep_adjust_time = os::elapsedTime();
+
+  // Shrink period avoids constantly polling regions for shrinking.
+  // Having a period 10x shorter than the delay means we hit the
+  // shrinking with a lag of less than 1/10th of the true delay.
+  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
+  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
+
+  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
+  ShenandoahHeuristics* heuristics = heap->heuristics();
+  while (!in_graceful_shutdown() && !should_terminate()) {
+    // Figure out if we have pending requests.
+    bool alloc_failure_pending = _alloc_failure_gc.is_set();
+    bool explicit_gc_requested = _gc_requested.is_set() && is_explicit_gc(_requested_gc_cause);
+    bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause);
+
+    // This control loop iteration has seen this many allocations.
+    size_t allocs_seen = Atomic::xchg(0, &_allocs_seen);
+
+    // Choose which GC mode to run in. The block below should select a single mode.
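+    // Priority order below: allocation failure first (degenerated or full GC),
+    // then explicit requests (e.g. System.gc()), then implicit requests made by
+    // the VM itself, and only then a heuristics-driven concurrent cycle.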
+ GCMode mode = none; + GCCause::Cause cause = GCCause::_last_gc_cause; + ShenandoahHeap::ShenandoahDegenPoint degen_point = ShenandoahHeap::_degenerated_unset; + + if (alloc_failure_pending) { + // Allocation failure takes precedence: we have to deal with it first thing + log_info(gc)("Trigger: Handle Allocation Failure"); + + cause = GCCause::_allocation_failure; + + // Consume the degen point, and seed it with default value + degen_point = _degen_point; + _degen_point = ShenandoahHeap::_degenerated_outside_cycle; + + if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) { + heuristics->record_allocation_failure_gc(); + policy->record_alloc_failure_to_degenerated(degen_point); + mode = stw_degenerated; + } else { + heuristics->record_allocation_failure_gc(); + policy->record_alloc_failure_to_full(); + mode = stw_full; + } + + } else if (explicit_gc_requested) { + cause = _requested_gc_cause; + log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause)); + + heuristics->record_requested_gc(); + + if (ExplicitGCInvokesConcurrent) { + policy->record_explicit_to_concurrent(); + mode = default_mode; + // Unload and clean up everything + heap->set_process_references(heuristics->can_process_references()); + heap->set_unload_classes(heuristics->can_unload_classes()); + } else { + policy->record_explicit_to_full(); + mode = stw_full; + } + } else if (implicit_gc_requested) { + cause = _requested_gc_cause; + log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause)); + + heuristics->record_requested_gc(); + + if (ShenandoahImplicitGCInvokesConcurrent) { + policy->record_implicit_to_concurrent(); + mode = default_mode; + + // Unload and clean up everything + heap->set_process_references(heuristics->can_process_references()); + heap->set_unload_classes(heuristics->can_unload_classes()); + } else { + policy->record_implicit_to_full(); + mode = stw_full; + } + } else { + // Potential normal cycle: ask heuristics if it wants to act + if (heuristics->should_start_gc()) { + mode = default_mode; + cause = default_cause; + } + + // Ask policy if this cycle wants to process references or unload classes + heap->set_process_references(heuristics->should_process_references()); + heap->set_unload_classes(heuristics->should_unload_classes()); + } + + // Blow all soft references on this cycle, if handling allocation failure, + // or we are requested to do so unconditionally. + if (alloc_failure_pending || ShenandoahAlwaysClearSoftRefs) { + heap->soft_ref_policy()->set_should_clear_all_soft_refs(true); + } + + bool gc_requested = (mode != none); + assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set"); + + if (gc_requested) { + heap->reset_bytes_allocated_since_gc_start(); + + // If GC was requested, we are sampling the counters even without actual triggers + // from allocation machinery. This captures GC phases more accurately. 
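+      // (Forced updates are retracted at the end of the cycle, after one final
+      // refresh, so idle periods fall back to cheap periodic sampling.)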
+      set_forced_counters_update(true);
+
+      // If GC was requested, we better dump freeset data for performance debugging
+      {
+        ShenandoahHeapLocker locker(heap->lock());
+        heap->free_set()->log_status();
+      }
+    }
+
+    switch (mode) {
+      case none:
+        break;
+      case concurrent_traversal:
+        service_concurrent_traversal_cycle(cause);
+        break;
+      case concurrent_normal:
+        service_concurrent_normal_cycle(cause);
+        break;
+      case stw_degenerated:
+        service_stw_degenerated_cycle(cause, degen_point);
+        break;
+      case stw_full:
+        service_stw_full_cycle(cause);
+        break;
+      default:
+        ShouldNotReachHere();
+    }
+
+    if (gc_requested) {
+      // If this was the requested GC cycle, notify waiters about it
+      if (explicit_gc_requested || implicit_gc_requested) {
+        notify_gc_waiters();
+      }
+
+      // If this was the allocation failure GC cycle, notify waiters about it
+      if (alloc_failure_pending) {
+        notify_alloc_failure_waiters();
+      }
+
+      // Report current free set state at the end of cycle, whether
+      // it is a normal completion or an abort.
+      {
+        ShenandoahHeapLocker locker(heap->lock());
+        heap->free_set()->log_status();
+
+        // Notify Universe about new heap usage. This has implications for
+        // global soft refs policy, and we better report it every time heap
+        // usage goes down.
+        Universe::update_heap_info_at_gc();
+      }
+
+      // Disable forced counters update, and update counters one more time
+      // to capture the state at the end of GC session.
+      handle_force_counters_update();
+      set_forced_counters_update(false);
+
+      // Retract forceful part of soft refs policy
+      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);
+
+      // Clear metaspace oom flag, if current cycle unloaded classes
+      if (heap->unload_classes()) {
+        heuristics->clear_metaspace_oom();
+      }
+
+      // GC is over, we are at idle now
+      if (ShenandoahPacing) {
+        heap->pacer()->setup_for_idle();
+      }
+    } else {
+      // Allow allocators to know how much allocation we have seen
+      if (ShenandoahPacing && (allocs_seen > 0)) {
+        heap->pacer()->report_alloc(allocs_seen);
+      }
+    }
+
+    double current = os::elapsedTime();
+
+    if (ShenandoahUncommit && (explicit_gc_requested || (current - last_shrink_time > shrink_period))) {
+      // Try to uncommit enough stale regions. Explicit GC tries to uncommit everything.
+      // Regular paths uncommit only occasionally.
+      double shrink_before = explicit_gc_requested ?
+                             current :
+                             current - (ShenandoahUncommitDelay / 1000.0);
+      service_uncommit(shrink_before);
+      last_shrink_time = current;
+    }
+
+    // Wait before performing the next action. If allocation happened during this wait,
+    // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
+    // back off exponentially.
+    if (_heap_changed.try_unset()) {
+      sleep = ShenandoahControlIntervalMin;
+    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
+      sleep = MIN2(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
+      last_sleep_adjust_time = current;
+    }
+    os::naked_short_sleep(sleep);
+  }
+
+  // Wait for the actual stop(), can't leave run_service() earlier.
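Two timing rules govern the loop just above; the trivial wait loop below only covers the gap until stop(). Uncommit: with ShenandoahUncommitDelay at, say, 5000 ms, the regular path uncommits regions that have sat empty for at least 5 s and polls for them every 0.5 s (the 10x shrink_period), while an explicit GC uncommits everything at once (shrink_before = now). Sleep: the polling interval snaps back to ShenandoahControlIntervalMin as soon as the heap changed, and otherwise doubles periodically up to ShenandoahControlIntervalMax. The backoff, restated as a small sketch with plain ints standing in for the VM flags (values illustrative only):

    #include <algorithm>

    const int kMinMs    = 1;     // stand-in for ShenandoahControlIntervalMin
    const int kMaxMs    = 10;    // stand-in for ShenandoahControlIntervalMax
    const int kAdjustMs = 1000;  // stand-in for ShenandoahControlIntervalAdjustPeriod

    // now_s and *last_adjust_s are os::elapsedTime()-style seconds.
    int next_sleep_ms(int sleep_ms, bool heap_changed, double now_s, double* last_adjust_s) {
      if (heap_changed) {
        return kMinMs;  // allocation happened: re-evaluate heuristics quickly
      }
      if ((now_s - *last_adjust_s) * 1000 > kAdjustMs) {
        *last_adjust_s = now_s;
        return std::min(kMaxMs, std::max(1, sleep_ms * 2));  // idle: back off exponentially
      }
      return sleep_ms;
    }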
+ while (!should_terminate()) { + os::naked_short_sleep(ShenandoahControlIntervalMin); + } +} + +void ShenandoahControlThread::service_concurrent_traversal_cycle(GCCause::Cause cause) { + GCIdMark gc_id_mark; + ShenandoahGCSession session(cause); + + ShenandoahHeap* heap = ShenandoahHeap::heap(); + TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); + + // Reset for upcoming cycle + heap->entry_reset(); + + heap->vmop_entry_init_traversal(); + + if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return; + + heap->entry_traversal(); + if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return; + + heap->vmop_entry_final_traversal(); + + heap->entry_cleanup(); + + heap->heuristics()->record_success_concurrent(); + heap->shenandoah_policy()->record_success_concurrent(); +} + +void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) { + // Normal cycle goes via all concurrent phases. If allocation failure (af) happens during + // any of the concurrent phases, it first degrades to Degenerated GC and completes GC there. + // If second allocation failure happens during Degenerated GC cycle (for example, when GC + // tries to evac something and no memory is available), cycle degrades to Full GC. + // + // There are also two shortcuts through the normal cycle: a) immediate garbage shortcut, when + // heuristics says there are no regions to compact, and all the collection comes from immediately + // reclaimable regions; b) coalesced UR shortcut, when heuristics decides to coalesce UR with the + // mark from the next cycle. + // + // ................................................................................................ + // + // (immediate garbage shortcut) Concurrent GC + // /-------------------------------------------\ + // | (coalesced UR) v + // | /----------------------->o + // | | | + // | | v + // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END] + // | | | ^ + // | (af) | (af) | (af) | + // ..................|....................|.................|..............|....................... + // | | | | + // | | | | Degenerated GC + // v v v | + // STW Mark ----------> STW Evac ----> STW Update-Refs ----->o + // | | | ^ + // | (af) | (af) | (af) | + // ..................|....................|.................|..............|....................... + // | | | | + // | v | | Full GC + // \------------------->o<----------------/ | + // | | + // v | + // Full GC --------------------------/ + // + ShenandoahHeap* heap = ShenandoahHeap::heap(); + + if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_outside_cycle)) return; + + GCIdMark gc_id_mark; + ShenandoahGCSession session(cause); + + TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); + + // Reset for upcoming marking + heap->entry_reset(); + + // Start initial mark under STW + heap->vmop_entry_init_mark(); + + // Continue concurrent mark + heap->entry_mark(); + if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_mark)) return; + + // If not cancelled, can try to concurrently pre-clean + heap->entry_preclean(); + + // Complete marking under STW, and start evacuation + heap->vmop_entry_final_mark(); + + // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim + // the space. This would be the last action if there is nothing to evacuate. 
+ heap->entry_cleanup(); + + { + ShenandoahHeapLocker locker(heap->lock()); + heap->free_set()->log_status(); + } + + // Continue the cycle with evacuation and optional update-refs. + // This may be skipped if there is nothing to evacuate. + // If so, evac_in_progress would be unset by collection set preparation code. + if (heap->is_evacuation_in_progress()) { + // Concurrently evacuate + heap->entry_evac(); + if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_evac)) return; + + // Perform update-refs phase, if required. This phase can be skipped if heuristics + // decides to piggy-back the update-refs on the next marking cycle. On either path, + // we need to turn off evacuation: either in init-update-refs, or in final-evac. + if (heap->heuristics()->should_start_update_refs()) { + heap->vmop_entry_init_updaterefs(); + heap->entry_updaterefs(); + if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_updaterefs)) return; + + heap->vmop_entry_final_updaterefs(); + + // Update references freed up collection set, kick the cleanup to reclaim the space. + heap->entry_cleanup(); + + } else { + heap->vmop_entry_final_evac(); + } + } + + // Cycle is complete + heap->heuristics()->record_success_concurrent(); + heap->shenandoah_policy()->record_success_concurrent(); +} + +bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + if (heap->cancelled_gc()) { + assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting"); + if (!in_graceful_shutdown()) { + assert (_degen_point == ShenandoahHeap::_degenerated_outside_cycle, + "Should not be set yet: %s", ShenandoahHeap::degen_point_to_string(_degen_point)); + _degen_point = point; + } + return true; + } + return false; +} + +void ShenandoahControlThread::stop_service() { + // Nothing to do here. +} + +void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) { + GCIdMark gc_id_mark; + ShenandoahGCSession session(cause); + + ShenandoahHeap* heap = ShenandoahHeap::heap(); + heap->vmop_entry_full(cause); + + heap->heuristics()->record_success_full(); + heap->shenandoah_policy()->record_success_full(); +} + +void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point) { + assert (point != ShenandoahHeap::_degenerated_unset, "Degenerated point should be set"); + + GCIdMark gc_id_mark; + ShenandoahGCSession session(cause); + + ShenandoahHeap* heap = ShenandoahHeap::heap(); + heap->vmop_degenerated(point); + + heap->heuristics()->record_success_degenerated(); + heap->shenandoah_policy()->record_success_degenerated(); +} + +void ShenandoahControlThread::service_uncommit(double shrink_before) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + + // Determine if there is work to do. This avoids taking heap lock if there is + // no work available, avoids spamming logs with superfluous logging messages, + // and minimises the amount of work while locks are taken. 
+ + if (heap->committed() <= heap->min_capacity()) return; + + bool has_work = false; + for (size_t i = 0; i < heap->num_regions(); i++) { + ShenandoahHeapRegion *r = heap->get_region(i); + if (r->is_empty_committed() && (r->empty_time() < shrink_before)) { + has_work = true; + break; + } + } + + if (has_work) { + heap->entry_uncommit(shrink_before); + } +} + +bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const { + return GCCause::is_user_requested_gc(cause) || + GCCause::is_serviceability_requested_gc(cause); +} + +void ShenandoahControlThread::request_gc(GCCause::Cause cause) { + assert(GCCause::is_user_requested_gc(cause) || + GCCause::is_serviceability_requested_gc(cause) || + cause == GCCause::_metadata_GC_clear_soft_refs || + cause == GCCause::_full_gc_alot || + cause == GCCause::_wb_full_gc || + cause == GCCause::_scavenge_alot, + "only requested GCs here"); + + if (is_explicit_gc(cause)) { + if (!DisableExplicitGC) { + handle_requested_gc(cause); + } + } else { + handle_requested_gc(cause); + } +} + +void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) { + _requested_gc_cause = cause; + _gc_requested.set(); + MonitorLockerEx ml(&_gc_waiters_lock); + while (_gc_requested.is_set()) { + ml.wait(); + } +} + +void ShenandoahControlThread::handle_alloc_failure(size_t words) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + + assert(current()->is_Java_thread(), "expect Java thread here"); + + if (try_set_alloc_failure_gc()) { + // Only report the first allocation failure + log_info(gc)("Failed to allocate " SIZE_FORMAT "%s", + byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize)); + + // Now that alloc failure GC is scheduled, we can abort everything else + heap->cancel_gc(GCCause::_allocation_failure); + } + + MonitorLockerEx ml(&_alloc_failure_waiters_lock); + while (is_alloc_failure_gc()) { + ml.wait(); + } +} + +void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + + if (try_set_alloc_failure_gc()) { + // Only report the first allocation failure + log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation", + byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize)); + } + + // Forcefully report allocation failure + heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac); +} + +void ShenandoahControlThread::notify_alloc_failure_waiters() { + _alloc_failure_gc.unset(); + MonitorLockerEx ml(&_alloc_failure_waiters_lock); + ml.notify_all(); +} + +bool ShenandoahControlThread::try_set_alloc_failure_gc() { + return _alloc_failure_gc.try_set(); +} + +bool ShenandoahControlThread::is_alloc_failure_gc() { + return _alloc_failure_gc.is_set(); +} + +void ShenandoahControlThread::notify_gc_waiters() { + _gc_requested.unset(); + MonitorLockerEx ml(&_gc_waiters_lock); + ml.notify_all(); +} + +void ShenandoahControlThread::handle_counters_update() { + if (_do_counters_update.is_set()) { + _do_counters_update.unset(); + ShenandoahHeap::heap()->monitoring_support()->update_counters(); + } +} + +void ShenandoahControlThread::handle_force_counters_update() { + if (_force_counters_update.is_set()) { + _do_counters_update.unset(); // reset these too, we do update now! + ShenandoahHeap::heap()->monitoring_support()->update_counters(); + } +} + +void ShenandoahControlThread::notify_heap_changed() { + // This is called from allocation path, and thus should be fast. 
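The "fast" part is the test-before-set idiom in the body below: when the flag is already set, the thread only performs a read, which stays a shared cache hit; unconditionally writing would bounce the flag's cache line between allocating threads on every allocation. The same idiom, sketched with std::atomic in place of ShenandoahSharedFlag (a sketch, not the VM code):

    #include <atomic>

    // Minimal stand-in for ShenandoahSharedFlag, for illustration only.
    struct SharedFlag {
      std::atomic<int> v{0};
      bool is_unset() const { return v.load(std::memory_order_acquire) == 0; }
      void set()            { v.store(1, std::memory_order_release); }
    };

    void notify_changed(SharedFlag& heap_changed) {
      if (heap_changed.is_unset()) {  // cheap read on the hot allocation path
        heap_changed.set();           // rare write, only on the first transition
      }
    }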
+
+  // Update monitoring counters when we take a new region. This amortizes the
+  // update costs on the slow path.
+  if (_do_counters_update.is_unset()) {
+    _do_counters_update.set();
+  }
+  // Notify that something has changed.
+  if (_heap_changed.is_unset()) {
+    _heap_changed.set();
+  }
+}
+
+void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
+  assert(ShenandoahPacing, "should only call when pacing is enabled");
+  Atomic::add(words, &_allocs_seen);
+}
+
+void ShenandoahControlThread::set_forced_counters_update(bool value) {
+  _force_counters_update.set_cond(value);
+}
+
+void ShenandoahControlThread::print() const {
+  print_on(tty);
+}
+
+void ShenandoahControlThread::print_on(outputStream* st) const {
+  st->print("Shenandoah Concurrent Thread");
+  Thread::print_on(st);
+  st->cr();
+}
+
+void ShenandoahControlThread::start() {
+  create_and_start();
+}
+
+void ShenandoahControlThread::prepare_for_graceful_shutdown() {
+  _graceful_shutdown.set();
+}
+
+bool ShenandoahControlThread::in_graceful_shutdown() {
+  return _graceful_shutdown.is_set();
+}
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahControlThread.hpp	2020-01-17 17:10:02.535130896 +0100
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONTROLTHREAD_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONTROLTHREAD_HPP
+
+#include "gc/shared/gcCause.hpp"
+#include "gc/shared/concurrentGCThread.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahSharedVariables.hpp"
+#include "runtime/task.hpp"
+#include "utilities/ostream.hpp"
+
+class ShenandoahControlThread;
+
+// Periodic task is useful for doing asynchronous things that do not require (heap) locks,
+// or synchronization with other parts of the collector. These could run even when
+// ShenandoahControlThread is busy driving the GC cycle.
+class ShenandoahPeriodicTask : public PeriodicTask {
+private:
+  ShenandoahControlThread* _thread;
+public:
+  ShenandoahPeriodicTask(ShenandoahControlThread* thread) :
+          PeriodicTask(100), _thread(thread) {}
+  virtual void task();
+};
+
+// Periodic task to flush SATB buffers periodically.
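Both helper tasks, ShenandoahPeriodicTask above and the SATB flush task declared next, ride on HotSpot's PeriodicTask framework from runtime/task.hpp: the constructor fixes the interval in milliseconds, enroll() registers the task, and the WatcherThread then calls task() at roughly that interval, independently of what the control thread is doing. The general shape (a hypothetical task, shown only to illustrate the pattern):

    class MyPeriodicTask : public PeriodicTask {
    public:
      MyPeriodicTask() : PeriodicTask(100 /* ms */) {}
      virtual void task() {
        // Runs on the WatcherThread every ~100 ms after enroll().
        // Must be short, and must not take heap locks or block.
      }
    };

    // Usage: construct, then enroll() to start receiving callbacks;
    // disenroll() stops them.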
+class ShenandoahPeriodicSATBFlushTask : public PeriodicTask {
+public:
+  ShenandoahPeriodicSATBFlushTask() : PeriodicTask(ShenandoahSATBBufferFlushInterval) {}
+  virtual void task();
+};
+
+class ShenandoahControlThread: public ConcurrentGCThread {
+  friend class VMStructs;
+
+private:
+  typedef enum {
+    none,
+    concurrent_traversal,
+    concurrent_normal,
+    stw_degenerated,
+    stw_full
+  } GCMode;
+
+  // While we could have a single lock for these, it may risk unblocking
+  // GC waiters when the alloc failure GC cycle finishes. We want instead
+  // to run a complete explicit cycle for demanding customers.
+  Monitor _alloc_failure_waiters_lock;
+  Monitor _gc_waiters_lock;
+  ShenandoahPeriodicTask _periodic_task;
+  ShenandoahPeriodicSATBFlushTask _periodic_satb_flush_task;
+
+public:
+  void run_service();
+  void stop_service();
+
+private:
+  ShenandoahSharedFlag _gc_requested;
+  ShenandoahSharedFlag _alloc_failure_gc;
+  ShenandoahSharedFlag _graceful_shutdown;
+  ShenandoahSharedFlag _heap_changed;
+  ShenandoahSharedFlag _do_counters_update;
+  ShenandoahSharedFlag _force_counters_update;
+  GCCause::Cause       _requested_gc_cause;
+  ShenandoahHeap::ShenandoahDegenPoint _degen_point;
+
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
+  volatile size_t _allocs_seen;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
+
+  bool check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point);
+  void service_concurrent_normal_cycle(GCCause::Cause cause);
+  void service_stw_full_cycle(GCCause::Cause cause);
+  void service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point);
+  void service_concurrent_traversal_cycle(GCCause::Cause cause);
+  void service_uncommit(double shrink_before);
+
+  bool try_set_alloc_failure_gc();
+  void notify_alloc_failure_waiters();
+  bool is_alloc_failure_gc();
+
+  void notify_gc_waiters();
+
+  // Handle GC request.
+  // Blocks until GC is over.
+  void handle_requested_gc(GCCause::Cause cause);
+
+  bool is_explicit_gc(GCCause::Cause cause) const;
+
+public:
+  // Constructor
+  ShenandoahControlThread();
+  ~ShenandoahControlThread();
+
+  // Handle allocation failure from normal allocation.
+  // Blocks until memory is available.
+  void handle_alloc_failure(size_t words);
+
+  // Handle allocation failure from evacuation path.
+  // Optionally blocks while collector is handling the failure.
+  void handle_alloc_failure_evac(size_t words);
+
+  void request_gc(GCCause::Cause cause);
+
+  void handle_counters_update();
+  void handle_force_counters_update();
+  void set_forced_counters_update(bool value);
+
+  void notify_heap_changed();
+
+  void pacing_notify_alloc(size_t words);
+
+  void start();
+  void prepare_for_graceful_shutdown();
+  bool in_graceful_shutdown();
+
+  char* name() const { return (char*)"ShenandoahControlThread"; }
+
+  // Printing
+  void print_on(outputStream* st) const;
+  void print() const;
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONTROLTHREAD_HPP
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp	2020-01-17 17:10:03.138130863 +0100
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahUtils.hpp" +#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp" +#include "gc/shenandoah/shenandoahThreadLocalData.hpp" +#include "runtime/orderAccess.hpp" +#include "runtime/os.hpp" +#include "runtime/thread.hpp" + +const jint ShenandoahEvacOOMHandler::OOM_MARKER_MASK = 0x80000000; + +ShenandoahEvacOOMHandler::ShenandoahEvacOOMHandler() : + _threads_in_evac(0) { +} + +void ShenandoahEvacOOMHandler::wait_for_no_evac_threads() { + while ((OrderAccess::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) != 0) { + os::naked_short_sleep(1); + } + // At this point we are sure that no threads can evacuate anything. Raise + // the thread-local oom_during_evac flag to indicate that any attempt + // to evacuate should simply return the forwarding pointer instead (which is safe now). + ShenandoahThreadLocalData::set_oom_during_evac(Thread::current(), true); +} + +void ShenandoahEvacOOMHandler::enter_evacuation() { + jint threads_in_evac = OrderAccess::load_acquire(&_threads_in_evac); + + assert(!ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "sanity"); + assert(!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current()), "TL oom-during-evac must not be set"); + + if ((threads_in_evac & OOM_MARKER_MASK) != 0) { + wait_for_no_evac_threads(); + return; + } + + while (true) { + jint other = Atomic::cmpxchg(threads_in_evac + 1, &_threads_in_evac, threads_in_evac); + if (other == threads_in_evac) { + // Success: caller may safely enter evacuation + DEBUG_ONLY(ShenandoahThreadLocalData::set_evac_allowed(Thread::current(), true)); + return; + } else { + // Failure: + // - if offender has OOM_MARKER_MASK, then loop until no more threads in evac + // - otherwise re-try CAS + if ((other & OOM_MARKER_MASK) != 0) { + wait_for_no_evac_threads(); + return; + } + threads_in_evac = other; + } + } +} + +void ShenandoahEvacOOMHandler::leave_evacuation() { + if (!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) { + assert((OrderAccess::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) > 0, "sanity"); + // NOTE: It's ok to simply decrement, even with mask set, because unmasked value is positive. + Atomic::dec(&_threads_in_evac); + } else { + // If we get here, the current thread has already gone through the + // OOM-during-evac protocol and has thus either never entered or successfully left + // the evacuation region. Simply flip its TL oom-during-evac flag back off. 
+    ShenandoahThreadLocalData::set_oom_during_evac(Thread::current(), false);
+  }
+  DEBUG_ONLY(ShenandoahThreadLocalData::set_evac_allowed(Thread::current(), false));
+  assert(!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current()), "TL oom-during-evac must be turned off");
+}
+
+void ShenandoahEvacOOMHandler::handle_out_of_memory_during_evacuation() {
+  assert(ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "sanity");
+  assert(!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current()), "TL oom-during-evac must not be set");
+
+  jint threads_in_evac = OrderAccess::load_acquire(&_threads_in_evac);
+  while (true) {
+    jint other = Atomic::cmpxchg((threads_in_evac - 1) | OOM_MARKER_MASK,
+                                  &_threads_in_evac, threads_in_evac);
+    if (other == threads_in_evac) {
+      // Success: wait for other threads to get out of the protocol and return.
+      wait_for_no_evac_threads();
+      return;
+    } else {
+      // Failure: try again with the updated value.
+      threads_in_evac = other;
+    }
+  }
+}
+
+void ShenandoahEvacOOMHandler::clear() {
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
+  assert((OrderAccess::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) == 0, "sanity");
+  OrderAccess::release_store_fence(&_threads_in_evac, 0);
+}
+
+ShenandoahEvacOOMScope::ShenandoahEvacOOMScope() {
+  ShenandoahHeap::heap()->enter_evacuation();
+}
+
+ShenandoahEvacOOMScope::~ShenandoahEvacOOMScope() {
+  ShenandoahHeap::heap()->leave_evacuation();
+}
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.hpp	2020-01-17 17:10:03.747130830 +0100
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHEVACOOMHANDLER_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHEVACOOMHANDLER_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+/**
+ * Provides safe handling of out-of-memory situations during evacuation.
+ *
+ * When a Java thread encounters out-of-memory while evacuating an object in a
+ * load-reference-barrier (i.e. it cannot copy the object to to-space), it does not
+ * necessarily follow that we can return immediately from the LRB (and store to from-space).
+ *
+ * In the very basic case, on such failure we may wait until the evacuation is over,
+ * then resolve the forwarded copy, and do the store there. This is possible
+ * because other threads might still have space in their GCLABs, and successfully
+ * evacuate the object.
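The mechanism the rest of this comment describes is a single counter word whose top bit doubles as the OOM marker (OOM_MARKER_MASK in the .cpp above). Restated as a sketch with std::atomic, following the same three transitions (illustrative only, not the VM code):

    #include <atomic>
    #include <cstdint>

    // One atomic word: top bit = "OOM happened", low bits =
    // number of threads currently inside the evacuation path.
    const uint32_t kOomBit = 0x80000000u;
    std::atomic<uint32_t> state{0};

    bool try_enter() {
      uint32_t cur = state.load();
      while ((cur & kOomBit) == 0) {
        if (state.compare_exchange_weak(cur, cur + 1)) return true;  // entered
      }
      // OOM was raised: wait until all evacuating threads have drained.
      while ((state.load() & ~kOomBit) != 0) { /* spin */ }
      return false;  // caller must not evacuate; resolve the forwardee instead
    }

    void leave() {
      state.fetch_sub(1);  // plain decrement; the marker bit is unaffected
    }

    void raise_oom() {  // called by a thread inside the path, instead of leave()
      uint32_t cur = state.load();
      while (!state.compare_exchange_weak(cur, (cur - 1) | kOomBit)) { }
      while ((state.load() & ~kOomBit) != 0) { /* wait for the others */ }
    }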
+ *
+ * But, there is a race due to the non-atomic evac_in_progress transition. Consider
+ * thread A is stuck waiting for the evacuation to be over -- it cannot leave with
+ * the from-space copy yet. The control thread drops evacuation_in_progress preparing
+ * for the next STW phase that has to recover from OOME. Thread B misses that update,
+ * successfully evacuates the object, and does the write to the to-copy. But, before
+ * Thread B is able to install the fwdptr, thread A discovers evac_in_progress is
+ * down, exits from here, reads the fwdptr, discovers the old from-copy, and stores there.
+ * Thread B then wakes up and installs the to-copy. This breaks the to-space invariant,
+ * and silently corrupts the heap: we accepted two writes to separate copies of the object.
+ *
+ * The way it is solved here is to maintain a counter of threads inside the
+ * 'evacuation path'. The 'evacuation path' is the part of evacuation that does the actual
+ * allocation, copying and CASing of the copy object, and is protected by this
+ * OOM-during-evac-handler. The handler allows multiple threads to enter and exit
+ * the evacuation path, but on OOME it requires all threads that experienced OOME to wait
+ * for current threads to leave, and blocks other threads from entering.
+ *
+ * Detailed state change:
+ *
+ * Upon entry to the evac-path, the entering thread attempts to increment the counter,
+ * using a CAS. Depending on the result of the CAS:
+ * - success: carry on with evac
+ * - failure:
+ *   - if the offending value is a valid counter, then try again
+ *   - if the offending value is the OOM-during-evac special value: loop until
+ *     the counter drops to 0, then exit, resolving the ptr
+ *
+ * Upon exit, the exiting thread decrements the counter using an atomic dec.
+ *
+ * Upon OOM-during-evac, any thread will attempt to CAS the OOM-during-evac
+ * special value into the counter. Depending on the result:
+ * - success: busy-loop until the counter drops to zero, then exit with resolve
+ * - failure:
+ *   - offender is a valid counter update: try again
+ *   - offender is OOM-during-evac: busy-loop until the counter drops to
+ *     zero, then exit with resolve
+ */
+class ShenandoahEvacOOMHandler {
+private:
+  static const jint OOM_MARKER_MASK;
+
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile jint));
+  volatile jint _threads_in_evac;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
+
+  void wait_for_no_evac_threads();
+
+public:
+  ShenandoahEvacOOMHandler();
+
+  /**
+   * Attempt to enter the protected evacuation path.
+   *
+   * When this method returns with the thread-local oom-during-evac flag clear,
+   * it is safe to continue with normal evacuation. When it returns with that
+   * flag set, evacuation must not be entered, and the caller may safely
+   * continue with a simple resolve (if Java thread).
+   */
+  void enter_evacuation();
+
+  /**
+   * Leave evacuation path.
+   */
+  void leave_evacuation();
+
+  /**
+   * Signal out-of-memory during evacuation. It will prevent any other threads
+   * from entering the evacuation path, then wait until all threads have left the
+   * evacuation path, and then return. It is then safe to continue with a simple resolve.
+ */ + void handle_out_of_memory_during_evacuation(); + + void clear(); +}; + +class ShenandoahEvacOOMScope : public StackObj { +public: + ShenandoahEvacOOMScope(); + ~ShenandoahEvacOOMScope(); +}; + +#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHEVACOOMHANDLER_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahForwarding.hpp 2020-01-17 17:10:04.341130797 +0100 @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHFORWARDING_HPP +#define SHARE_GC_SHENANDOAH_SHENANDOAHFORWARDING_HPP + +#include "oops/oop.hpp" +#include "utilities/globalDefinitions.hpp" + +class ShenandoahForwarding { +public: + /* Gets forwardee from the given object. + */ + static inline oop get_forwardee(oop obj); + + /* Returns the raw value from forwardee slot. + */ + static inline HeapWord* get_forwardee_raw(oop obj); + + /* Returns the raw value from forwardee slot without any checks. + * Used for quick verification. + */ + static inline HeapWord* get_forwardee_raw_unchecked(oop obj); + + /** + * Returns true if the object is forwarded, false otherwise. + */ + static inline bool is_forwarded(oop obj); + + /* Tries to atomically update forwardee in $holder object to $update. + * Assumes $holder points at itself. + * Asserts $holder is in from-space. + * Asserts $update is in to-space. + * + * Returns the new object 'update' upon success, or + * the new forwardee that a competing thread installed. + */ + static inline oop try_update_forwardee(oop obj, oop update); + +}; + +#endif // SHARE_GC_SHENANDOAH_SHENANDOAHFORWARDING_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp 2020-01-17 17:10:04.951130763 +0100 @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHFORWARDING_INLINE_HPP +#define SHARE_GC_SHENANDOAH_SHENANDOAHFORWARDING_INLINE_HPP + +#include "gc/shenandoah/shenandoahAsserts.hpp" +#include "gc/shenandoah/shenandoahForwarding.hpp" +#include "oops/markOop.inline.hpp" +#include "runtime/atomic.hpp" + +inline HeapWord* ShenandoahForwarding::get_forwardee_raw(oop obj) { + shenandoah_assert_in_heap(NULL, obj); + return get_forwardee_raw_unchecked(obj); +} + +inline HeapWord* ShenandoahForwarding::get_forwardee_raw_unchecked(oop obj) { + markOop mark = obj->mark_raw(); + if (mark->is_marked()) { + return (HeapWord*) mark->clear_lock_bits(); + } else { + return (HeapWord*) obj; + } +} + +inline oop ShenandoahForwarding::get_forwardee(oop obj) { + shenandoah_assert_correct(NULL, obj); + return oop(get_forwardee_raw_unchecked(obj)); +} + +inline bool ShenandoahForwarding::is_forwarded(oop obj) { + return obj->mark_raw()->is_marked(); +} + +inline oop ShenandoahForwarding::try_update_forwardee(oop obj, oop update) { + markOop old_mark = obj->mark_raw(); + if (old_mark->is_marked()) { + return (oop) old_mark->clear_lock_bits(); + } + + markOop new_mark = markOopDesc::encode_pointer_as_mark(update); + markOop prev_mark = obj->cas_set_mark_raw(new_mark, old_mark); + if (prev_mark == old_mark) { + return update; + } else { + return (oop) prev_mark->clear_lock_bits(); + } +} + +#endif // SHARE_GC_SHENANDOAH_SHENANDOAHFORWARDING_INLINE_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp 2020-01-17 17:10:05.554130730 +0100 @@ -0,0 +1,630 @@ +/* + * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/shenandoahFreeSet.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
+#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
+#include "gc/shenandoah/shenandoahTraversalGC.hpp"
+#include "logging/logStream.hpp"
+
+ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
+  _heap(heap),
+  _mutator_free_bitmap(max_regions, mtGC),
+  _collector_free_bitmap(max_regions, mtGC),
+  _max(max_regions)
+{
+  clear_internal();
+}
+
+void ShenandoahFreeSet::increase_used(size_t num_bytes) {
+  assert_heaplock_owned_by_current_thread();
+  _used += num_bytes;
+
+  assert(_used <= _capacity, "must not use more than we have: used: " SIZE_FORMAT
+         ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT, _used, _capacity, num_bytes);
+}
+
+bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
+  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
+          idx, _max, _mutator_leftmost, _mutator_rightmost);
+  return _mutator_free_bitmap.at(idx);
+}
+
+bool ShenandoahFreeSet::is_collector_free(size_t idx) const {
+  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
+          idx, _max, _collector_leftmost, _collector_rightmost);
+  return _collector_free_bitmap.at(idx);
+}
+
+HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
+  // Scan the bitmap looking for a first fit.
+  //
+  // Leftmost and rightmost bounds provide enough caching to walk the bitmap efficiently.
+  // Normally, we would find the region to allocate in right away.
+  //
+  // Allocations are biased: new application allocs go to the beginning of the heap, and GC allocs
+  // go to the end. This makes application allocation faster, because we would clear lots
+  // of regions from the beginning most of the time.
+  //
+  // Free set maintains mutator and collector views, and normally they allocate in their views only,
+  // unless we special-case stealing and mixed allocations.
+
+  switch (req.type()) {
+    case ShenandoahAllocRequest::_alloc_tlab:
+    case ShenandoahAllocRequest::_alloc_shared: {
+
+      // Try to allocate in the mutator view
+      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
+        if (is_mutator_free(idx)) {
+          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
+          if (result != NULL) {
+            return result;
+          }
+        }
+      }
+
+      // There is no recovery. Mutator does not touch collector view at all.
+      break;
+    }
+    case ShenandoahAllocRequest::_alloc_gclab:
+    case ShenandoahAllocRequest::_alloc_shared_gc: {
+      // size_t is unsigned, need to dodge underflow when _leftmost = 0
+
+      // Fast-path: try to allocate in the collector view first
+      for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
+        size_t idx = c - 1;
+        if (is_collector_free(idx)) {
+          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
+          if (result != NULL) {
+            return result;
+          }
+        }
+      }
+
+      // No dice. Can we borrow space from mutator view?
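Before the borrowing fallbacks below, note the scanning pattern: the two views walk the same region index space from opposite ends. Mutator allocations scan left-to-right from _mutator_leftmost, GC allocations scan right-to-left from _collector_rightmost, with the c = rightmost + 1; c > leftmost; idx = c - 1 dance dodging size_t underflow at region 0. The same two-ended scan over a plain bitset (a sketch; region and bitmap types simplified):

    #include <cstddef>
    #include <vector>

    // Find a free index scanning forward (mutator) or backward (collector).
    // 'free' is a stand-in for the free-region bitmaps.
    long find_free(const std::vector<bool>& free, size_t left, size_t right, bool forward) {
      if (forward) {
        for (size_t i = left; i <= right; i++) {
          if (free[i]) return (long)i;
        }
      } else {
        // size_t is unsigned: iterate c over (left, right+1], using idx = c - 1,
        // so the loop terminates correctly even when left == 0.
        for (size_t c = right + 1; c > left; c--) {
          size_t idx = c - 1;
          if (free[idx]) return (long)idx;
        }
      }
      return -1;  // nothing free in this view
    }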
+      if (!ShenandoahEvacReserveOverflow) {
+        return NULL;
+      }
+
+      // Try to steal an empty region from the mutator view
+      for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
+        size_t idx = c - 1;
+        if (is_mutator_free(idx)) {
+          ShenandoahHeapRegion* r = _heap->get_region(idx);
+          if (is_empty_or_trash(r)) {
+            flip_to_gc(r);
+            HeapWord *result = try_allocate_in(r, req, in_new_region);
+            if (result != NULL) {
+              return result;
+            }
+          }
+        }
+      }
+
+      // Try to mix the allocation into the mutator view:
+      if (ShenandoahAllowMixedAllocs) {
+        for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
+          size_t idx = c - 1;
+          if (is_mutator_free(idx)) {
+            HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
+            if (result != NULL) {
+              return result;
+            }
+          }
+        }
+      }
+      break;
+    }
+    default:
+      ShouldNotReachHere();
+  }
+
+  return NULL;
+}
+
+HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
+  assert (!has_no_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->region_number());
+
+  try_recycle_trashed(r);
+
+  in_new_region = r->is_empty();
+
+  HeapWord* result = NULL;
+  size_t size = req.size();
+
+  if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
+    size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
+    if (size > free) {
+      size = free;
+    }
+    if (size >= req.min_size()) {
+      result = r->allocate(size, req.type());
+      assert (result != NULL, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
+    }
+  } else {
+    result = r->allocate(size, req.type());
+  }
+
+  if (result != NULL) {
+    // Allocation successful, bump stats:
+    if (req.is_mutator_alloc()) {
+      increase_used(size * HeapWordSize);
+    }
+
+    // Record actual allocation size
+    req.set_actual_size(size);
+
+    if (req.is_gc_alloc() && _heap->is_concurrent_traversal_in_progress()) {
+      // Traversal needs to traverse through GC allocs. Adjust TAMS to the new top
+      // so that these allocations appear below TAMS, and thus get traversed.
+      // See top of shenandoahTraversal.cpp for an explanation.
+      _heap->marking_context()->capture_top_at_mark_start(r);
+      _heap->traversal_gc()->traversal_set()->add_region_check_for_duplicates(r);
+      OrderAccess::fence();
+    }
+  }
+
+  if (result == NULL || has_no_alloc_capacity(r)) {
+    // Region cannot afford this or future allocations. Retire it.
+    //
+    // While this seems a bit harsh, especially in the case when this large allocation does not
+    // fit but the next small one would, we risk inflating scan times when lots of
+    // almost-full regions precede the fully-empty region where we want to allocate the entire TLAB.
+    // TODO: Record first fully-empty region, and use that for large allocations
+
+    // Record the remainder as allocation waste
+    if (req.is_mutator_alloc()) {
+      size_t waste = r->free();
+      if (waste > 0) {
+        increase_used(waste);
+        _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
+      }
+    }
+
+    size_t num = r->region_number();
+    _collector_free_bitmap.clear_bit(num);
+    _mutator_free_bitmap.clear_bit(num);
+    // Touched the bounds?
Need to update:
+    if (touches_bounds(num)) {
+      adjust_bounds();
+    }
+    assert_bounds();
+  }
+  return result;
+}
+
+bool ShenandoahFreeSet::touches_bounds(size_t num) const {
+  return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
+}
+
+void ShenandoahFreeSet::recompute_bounds() {
+  // Reset to the most pessimistic case:
+  _mutator_rightmost = _max - 1;
+  _mutator_leftmost = 0;
+  _collector_rightmost = _max - 1;
+  _collector_leftmost = 0;
+
+  // ...and adjust from there
+  adjust_bounds();
+}
+
+void ShenandoahFreeSet::adjust_bounds() {
+  // Rewind both mutator bounds until the next bit.
+  while (_mutator_leftmost < _max && !is_mutator_free(_mutator_leftmost)) {
+    _mutator_leftmost++;
+  }
+  while (_mutator_rightmost > 0 && !is_mutator_free(_mutator_rightmost)) {
+    _mutator_rightmost--;
+  }
+  // Rewind both collector bounds until the next bit.
+  while (_collector_leftmost < _max && !is_collector_free(_collector_leftmost)) {
+    _collector_leftmost++;
+  }
+  while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) {
+    _collector_rightmost--;
+  }
+}
+
+HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
+  assert_heaplock_owned_by_current_thread();
+
+  size_t words_size = req.size();
+  size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
+
+  // No regions left to satisfy allocation, bye.
+  if (num > mutator_count()) {
+    return NULL;
+  }
+
+  // Find the contiguous interval of $num regions, starting from $beg and ending in $end,
+  // inclusive. Contiguous allocations are biased to the beginning.
+
+  size_t beg = _mutator_leftmost;
+  size_t end = beg;
+
+  while (true) {
+    if (end >= _max) {
+      // Hit the end, goodbye
+      return NULL;
+    }
+
+    // If regions are not adjacent, then current [beg; end] is useless, and we may fast-forward.
+    // If region is not completely free, the current [beg; end] is useless, and we may fast-forward.
+    if (!is_mutator_free(end) || !is_empty_or_trash(_heap->get_region(end))) {
+      end++;
+      beg = end;
+      continue;
+    }
+
+    if ((end - beg + 1) == num) {
+      // found the match
+      break;
+    }
+
+    end++;
+  }
+
+  size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
+
+  // Initialize regions:
+  for (size_t i = beg; i <= end; i++) {
+    ShenandoahHeapRegion* r = _heap->get_region(i);
+    try_recycle_trashed(r);
+
+    assert(i == beg || _heap->get_region(i-1)->region_number() + 1 == r->region_number(), "Should be contiguous");
+    assert(r->is_empty(), "Should be empty");
+
+    if (i == beg) {
+      r->make_humongous_start();
+    } else {
+      r->make_humongous_cont();
+    }
+
+    // Trailing region may be non-full, record the remainder there
+    size_t used_words;
+    if ((i == end) && (remainder != 0)) {
+      used_words = remainder;
+    } else {
+      used_words = ShenandoahHeapRegion::region_size_words();
+    }
+
+    r->set_top(r->bottom() + used_words);
+    r->reset_alloc_metadata_to_shared();
+
+    _mutator_free_bitmap.clear_bit(r->region_number());
+  }
+
+  // While individual regions report their true use, all humongous regions are
+  // marked used in the free set.
+  increase_used(ShenandoahHeapRegion::region_size_bytes() * num);
+
+  if (remainder != 0) {
+    // Record this remainder as allocation waste
+    _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
+  }
+
+  // Allocated at left/rightmost? Move the bounds appropriately.
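The leftmost/rightmost fields are only a cache over the bitmaps: whenever a boundary bit clears, adjust_bounds() above re-tightens them by scanning inward to the next set bit. The invariant and its repair, as a sketch with std::vector<bool> in place of CHeapBitMap:

    #include <cstddef>
    #include <vector>

    struct Bounds {
      size_t leftmost;   // invariant: no set bits below this index
      size_t rightmost;  // invariant: no set bits above this index
    };

    // Re-tighten the cached bounds after bits were cleared, mirroring adjust_bounds().
    void adjust(const std::vector<bool>& bits, Bounds& b) {
      const size_t max = bits.size();
      while (b.leftmost < max && !bits[b.leftmost]) {
        b.leftmost++;
      }
      while (b.rightmost > 0 && !bits[b.rightmost]) {
        b.rightmost--;
      }
    }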
+  if (beg == _mutator_leftmost || end == _mutator_rightmost) {
+    adjust_bounds();
+  }
+  assert_bounds();
+
+  req.set_actual_size(words_size);
+  return _heap->get_region(beg)->bottom();
+}
+
+bool ShenandoahFreeSet::is_empty_or_trash(ShenandoahHeapRegion *r) {
+  return r->is_empty() || r->is_trash();
+}
+
+size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) {
+  if (r->is_trash()) {
+    // This would be recycled on allocation path
+    return ShenandoahHeapRegion::region_size_bytes();
+  } else {
+    return r->free();
+  }
+}
+
+bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) {
+  return alloc_capacity(r) == 0;
+}
+
+void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
+  if (r->is_trash()) {
+    _heap->decrease_used(r->used());
+    r->recycle();
+  }
+}
+
+void ShenandoahFreeSet::recycle_trash() {
+  // lock is not reentrant, check we don't hold it
+  assert_heaplock_not_owned_by_current_thread();
+
+  for (size_t i = 0; i < _heap->num_regions(); i++) {
+    ShenandoahHeapRegion* r = _heap->get_region(i);
+    if (r->is_trash()) {
+      ShenandoahHeapLocker locker(_heap->lock());
+      try_recycle_trashed(r);
+    }
+    SpinPause(); // allow allocators to take the lock
+  }
+}
+
+void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
+  size_t idx = r->region_number();
+
+  assert(_mutator_free_bitmap.at(idx), "Should be in mutator view");
+  assert(is_empty_or_trash(r), "Should not be allocated");
+
+  _mutator_free_bitmap.clear_bit(idx);
+  _collector_free_bitmap.set_bit(idx);
+  _collector_leftmost = MIN2(idx, _collector_leftmost);
+  _collector_rightmost = MAX2(idx, _collector_rightmost);
+
+  _capacity -= alloc_capacity(r);
+
+  if (touches_bounds(idx)) {
+    adjust_bounds();
+  }
+  assert_bounds();
+}
+
+void ShenandoahFreeSet::clear() {
+  assert_heaplock_owned_by_current_thread();
+  clear_internal();
+}
+
+void ShenandoahFreeSet::clear_internal() {
+  _mutator_free_bitmap.clear();
+  _collector_free_bitmap.clear();
+  _mutator_leftmost = _max;
+  _mutator_rightmost = 0;
+  _collector_leftmost = _max;
+  _collector_rightmost = 0;
+  _capacity = 0;
+  _used = 0;
+}
+
+void ShenandoahFreeSet::rebuild() {
+  assert_heaplock_owned_by_current_thread();
+  clear();
+
+  for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
+    ShenandoahHeapRegion* region = _heap->get_region(idx);
+    if (region->is_alloc_allowed() || region->is_trash()) {
+      assert(!region->is_cset(), "Shouldn't be adding those to the free set");
+
+      // Do not add regions that would surely fail allocation
+      if (has_no_alloc_capacity(region)) continue;
+
+      _capacity += alloc_capacity(region);
+      assert(_used <= _capacity, "must not use more than we have");
+
+      assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
+      _mutator_free_bitmap.set_bit(idx);
+    }
+  }
+
+  // Evac reserve: reserve trailing space for evacuations
+  size_t to_reserve = _heap->max_capacity() / 100 * ShenandoahEvacReserve;
+  size_t reserved = 0;
+
+  for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
+    if (reserved >= to_reserve) break;
+
+    ShenandoahHeapRegion* region = _heap->get_region(idx);
+    if (_mutator_free_bitmap.at(idx) && is_empty_or_trash(region)) {
+      _mutator_free_bitmap.clear_bit(idx);
+      _collector_free_bitmap.set_bit(idx);
+      size_t ac = alloc_capacity(region);
+      _capacity -= ac;
+      reserved += ac;
+    }
+  }
+
+  recompute_bounds();
+  assert_bounds();
+}
+
+void ShenandoahFreeSet::log_status() {
+  assert_heaplock_owned_by_current_thread();
+
+  LogTarget(Info, gc, ergo) lt;
+  if (lt.is_enabled()) {
ResourceMark rm; + LogStream ls(lt); + + { + size_t last_idx = 0; + size_t max = 0; + size_t max_contig = 0; + size_t empty_contig = 0; + + size_t total_used = 0; + size_t total_free = 0; + + for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) { + if (is_mutator_free(idx)) { + ShenandoahHeapRegion *r = _heap->get_region(idx); + size_t free = alloc_capacity(r); + + max = MAX2(max, free); + + if (r->is_empty() && (last_idx + 1 == idx)) { + empty_contig++; + } else { + empty_contig = 0; + } + + total_used += r->used(); + total_free += free; + + max_contig = MAX2(max_contig, empty_contig); + last_idx = idx; + } + } + + size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes(); + size_t free = capacity() - used(); + + ls.print("Free: " SIZE_FORMAT "%s (" SIZE_FORMAT " regions), Max regular: " SIZE_FORMAT "%s, Max humongous: " SIZE_FORMAT "%s, ", + byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free), + mutator_count(), + byte_size_in_proper_unit(max), proper_unit_for_byte_size(max), + byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous) + ); + + size_t frag_ext; + if (free > 0) { + frag_ext = 100 - (100 * max_humongous / free); + } else { + frag_ext = 0; + } + ls.print("External frag: " SIZE_FORMAT "%%, ", frag_ext); + + size_t frag_int; + if (mutator_count() > 0) { + frag_int = (100 * (total_used / mutator_count()) / ShenandoahHeapRegion::region_size_bytes()); + } else { + frag_int = 0; + } + ls.print("Internal frag: " SIZE_FORMAT "%%", frag_int); + ls.cr(); + } + + { + size_t max = 0; + size_t total_free = 0; + + for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) { + if (is_collector_free(idx)) { + ShenandoahHeapRegion *r = _heap->get_region(idx); + size_t free = alloc_capacity(r); + max = MAX2(max, free); + total_free += free; + } + } + + ls.print_cr("Evacuation Reserve: " SIZE_FORMAT "%s (" SIZE_FORMAT " regions), Max regular: " SIZE_FORMAT "%s", + byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free), + collector_count(), + byte_size_in_proper_unit(max), proper_unit_for_byte_size(max)); + } + } +} + +HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) { + assert_heaplock_owned_by_current_thread(); + assert_bounds(); + + if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) { + switch (req.type()) { + case ShenandoahAllocRequest::_alloc_shared: + case ShenandoahAllocRequest::_alloc_shared_gc: + in_new_region = true; + return allocate_contiguous(req); + case ShenandoahAllocRequest::_alloc_gclab: + case ShenandoahAllocRequest::_alloc_tlab: + in_new_region = false; + assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT, + req.size(), ShenandoahHeapRegion::humongous_threshold_words()); + return NULL; + default: + ShouldNotReachHere(); + return NULL; + } + } else { + return allocate_single(req, in_new_region); + } +} + +size_t ShenandoahFreeSet::unsafe_peek_free() const { + // Deliberately not locked, this method is unsafe when free set is modified. 
+
+  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
+    if (index < _max && is_mutator_free(index)) {
+      ShenandoahHeapRegion* r = _heap->get_region(index);
+      if (r->free() >= MinTLABSize) {
+        return r->free();
+      }
+    }
+  }
+
+  // It appears that no regions are left
+  return 0;
+}
+
+void ShenandoahFreeSet::print_on(outputStream* out) const {
+  out->print_cr("Mutator Free Set: " SIZE_FORMAT "", mutator_count());
+  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
+    if (is_mutator_free(index)) {
+      _heap->get_region(index)->print_on(out);
+    }
+  }
+  out->print_cr("Collector Free Set: " SIZE_FORMAT "", collector_count());
+  for (size_t index = _collector_leftmost; index <= _collector_rightmost; index++) {
+    if (is_collector_free(index)) {
+      _heap->get_region(index)->print_on(out);
+    }
+  }
+}
+
+#ifdef ASSERT
+void ShenandoahFreeSet::assert_heaplock_owned_by_current_thread() const {
+  _heap->assert_heaplock_owned_by_current_thread();
+}
+
+void ShenandoahFreeSet::assert_heaplock_not_owned_by_current_thread() const {
+  _heap->assert_heaplock_not_owned_by_current_thread();
+}
+
+void ShenandoahFreeSet::assert_bounds() const {
+  // Performance invariants. Failing these would not break the free set, but performance
+  // would suffer.
+  assert (_mutator_leftmost <= _max, "leftmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _mutator_leftmost, _max);
+  assert (_mutator_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _mutator_rightmost, _max);
+
+  assert (_mutator_leftmost == _max || is_mutator_free(_mutator_leftmost), "leftmost region should be free: " SIZE_FORMAT, _mutator_leftmost);
+  assert (_mutator_rightmost == 0 || is_mutator_free(_mutator_rightmost), "rightmost region should be free: " SIZE_FORMAT, _mutator_rightmost);
+
+  size_t beg_off = _mutator_free_bitmap.get_next_one_offset(0);
+  size_t end_off = _mutator_free_bitmap.get_next_one_offset(_mutator_rightmost + 1);
+  assert (beg_off >= _mutator_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _mutator_leftmost);
+  assert (end_off == _max, "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, _mutator_rightmost);
+
+  assert (_collector_leftmost <= _max, "leftmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _collector_leftmost, _max);
+  assert (_collector_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _collector_rightmost, _max);
+
+  assert (_collector_leftmost == _max || is_collector_free(_collector_leftmost), "leftmost region should be free: " SIZE_FORMAT, _collector_leftmost);
+  assert (_collector_rightmost == 0 || is_collector_free(_collector_rightmost), "rightmost region should be free: " SIZE_FORMAT, _collector_rightmost);
+
+  beg_off = _collector_free_bitmap.get_next_one_offset(0);
+  end_off = _collector_free_bitmap.get_next_one_offset(_collector_rightmost + 1);
+  assert (beg_off >= _collector_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _collector_leftmost);
+  assert (end_off == _max, "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, _collector_rightmost);
+}
+#endif
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp	2020-01-17 17:10:06.161130697 +0100
@@ -0,0 +1,98 @@
+
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHFREESET_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHFREESET_HPP
+
+#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+
+class ShenandoahFreeSet : public CHeapObj<mtGC> {
+private:
+  ShenandoahHeap* const _heap;
+  CHeapBitMap _mutator_free_bitmap;
+  CHeapBitMap _collector_free_bitmap;
+  size_t _max;
+
+  // Left-most and right-most region indexes. There are no free regions outside
+  // of [left-most; right-most] index intervals.
+  size_t _mutator_leftmost, _mutator_rightmost;
+  size_t _collector_leftmost, _collector_rightmost;
+
+  size_t _capacity;
+  size_t _used;
+
+  void assert_bounds() const NOT_DEBUG_RETURN;
+  void assert_heaplock_owned_by_current_thread() const NOT_DEBUG_RETURN;
+  void assert_heaplock_not_owned_by_current_thread() const NOT_DEBUG_RETURN;
+
+  bool is_mutator_free(size_t idx) const;
+  bool is_collector_free(size_t idx) const;
+
+  HeapWord* try_allocate_in(ShenandoahHeapRegion* region, ShenandoahAllocRequest& req, bool& in_new_region);
+  HeapWord* allocate_single(ShenandoahAllocRequest& req, bool& in_new_region);
+  HeapWord* allocate_contiguous(ShenandoahAllocRequest& req);
+
+  void flip_to_gc(ShenandoahHeapRegion* r);
+
+  void recompute_bounds();
+  void adjust_bounds();
+  bool touches_bounds(size_t num) const;
+
+  void increase_used(size_t amount);
+  void clear_internal();
+
+  size_t collector_count() const { return _collector_free_bitmap.count_one_bits(); }
+  size_t mutator_count() const { return _mutator_free_bitmap.count_one_bits(); }
+
+  void try_recycle_trashed(ShenandoahHeapRegion *r);
+
+  bool is_empty_or_trash(ShenandoahHeapRegion *r);
+  size_t alloc_capacity(ShenandoahHeapRegion *r);
+  bool has_no_alloc_capacity(ShenandoahHeapRegion *r);
+
+public:
+  ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions);
+
+  void clear();
+  void rebuild();
+
+  void recycle_trash();
+
+  void log_status();
+
+  size_t capacity() const { return _capacity; }
+  size_t used() const { return _used; }
+  size_t available() const {
+    assert(_used <= _capacity, "must use less than capacity");
+    return _capacity - _used;
+  }
+
+  HeapWord* allocate(ShenandoahAllocRequest& req, bool& in_new_region);
+  size_t unsafe_peek_free() const;
+
+  void print_on(outputStream* out) const;
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHFREESET_HPP
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	2020-01-17 17:10:06.766130663 +0100
@@ -0,0 +1,2904 @@
+/*
+ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "memory/allocation.hpp"
+
+#include "gc/shared/gcTimer.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/memAllocator.hpp"
+#include "gc/shared/parallelCleaning.hpp"
+#include "gc/shared/plab.hpp"
+
+#include "gc/shenandoah/shenandoahAllocTracker.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/shenandoahClosures.inline.hpp"
+#include "gc/shenandoah/shenandoahCollectionSet.hpp"
+#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
+#include "gc/shenandoah/shenandoahControlThread.hpp"
+#include "gc/shenandoah/shenandoahFreeSet.hpp"
+#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
+#include "gc/shenandoah/shenandoahMarkCompact.hpp"
+#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
+#include "gc/shenandoah/shenandoahMemoryPool.hpp"
+#include "gc/shenandoah/shenandoahMetrics.hpp"
+#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
+#include "gc/shenandoah/shenandoahNormalMode.hpp"
+#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
+#include "gc/shenandoah/shenandoahPacer.inline.hpp"
+#include "gc/shenandoah/shenandoahPassiveMode.hpp"
+#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
+#include "gc/shenandoah/shenandoahStringDedup.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
+#include "gc/shenandoah/shenandoahTraversalMode.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+#include "gc/shenandoah/shenandoahVerifier.hpp"
+#include "gc/shenandoah/shenandoahCodeRoots.hpp"
+#include "gc/shenandoah/shenandoahVMOperations.hpp"
+#include "gc/shenandoah/shenandoahWorkGroup.hpp"
+#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
+#if INCLUDE_JFR
+#include "gc/shenandoah/shenandoahJfrSupport.hpp"
+#endif
+
+#include "memory/metaspace.hpp"
+#include "runtime/vmThread.hpp"
+#include "services/mallocTracker.hpp"
+
+#ifdef ASSERT
+template <class T>
+void ShenandoahAssertToSpaceClosure::do_oop_work(T* p) {
+  T o = RawAccess<>::oop_load(p);
+  if (!
CompressedOops::is_null(o)) { + oop obj = CompressedOops::decode_not_null(o); + shenandoah_assert_not_forwarded(p, obj); + } +} + +void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_work(p); } +void ShenandoahAssertToSpaceClosure::do_oop(oop* p) { do_oop_work(p); } +#endif + +class ShenandoahPretouchHeapTask : public AbstractGangTask { +private: + ShenandoahRegionIterator _regions; + const size_t _page_size; +public: + ShenandoahPretouchHeapTask(size_t page_size) : + AbstractGangTask("Shenandoah Pretouch Heap"), + _page_size(page_size) {} + + virtual void work(uint worker_id) { + ShenandoahHeapRegion* r = _regions.next(); + while (r != NULL) { + os::pretouch_memory(r->bottom(), r->end(), _page_size); + r = _regions.next(); + } + } +}; + +class ShenandoahPretouchBitmapTask : public AbstractGangTask { +private: + ShenandoahRegionIterator _regions; + char* _bitmap_base; + const size_t _bitmap_size; + const size_t _page_size; +public: + ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) : + AbstractGangTask("Shenandoah Pretouch Bitmap"), + _bitmap_base(bitmap_base), + _bitmap_size(bitmap_size), + _page_size(page_size) {} + + virtual void work(uint worker_id) { + ShenandoahHeapRegion* r = _regions.next(); + while (r != NULL) { + size_t start = r->region_number() * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor(); + size_t end = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor(); + assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size); + + os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size); + + r = _regions.next(); + } + } +}; + +jint ShenandoahHeap::initialize() { + initialize_heuristics(); + + // + // Figure out heap sizing + // + + size_t init_byte_size = collector_policy()->initial_heap_byte_size(); + size_t min_byte_size = collector_policy()->min_heap_byte_size(); + size_t max_byte_size = collector_policy()->max_heap_byte_size(); + size_t heap_alignment = collector_policy()->heap_alignment(); + + size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + + if (ShenandoahAlwaysPreTouch) { + // Enabled pre-touch means the entire heap is committed right away. + init_byte_size = max_byte_size; + } + + Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap"); + Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap"); + + _num_regions = ShenandoahHeapRegion::region_count(); + + size_t num_committed_regions = init_byte_size / reg_size_bytes; + num_committed_regions = MIN2(num_committed_regions, _num_regions); + assert(num_committed_regions <= _num_regions, "sanity"); + _initial_size = num_committed_regions * reg_size_bytes; + + size_t num_min_regions = min_byte_size / reg_size_bytes; + num_min_regions = MIN2(num_min_regions, _num_regions); + assert(num_min_regions <= _num_regions, "sanity"); + _minimum_size = num_min_regions * reg_size_bytes; + + _committed = _initial_size; + + size_t heap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size(); + size_t bitmap_page_size = UseLargePages ? 
(size_t)os::large_page_size() : (size_t)os::vm_page_size(); + + // + // Reserve and commit memory for heap + // + + ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment); + initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size())); + _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize); + _heap_region_special = heap_rs.special(); + + assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0, + "Misaligned heap: " PTR_FORMAT, p2i(base())); + +#if SHENANDOAH_OPTIMIZED_OBJTASK + // The optimized ObjArrayChunkedTask takes some bits away from the full object bits. + // Fail if we ever attempt to address more than we can. + if ((uintptr_t)heap_rs.end() >= ObjArrayChunkedTask::max_addressable()) { + FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n" + "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n" + "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).", + p2i(heap_rs.base()), p2i(heap_rs.end()), ObjArrayChunkedTask::max_addressable()); + vm_exit_during_initialization("Fatal Error", buf); + } +#endif + + ReservedSpace sh_rs = heap_rs.first_part(max_byte_size); + if (!_heap_region_special) { + os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false, + "Cannot commit heap memory"); + } + + // + // Reserve and commit memory for bitmap(s) + // + + _bitmap_size = MarkBitMap::compute_size(heap_rs.size()); + _bitmap_size = align_up(_bitmap_size, bitmap_page_size); + + size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor(); + + guarantee(bitmap_bytes_per_region != 0, + "Bitmap bytes per region should not be zero"); + guarantee(is_power_of_2(bitmap_bytes_per_region), + "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region); + + if (bitmap_page_size > bitmap_bytes_per_region) { + _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region; + _bitmap_bytes_per_slice = bitmap_page_size; + } else { + _bitmap_regions_per_slice = 1; + _bitmap_bytes_per_slice = bitmap_bytes_per_region; + } + + guarantee(_bitmap_regions_per_slice >= 1, + "Should have at least one region per slice: " SIZE_FORMAT, + _bitmap_regions_per_slice); + + guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0, + "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT, + _bitmap_bytes_per_slice, bitmap_page_size); + + ReservedSpace bitmap(_bitmap_size, bitmap_page_size); + MemTracker::record_virtual_memory_type(bitmap.base(), mtGC); + _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize); + _bitmap_region_special = bitmap.special(); + + size_t bitmap_init_commit = _bitmap_bytes_per_slice * + align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice; + bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit); + if (!_bitmap_region_special) { + os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false, + "Cannot commit bitmap memory"); + } + + _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions); + + if (ShenandoahVerify) { + ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size); + if (!verify_bitmap.special()) { + os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false, + "Cannot commit 
verification bitmap memory"); + } + MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC); + MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize); + _verification_bit_map.initialize(_heap_region, verify_bitmap_region); + _verifier = new ShenandoahVerifier(this, &_verification_bit_map); + } + + // Reserve aux bitmap for use in object_iterate(). We don't commit it here. + ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size); + MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC); + _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize); + _aux_bitmap_region_special = aux_bitmap.special(); + _aux_bit_map.initialize(_heap_region, _aux_bitmap_region); + + // + // Create regions and region sets + // + + _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC); + _free_set = new ShenandoahFreeSet(this, _num_regions); + _collection_set = new ShenandoahCollectionSet(this, sh_rs.base(), sh_rs.size()); + + { + ShenandoahHeapLocker locker(lock()); + + size_t size_words = ShenandoahHeapRegion::region_size_words(); + + for (size_t i = 0; i < _num_regions; i++) { + HeapWord* start = (HeapWord*)sh_rs.base() + size_words * i; + bool is_committed = i < num_committed_regions; + ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this, start, size_words, i, is_committed); + + _marking_context->initialize_top_at_mark_start(r); + _regions[i] = r; + assert(!collection_set()->is_in(i), "New region should not be in collection set"); + } + + // Initialize to complete + _marking_context->mark_complete(); + + _free_set->rebuild(); + } + + if (ShenandoahAlwaysPreTouch) { + assert(!AlwaysPreTouch, "Should have been overridden"); + + // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads, + // before initialize() below zeroes it with initializing thread. For any given region, + // we touch the region and the corresponding bitmaps from the same thread. + ShenandoahPushWorkerScope scope(workers(), _max_workers, false); + + size_t pretouch_heap_page_size = heap_page_size; + size_t pretouch_bitmap_page_size = bitmap_page_size; + +#ifdef LINUX + // UseTransparentHugePages would madvise that backing memory can be coalesced into huge + // pages. But, the kernel needs to know that every small page is used, in order to coalesce + // them into huge one. Therefore, we need to pretouch with smaller pages. + if (UseTransparentHugePages) { + pretouch_heap_page_size = (size_t)os::vm_page_size(); + pretouch_bitmap_page_size = (size_t)os::vm_page_size(); + } +#endif + + // OS memory managers may want to coalesce back-to-back pages. Make their jobs + // simpler by pre-touching continuous spaces (heap and bitmap) separately. 
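+  // For instance (illustrative numbers only, not from this change): with 4 KB base
+  // pages backing a transparent-huge-pages heap, pretouching at a 4 KB stride
+  // faults in every small page, which lets the kernel later collapse them into
+  // 2 MB huge pages; a 2 MB stride would touch only one small page out of every 512.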
+ + log_info(gc, init)("Pretouch bitmap: " SIZE_FORMAT " regions, " SIZE_FORMAT " bytes page", + _num_regions, pretouch_bitmap_page_size); + ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, pretouch_bitmap_page_size); + _workers->run_task(&bcl); + + log_info(gc, init)("Pretouch heap: " SIZE_FORMAT " regions, " SIZE_FORMAT " bytes page", + _num_regions, pretouch_heap_page_size); + ShenandoahPretouchHeapTask hcl(pretouch_heap_page_size); + _workers->run_task(&hcl); + } + + // + // Initialize the rest of GC subsystems + // + + BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this)); + + _liveness_cache = NEW_C_HEAP_ARRAY(jushort*, _max_workers, mtGC); + for (uint worker = 0; worker < _max_workers; worker++) { + _liveness_cache[worker] = NEW_C_HEAP_ARRAY(jushort, _num_regions, mtGC); + Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(jushort)); + } + + // The call below uses stuff (the SATB* things) that are in G1, but probably + // belong into a shared location. + ShenandoahBarrierSet::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, + SATB_Q_FL_lock, + 20 /*G1SATBProcessCompletedThreshold */, + Shared_SATB_Q_lock); + + _monitoring_support = new ShenandoahMonitoringSupport(this); + _phase_timings = new ShenandoahPhaseTimings(); + ShenandoahStringDedup::initialize(); + ShenandoahCodeRoots::initialize(); + + if (ShenandoahAllocationTrace) { + _alloc_tracker = new ShenandoahAllocTracker(); + } + + if (ShenandoahPacing) { + _pacer = new ShenandoahPacer(this); + _pacer->setup_for_idle(); + } else { + _pacer = NULL; + } + + _traversal_gc = strcmp(ShenandoahGCMode, "traversal") == 0 ? + new ShenandoahTraversalGC(this, _num_regions) : + NULL; + + _control_thread = new ShenandoahControlThread(); + + log_info(gc, init)("Initialize Shenandoah heap: " SIZE_FORMAT "%s initial, " SIZE_FORMAT "%s min, " SIZE_FORMAT "%s max", + byte_size_in_proper_unit(_initial_size), proper_unit_for_byte_size(_initial_size), + byte_size_in_proper_unit(_minimum_size), proper_unit_for_byte_size(_minimum_size), + byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()) + ); + + log_info(gc, init)("Safepointing mechanism: %s", + SafepointMechanism::uses_thread_local_poll() ? "thread-local poll" : + (SafepointMechanism::uses_global_page_poll() ? 
"global-page poll" : "unknown")); + + return JNI_OK; +} + +void ShenandoahHeap::initialize_heuristics() { + if (ShenandoahGCMode != NULL) { + if (strcmp(ShenandoahGCMode, "traversal") == 0) { + _gc_mode = new ShenandoahTraversalMode(); + } else if (strcmp(ShenandoahGCMode, "normal") == 0) { + _gc_mode = new ShenandoahNormalMode(); + } else if (strcmp(ShenandoahGCMode, "passive") == 0) { + _gc_mode = new ShenandoahPassiveMode(); + } else { + vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option"); + } + } else { + ShouldNotReachHere(); + } + _gc_mode->initialize_flags(); + _heuristics = _gc_mode->initialize_heuristics(); + + if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) { + vm_exit_during_initialization( + err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.", + _heuristics->name())); + } + if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) { + vm_exit_during_initialization( + err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.", + _heuristics->name())); + } + log_info(gc, init)("Shenandoah heuristics: %s", + _heuristics->name()); +} + +#ifdef _MSC_VER +#pragma warning( push ) +#pragma warning( disable:4355 ) // 'this' : used in base member initializer list +#endif + +ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) : + CollectedHeap(), + _initial_size(0), + _used(0), + _committed(0), + _bytes_allocated_since_gc_start(0), + _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)), + _workers(NULL), + _safepoint_workers(NULL), + _heap_region_special(false), + _num_regions(0), + _regions(NULL), + _update_refs_iterator(this), + _control_thread(NULL), + _shenandoah_policy(policy), + _heuristics(NULL), + _free_set(NULL), + _scm(new ShenandoahConcurrentMark()), + _traversal_gc(NULL), + _full_gc(new ShenandoahMarkCompact()), + _pacer(NULL), + _verifier(NULL), + _alloc_tracker(NULL), + _phase_timings(NULL), + _monitoring_support(NULL), + _memory_pool(NULL), + _stw_memory_manager("Shenandoah Pauses", "end of GC pause"), + _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"), + _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()), + _soft_ref_policy(), + _ref_processor(NULL), + _marking_context(NULL), + _bitmap_size(0), + _bitmap_regions_per_slice(0), + _bitmap_bytes_per_slice(0), + _bitmap_region_special(false), + _aux_bitmap_region_special(false), + _liveness_cache(NULL), + _collection_set(NULL) +{ + log_info(gc, init)("GC threads: " UINT32_FORMAT " parallel, " UINT32_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads); + log_info(gc, init)("Reference processing: %s", ParallelRefProcEnabled ? 
"parallel" : "serial"); + + _max_workers = MAX2(_max_workers, 1U); + _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers, + /* are_GC_task_threads */ true, + /* are_ConcurrentGC_threads */ true); + if (_workers == NULL) { + vm_exit_during_initialization("Failed necessary allocation."); + } else { + _workers->initialize_workers(); + } + + if (ShenandoahParallelSafepointThreads > 1) { + _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread", + ShenandoahParallelSafepointThreads, + /* are_GC_task_threads */ false, + /* are_ConcurrentGC_threads */ false); + _safepoint_workers->initialize_workers(); + } +} + +#ifdef _MSC_VER +#pragma warning( pop ) +#endif + +class ShenandoahResetBitmapTask : public AbstractGangTask { +private: + ShenandoahRegionIterator _regions; + +public: + ShenandoahResetBitmapTask() : + AbstractGangTask("Parallel Reset Bitmap Task") {} + + void work(uint worker_id) { + ShenandoahHeapRegion* region = _regions.next(); + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahMarkingContext* const ctx = heap->marking_context(); + while (region != NULL) { + if (heap->is_bitmap_slice_committed(region)) { + ctx->clear_bitmap(region); + } + region = _regions.next(); + } + } +}; + +void ShenandoahHeap::reset_mark_bitmap() { + assert_gc_workers(_workers->active_workers()); + mark_incomplete_marking_context(); + + ShenandoahResetBitmapTask task; + _workers->run_task(&task); +} + +void ShenandoahHeap::print_on(outputStream* st) const { + st->print_cr("Shenandoah Heap"); + st->print_cr(" " SIZE_FORMAT "%s total, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used", + byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()), + byte_size_in_proper_unit(committed()), proper_unit_for_byte_size(committed()), + byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used())); + st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions", + num_regions(), + byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()), + proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes())); + + st->print("Status: "); + if (has_forwarded_objects()) st->print("has forwarded objects, "); + if (is_concurrent_mark_in_progress()) st->print("marking, "); + if (is_evacuation_in_progress()) st->print("evacuating, "); + if (is_update_refs_in_progress()) st->print("updating refs, "); + if (is_concurrent_traversal_in_progress()) st->print("traversal, "); + if (is_degenerated_gc_in_progress()) st->print("degenerated gc, "); + if (is_full_gc_in_progress()) st->print("full gc, "); + if (is_full_gc_move_in_progress()) st->print("full gc move, "); + + if (cancelled_gc()) { + st->print("cancelled"); + } else { + st->print("not cancelled"); + } + st->cr(); + + st->print_cr("Reserved region:"); + st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ", + p2i(reserved_region().start()), + p2i(reserved_region().end())); + + ShenandoahCollectionSet* cset = collection_set(); + st->print_cr("Collection set:"); + if (cset != NULL) { + st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address())); + st->print_cr(" - map (biased): " PTR_FORMAT, p2i(cset->biased_map_address())); + } else { + st->print_cr(" (NULL)"); + } + + st->cr(); + MetaspaceUtils::print_on(st); + + if (Verbose) { + print_heap_regions_on(st); + } +} + +class ShenandoahInitWorkerGCLABClosure : public ThreadClosure { +public: + void do_thread(Thread* thread) { + assert(thread != NULL, "Sanity"); + assert(thread->is_Worker_thread(), "Only worker thread expected"); + 
ShenandoahThreadLocalData::initialize_gclab(thread);
+  }
+};
+
+void ShenandoahHeap::post_initialize() {
+  CollectedHeap::post_initialize();
+  MutexLocker ml(Threads_lock);
+
+  ShenandoahInitWorkerGCLABClosure init_gclabs;
+  _workers->threads_do(&init_gclabs);
+
+  // The gclab cannot be initialized early during VM startup, as it cannot determine its max_size.
+  // Instead, we let the WorkGang initialize the gclab when a new worker is created.
+  _workers->set_initialize_gclab();
+
+  _scm->initialize(_max_workers);
+  _full_gc->initialize(_gc_timer);
+
+  ref_processing_init();
+
+  _heuristics->initialize();
+
+  JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
+}
+
+size_t ShenandoahHeap::used() const {
+  return OrderAccess::load_acquire(&_used);
+}
+
+size_t ShenandoahHeap::committed() const {
+  OrderAccess::acquire();
+  return _committed;
+}
+
+void ShenandoahHeap::increase_committed(size_t bytes) {
+  assert_heaplock_or_safepoint();
+  _committed += bytes;
+}
+
+void ShenandoahHeap::decrease_committed(size_t bytes) {
+  assert_heaplock_or_safepoint();
+  _committed -= bytes;
+}
+
+void ShenandoahHeap::increase_used(size_t bytes) {
+  Atomic::add(bytes, &_used);
+}
+
+void ShenandoahHeap::set_used(size_t bytes) {
+  OrderAccess::release_store_fence(&_used, bytes);
+}
+
+void ShenandoahHeap::decrease_used(size_t bytes) {
+  assert(used() >= bytes, "never decrease heap size by more than we've left");
+  Atomic::sub(bytes, &_used);
+}
+
+void ShenandoahHeap::increase_allocated(size_t bytes) {
+  Atomic::add(bytes, &_bytes_allocated_since_gc_start);
+}
+
+void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
+  size_t bytes = words * HeapWordSize;
+  if (!waste) {
+    increase_used(bytes);
+  }
+  increase_allocated(bytes);
+  if (ShenandoahPacing) {
+    control_thread()->pacing_notify_alloc(words);
+    if (waste) {
+      pacer()->claim_for_alloc(words, true);
+    }
+  }
+}
+
+size_t ShenandoahHeap::capacity() const {
+  return committed();
+}
+
+size_t ShenandoahHeap::max_capacity() const {
+  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
+}
+
+size_t ShenandoahHeap::min_capacity() const {
+  return _minimum_size;
+}
+
+size_t ShenandoahHeap::initial_capacity() const {
+  return _initial_size;
+}
+
+bool ShenandoahHeap::is_in(const void* p) const {
+  HeapWord* heap_base = (HeapWord*) base();
+  HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
+  return p >= heap_base && p < last_region_end;
+}
+
+void ShenandoahHeap::op_uncommit(double shrink_before) {
+  assert (ShenandoahUncommit, "should be enabled");
+
+  // Application allocates from the beginning of the heap, and GC allocates at
+  // the end of it. It is more efficient to uncommit from the end, so that applications
+  // can keep enjoying the committed regions near the beginning of the heap. GC allocations
+  // are much less frequent, and can therefore accept the committing costs.
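+  //
+  // The walk below goes from the highest region index down. Candidate regions are
+  // found without holding the heap lock; the empty-committed state is then re-checked
+  // under ShenandoahHeapLocker before uncommitting, since an allocation could have
+  // raced in between the two checks.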
+ + size_t count = 0; + for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow + ShenandoahHeapRegion* r = get_region(i - 1); + if (r->is_empty_committed() && (r->empty_time() < shrink_before)) { + ShenandoahHeapLocker locker(lock()); + if (r->is_empty_committed()) { + // Do not uncommit below minimal capacity + if (committed() < min_capacity() + ShenandoahHeapRegion::region_size_bytes()) { + break; + } + + r->make_uncommitted(); + count++; + } + } + SpinPause(); // allow allocators to take the lock + } + + if (count > 0) { + control_thread()->notify_heap_changed(); + } +} + +HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) { + // New object should fit the GCLAB size + size_t min_size = MAX2(size, PLAB::min_size()); + + // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively. + size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2; + new_size = MIN2(new_size, PLAB::max_size()); + new_size = MAX2(new_size, PLAB::min_size()); + + // Record new heuristic value even if we take any shortcut. This captures + // the case when moderately-sized objects always take a shortcut. At some point, + // heuristics should catch up with them. + ShenandoahThreadLocalData::set_gclab_size(thread, new_size); + + if (new_size < size) { + // New size still does not fit the object. Fall back to shared allocation. + // This avoids retiring perfectly good GCLABs, when we encounter a large object. + return NULL; + } + + // Retire current GCLAB, and allocate a new one. + PLAB* gclab = ShenandoahThreadLocalData::gclab(thread); + gclab->retire(); + + size_t actual_size = 0; + HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size); + if (gclab_buf == NULL) { + return NULL; + } + + assert (size <= actual_size, "allocation should fit"); + + if (ZeroTLAB) { + // ..and clear it. + Copy::zero_to_words(gclab_buf, actual_size); + } else { + // ...and zap just allocated object. +#ifdef ASSERT + // Skip mangling the space corresponding to the object header to + // ensure that the returned space is not considered parsable by + // any concurrent GC thread. 
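+    // Only the payload past the header is mangled below: the words in
+    // [gclab_buf + hdr_size, gclab_buf + actual_size) are filled with
+    // badHeapWordVal, while the header words themselves stay untouched.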
+    size_t hdr_size = oopDesc::header_size();
+    Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
+#endif // ASSERT
+  }
+  gclab->set_buf(gclab_buf, actual_size);
+  return gclab->allocate(size);
+}
+
+HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
+                                            size_t requested_size,
+                                            size_t* actual_size) {
+  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
+  HeapWord* res = allocate_memory(req);
+  if (res != NULL) {
+    *actual_size = req.actual_size();
+  } else {
+    *actual_size = 0;
+  }
+  return res;
+}
+
+HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
+                                             size_t word_size,
+                                             size_t* actual_size) {
+  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
+  HeapWord* res = allocate_memory(req);
+  if (res != NULL) {
+    *actual_size = req.actual_size();
+  } else {
+    *actual_size = 0;
+  }
+  return res;
+}
+
+ShenandoahHeap* ShenandoahHeap::heap() {
+  CollectedHeap* heap = Universe::heap();
+  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
+  assert(heap->kind() == CollectedHeap::Shenandoah, "not a shenandoah heap");
+  return (ShenandoahHeap*) heap;
+}
+
+ShenandoahHeap* ShenandoahHeap::heap_no_check() {
+  CollectedHeap* heap = Universe::heap();
+  return (ShenandoahHeap*) heap;
+}
+
+HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
+  ShenandoahAllocTrace trace_alloc(req.size(), req.type());
+
+  intptr_t pacer_epoch = 0;
+  bool in_new_region = false;
+  HeapWord* result = NULL;
+
+  if (req.is_mutator_alloc()) {
+    if (ShenandoahPacing) {
+      pacer()->pace_for_alloc(req.size());
+      pacer_epoch = pacer()->epoch();
+    }
+
+    if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
+      result = allocate_memory_under_lock(req, in_new_region);
+    }
+
+    // If the allocation failed, block until the control thread has reacted, then retry
+    // the allocation.
+    //
+    // It might happen that one of the threads requesting allocation would unblock
+    // way later after GC happened, only to fail the second allocation, because
+    // other threads have already depleted the free storage. In this case, a better
+    // strategy is to try again, as long as GC makes progress.
+    //
+    // Then, we need to make sure the allocation was retried after at least one
+    // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.
+
+    size_t tries = 0;
+
+    while (result == NULL && _progress_last_gc.is_set()) {
+      tries++;
+      control_thread()->handle_alloc_failure(req.size());
+      result = allocate_memory_under_lock(req, in_new_region);
+    }
+
+    while (result == NULL && tries <= ShenandoahFullGCThreshold) {
+      tries++;
+      control_thread()->handle_alloc_failure(req.size());
+      result = allocate_memory_under_lock(req, in_new_region);
+    }
+
+  } else {
+    assert(req.is_gc_alloc(), "Can only accept GC allocs here");
+    result = allocate_memory_under_lock(req, in_new_region);
+    // Do not call handle_alloc_failure() here, because we cannot block.
+    // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
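+    // (There is deliberately no retry loop in this branch: a failed GC allocation
+    // must surface to the caller right away, and the evacuation OOM protocol takes
+    // it from there.)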
+ } + + if (in_new_region) { + control_thread()->notify_heap_changed(); + } + + if (result != NULL) { + size_t requested = req.size(); + size_t actual = req.actual_size(); + + assert (req.is_lab_alloc() || (requested == actual), + "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT, + ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual); + + if (req.is_mutator_alloc()) { + notify_mutator_alloc_words(actual, false); + + // If we requested more than we were granted, give the rest back to pacer. + // This only matters if we are in the same pacing epoch: do not try to unpace + // over the budget for the other phase. + if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) { + pacer()->unpace_for_alloc(pacer_epoch, requested - actual); + } + } else { + increase_used(actual*HeapWordSize); + } + } + + return result; +} + +HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) { + ShenandoahHeapLocker locker(lock()); + return _free_set->allocate(req, in_new_region); +} + +HeapWord* ShenandoahHeap::mem_allocate(size_t size, + bool* gc_overhead_limit_was_exceeded) { + ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size); + return allocate_memory(req); +} + +MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data, + size_t size, + Metaspace::MetadataType mdtype) { + MetaWord* result; + + // Inform metaspace OOM to GC heuristics if class unloading is possible. + if (heuristics()->can_unload_classes()) { + ShenandoahHeuristics* h = heuristics(); + h->record_metaspace_oom(); + } + + // Expand and retry allocation + result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype); + if (result != NULL) { + return result; + } + + // Start full GC + collect(GCCause::_metadata_GC_clear_soft_refs); + + // Retry allocation + result = loader_data->metaspace_non_null()->allocate(size, mdtype); + if (result != NULL) { + return result; + } + + // Expand and retry allocation + result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype); + if (result != NULL) { + return result; + } + + // Out of memory + return NULL; +} + +class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure { +private: + ShenandoahHeap* const _heap; + Thread* const _thread; +public: + ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) : + _heap(heap), _thread(Thread::current()) {} + + void do_object(oop p) { + shenandoah_assert_marked(NULL, p); + if (!p->is_forwarded()) { + _heap->evacuate_object(p, _thread); + } + } +}; + +class ShenandoahEvacuationTask : public AbstractGangTask { +private: + ShenandoahHeap* const _sh; + ShenandoahCollectionSet* const _cs; + bool _concurrent; +public: + ShenandoahEvacuationTask(ShenandoahHeap* sh, + ShenandoahCollectionSet* cs, + bool concurrent) : + AbstractGangTask("Parallel Evacuation Task"), + _sh(sh), + _cs(cs), + _concurrent(concurrent) + {} + + void work(uint worker_id) { + if (_concurrent) { + ShenandoahConcurrentWorkerSession worker_session(worker_id); + ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers); + ShenandoahEvacOOMScope oom_evac_scope; + do_work(); + } else { + ShenandoahParallelWorkerSession worker_session(worker_id); + ShenandoahEvacOOMScope oom_evac_scope; + do_work(); + } + } + +private: + void do_work() { + ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh); + ShenandoahHeapRegion* r; + while ((r =_cs->claim_next()) != 
NULL) { + assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->region_number()); + _sh->marked_object_iterate(r, &cl); + + if (ShenandoahPacing) { + _sh->pacer()->report_evac(r->used() >> LogHeapWordSize); + } + + if (_sh->check_cancelled_gc_and_yield(_concurrent)) { + break; + } + } + } +}; + +void ShenandoahHeap::trash_cset_regions() { + ShenandoahHeapLocker locker(lock()); + + ShenandoahCollectionSet* set = collection_set(); + ShenandoahHeapRegion* r; + set->clear_current_index(); + while ((r = set->next()) != NULL) { + r->make_trash(); + } + collection_set()->clear(); +} + +void ShenandoahHeap::print_heap_regions_on(outputStream* st) const { + st->print_cr("Heap Regions:"); + st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned"); + st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data"); + st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start (previous, next)"); + st->print_cr("SN=alloc sequence numbers (first mutator, last mutator, first gc, last gc)"); + + for (size_t i = 0; i < num_regions(); i++) { + get_region(i)->print_on(st); + } +} + +void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) { + assert(start->is_humongous_start(), "reclaim regions starting with the first one"); + + oop humongous_obj = oop(start->bottom()); + size_t size = humongous_obj->size(); + size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize); + size_t index = start->region_number() + required_regions - 1; + + assert(!start->has_live(), "liveness must be zero"); + + for(size_t i = 0; i < required_regions; i++) { + // Reclaim from tail. Otherwise, assertion fails when printing region to trace log, + // as it expects that every region belongs to a humongous region starting with a humongous start region. 
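+    // For example, a humongous object spanning regions {10, 11, 12} is trashed
+    // as 12, then 11, then 10, so a continuation region never outlives the
+    // humongous start region it belongs to.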
+ ShenandoahHeapRegion* region = get_region(index --); + + assert(region->is_humongous(), "expect correct humongous start or continuation"); + assert(!region->is_cset(), "Humongous region should not be in collection set"); + + region->make_trash_immediate(); + } +} + +class ShenandoahRetireGCLABClosure : public ThreadClosure { +public: + void do_thread(Thread* thread) { + PLAB* gclab = ShenandoahThreadLocalData::gclab(thread); + assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name()); + gclab->retire(); + } +}; + +void ShenandoahHeap::make_parsable(bool retire_tlabs) { + if (UseTLAB) { + CollectedHeap::ensure_parsability(retire_tlabs); + } + ShenandoahRetireGCLABClosure cl; + for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) { + cl.do_thread(t); + } + workers()->threads_do(&cl); +} + +void ShenandoahHeap::resize_tlabs() { + CollectedHeap::resize_all_tlabs(); +} + +void ShenandoahHeap::accumulate_statistics_tlabs() { + CollectedHeap::accumulate_statistics_all_tlabs(); +} + +class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask { +private: + ShenandoahRootEvacuator* _rp; + +public: + ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) : + AbstractGangTask("Shenandoah evacuate and update roots"), + _rp(rp) {} + + void work(uint worker_id) { + ShenandoahParallelWorkerSession worker_session(worker_id); + ShenandoahEvacOOMScope oom_evac_scope; + ShenandoahEvacuateUpdateRootsClosure cl; + MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations); + _rp->roots_do(worker_id, &cl); + } +}; + +void ShenandoahHeap::evacuate_and_update_roots() { +#if COMPILER2_OR_JVMCI + DerivedPointerTable::clear(); +#endif + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped"); + + { + ShenandoahRootEvacuator rp(workers()->active_workers(), ShenandoahPhaseTimings::init_evac); + ShenandoahEvacuateUpdateRootsTask roots_task(&rp); + workers()->run_task(&roots_task); + } + +#if COMPILER2_OR_JVMCI + DerivedPointerTable::update_pointers(); +#endif +} + +// Returns size in bytes +size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const { + if (ShenandoahElasticTLAB) { + // With Elastic TLABs, return the max allowed size, and let the allocation path + // figure out the safe size for current allocation. 
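+    // Returning the region-wide maximum is safe because an elastic TLAB request
+    // carries both minimal and desired sizes, and the allocation path shrinks the
+    // desired size down to whatever actually fits in the selected region.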
+ return ShenandoahHeapRegion::max_tlab_size_bytes(); + } else { + return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes()); + } +} + +size_t ShenandoahHeap::max_tlab_size() const { + // Returns size in words + return ShenandoahHeapRegion::max_tlab_size_words(); +} + +class ShenandoahRetireAndResetGCLABClosure : public ThreadClosure { +public: + void do_thread(Thread* thread) { + PLAB* gclab = ShenandoahThreadLocalData::gclab(thread); + gclab->retire(); + if (ShenandoahThreadLocalData::gclab_size(thread) > 0) { + ShenandoahThreadLocalData::set_gclab_size(thread, 0); + } + } +}; + +void ShenandoahHeap::retire_and_reset_gclabs() { + ShenandoahRetireAndResetGCLABClosure cl; + for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) { + cl.do_thread(t); + } + workers()->threads_do(&cl); +} + +void ShenandoahHeap::collect(GCCause::Cause cause) { + control_thread()->request_gc(cause); +} + +void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) { + //assert(false, "Shouldn't need to do full collections"); +} + +CollectorPolicy* ShenandoahHeap::collector_policy() const { + return _shenandoah_policy; +} + +HeapWord* ShenandoahHeap::block_start(const void* addr) const { + Space* sp = heap_region_containing(addr); + if (sp != NULL) { + return sp->block_start(addr); + } + return NULL; +} + +size_t ShenandoahHeap::block_size(const HeapWord* addr) const { + Space* sp = heap_region_containing(addr); + assert(sp != NULL, "block_size of address outside of heap"); + return sp->block_size(addr); +} + +bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const { + Space* sp = heap_region_containing(addr); + return sp->block_is_obj(addr); +} + +jlong ShenandoahHeap::millis_since_last_gc() { + double v = heuristics()->time_since_last_gc() * 1000; + assert(0 <= v && v <= max_jlong, "value should fit: %f", v); + return (jlong)v; +} + +void ShenandoahHeap::prepare_for_verify() { + if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { + make_parsable(false); + } +} + +void ShenandoahHeap::print_gc_threads_on(outputStream* st) const { + workers()->print_worker_threads_on(st); + if (ShenandoahStringDedup::is_enabled()) { + ShenandoahStringDedup::print_worker_threads_on(st); + } +} + +void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const { + workers()->threads_do(tcl); + if (_safepoint_workers != NULL) { + _safepoint_workers->threads_do(tcl); + } + if (ShenandoahStringDedup::is_enabled()) { + ShenandoahStringDedup::threads_do(tcl); + } +} + +void ShenandoahHeap::print_tracing_info() const { + LogTarget(Info, gc, stats) lt; + if (lt.is_enabled()) { + ResourceMark rm; + LogStream ls(lt); + + phase_timings()->print_on(&ls); + + ls.cr(); + ls.cr(); + + shenandoah_policy()->print_gc_stats(&ls); + + ls.cr(); + ls.cr(); + + if (ShenandoahPacing) { + pacer()->print_on(&ls); + } + + ls.cr(); + ls.cr(); + + if (ShenandoahAllocationTrace) { + assert(alloc_tracker() != NULL, "Must be"); + alloc_tracker()->print_on(&ls); + } else { + ls.print_cr(" Allocation tracing is disabled, use -XX:+ShenandoahAllocationTrace to enable."); + } + } +} + +void ShenandoahHeap::verify(VerifyOption vo) { + if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) { + if (ShenandoahVerify) { + verifier()->verify_generic(vo); + } else { + // TODO: Consider allocating verification bitmaps on demand, + // and turn this on unconditionally. 
+    }
+  }
+}
+
+size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
+  return _free_set->capacity();
+}
+
+class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
+private:
+  MarkBitMap* _bitmap;
+  Stack<oop, mtGC>* _oop_stack;
+
+  template <class T>
+  void do_oop_work(T* p) {
+    T o = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(o)) {
+      oop obj = CompressedOops::decode_not_null(o);
+      oop fwd = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(obj);
+      if (fwd == NULL) {
+        // There is an odd interaction with VM_HeapWalkOperation, see jvmtiTagMap.cpp.
+        //
+        // That operation walks the reachable objects on its own, storing the marking
+        // wavefront in the object marks. When it is done, it calls the CollectedHeap
+        // to iterate over all objects to clean up the mess. When it reaches here,
+        // the Shenandoah fwdptr resolution code encounters the marked objects with
+        // NULL forwardee. Trying to act on that would crash the VM. Or fail the
+        // asserts, should we go for resolve_forwarded_pointer(obj).
+        //
+        // Therefore, we have to dodge it by doing the raw access to forwardee, and
+        // assuming the object had no forwardee, if that thing is NULL.
+      } else {
+        obj = fwd;
+      }
+      assert(oopDesc::is_oop(obj), "must be a valid oop");
+      if (!_bitmap->isMarked((HeapWord*) obj)) {
+        _bitmap->mark((HeapWord*) obj);
+        _oop_stack->push(obj);
+      }
+    }
+  }
+public:
+  ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop, mtGC>* oop_stack) :
+    _bitmap(bitmap), _oop_stack(oop_stack) {}
+  void do_oop(oop* p)       { do_oop_work(p); }
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+};
+
+/*
+ * This is public API, used in preparation of object_iterate().
+ * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
+ * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
+ * control, we call SH::make_tlabs_parsable().
+ */
+void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
+  // No-op.
+}
+
+/*
+ * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
+ *
+ * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
+ * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
+ * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
+ * scanning therefore depends on having a valid marking bitmap to support it. However, we only
+ * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
+ * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
+ * wiped the bitmap in preparation for next marking).
+ *
+ * For all those reasons, we implement object iteration as a single marking traversal, reporting
+ * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
+ * is allowed to report dead objects, but is not required to do so.
+ */
+void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
+  assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
+  if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
+    log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
+    return;
+  }
+
+  // Reset bitmap
+  _aux_bit_map.clear();
+
+  Stack<oop, mtGC> oop_stack;
+
+  // First, we process GC roots according to current GC cycle. This populates the work stack with initial objects.
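+  // Once the roots are scanned, the stack is drained below: pop an object, report
+  // it to the user closure, then push its not-yet-visited references; _aux_bit_map
+  // guarantees each reachable object is reported exactly once.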
+ ShenandoahHeapIterationRootScanner rp; + ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack); + + // If we are unloading classes right now, we should not touch weak roots, + // on the off-chance we would evacuate them and make them live accidentally. + // In other cases, we have to scan all roots. + if (is_evacuation_in_progress() && unload_classes()) { + rp.strong_roots_do(&oops); + } else { + rp.roots_do(&oops); + } + + // Work through the oop stack to traverse heap. + while (! oop_stack.is_empty()) { + oop obj = oop_stack.pop(); + assert(oopDesc::is_oop(obj), "must be a valid oop"); + cl->do_object(obj); + obj->oop_iterate(&oops); + } + + assert(oop_stack.is_empty(), "should be empty"); + + if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) { + log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration"); + } +} + +void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) { + assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints"); + object_iterate(cl); +} + +void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const { + for (size_t i = 0; i < num_regions(); i++) { + ShenandoahHeapRegion* current = get_region(i); + blk->heap_region_do(current); + } +} + +class ShenandoahParallelHeapRegionTask : public AbstractGangTask { +private: + ShenandoahHeap* const _heap; + ShenandoahHeapRegionClosure* const _blk; + + DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t)); + volatile size_t _index; + DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0); + +public: + ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) : + AbstractGangTask("Parallel Region Task"), + _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {} + + void work(uint worker_id) { + size_t stride = ShenandoahParallelRegionStride; + + size_t max = _heap->num_regions(); + while (_index < max) { + size_t cur = Atomic::add(stride, &_index) - stride; + size_t start = cur; + size_t end = MIN2(cur + stride, max); + if (start >= max) break; + + for (size_t i = cur; i < end; i++) { + ShenandoahHeapRegion* current = _heap->get_region(i); + _blk->heap_region_do(current); + } + } + } +}; + +void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const { + assert(blk->is_thread_safe(), "Only thread-safe closures here"); + if (num_regions() > ShenandoahParallelRegionStride) { + ShenandoahParallelHeapRegionTask task(blk); + workers()->run_task(&task); + } else { + heap_region_iterate(blk); + } +} + +class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure { +private: + ShenandoahMarkingContext* const _ctx; +public: + ShenandoahClearLivenessClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {} + + void heap_region_do(ShenandoahHeapRegion* r) { + if (r->is_active()) { + r->clear_live_data(); + _ctx->capture_top_at_mark_start(r); + } else { + assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->region_number()); + assert(_ctx->top_at_mark_start(r) == r->top(), + "Region " SIZE_FORMAT " should already have correct TAMS", r->region_number()); + } + } + + bool is_thread_safe() { return true; } +}; + +void ShenandoahHeap::op_init_mark() { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint"); + assert(Thread::current()->is_VM_thread(), "can only do this in VMThread"); + + assert(marking_context()->is_bitmap_clear(), 
"need clear marking bitmap"); + assert(!marking_context()->is_complete(), "should not be complete"); + + if (ShenandoahVerify) { + verifier()->verify_before_concmark(); + } + + { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats); + accumulate_statistics_tlabs(); + } + + if (VerifyBeforeGC) { + Universe::verify(); + } + + set_concurrent_mark_in_progress(true); + // We need to reset all TLABs because we'd lose marks on all objects allocated in them. + { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable); + make_parsable(true); + } + + { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness); + ShenandoahClearLivenessClosure clc; + parallel_heap_region_iterate(&clc); + } + + // Make above changes visible to worker threads + OrderAccess::fence(); + + concurrent_mark()->mark_roots(ShenandoahPhaseTimings::scan_roots); + + if (UseTLAB) { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs); + resize_tlabs(); + } + + if (ShenandoahPacing) { + pacer()->setup_for_mark(); + } +} + +void ShenandoahHeap::op_mark() { + concurrent_mark()->mark_from_roots(); +} + +class ShenandoahCompleteLivenessClosure : public ShenandoahHeapRegionClosure { +private: + ShenandoahMarkingContext* const _ctx; +public: + ShenandoahCompleteLivenessClosure() : _ctx(ShenandoahHeap::heap()->complete_marking_context()) {} + + void heap_region_do(ShenandoahHeapRegion* r) { + if (r->is_active()) { + HeapWord *tams = _ctx->top_at_mark_start(r); + HeapWord *top = r->top(); + if (top > tams) { + r->increase_live_data_alloc_words(pointer_delta(top, tams)); + } + } else { + assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->region_number()); + assert(_ctx->top_at_mark_start(r) == r->top(), + "Region " SIZE_FORMAT " should have correct TAMS", r->region_number()); + } + } + + bool is_thread_safe() { return true; } +}; + +void ShenandoahHeap::op_final_mark() { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint"); + + // It is critical that we + // evacuate roots right after finishing marking, so that we don't + // get unmarked objects in the roots. + + if (!cancelled_gc()) { + concurrent_mark()->finish_mark_from_roots(/* full_gc = */ false); + + if (has_forwarded_objects()) { + // Degen may be caused by failed evacuation of roots + if (is_degenerated_gc_in_progress()) { + concurrent_mark()->update_roots(ShenandoahPhaseTimings::degen_gc_update_roots); + } else { + concurrent_mark()->update_thread_roots(ShenandoahPhaseTimings::update_roots); + } + } + + if (ShenandoahVerify) { + verifier()->verify_roots_no_forwarded(); + } + + stop_concurrent_marking(); + + // All allocations past TAMS are implicitly live, adjust the region data. + // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap. + { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::complete_liveness); + ShenandoahCompleteLivenessClosure cl; + parallel_heap_region_iterate(&cl); + } + + // Force the threads to reacquire their TLABs outside the collection set. + { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::retire_tlabs); + make_parsable(true); + } + + // We are about to select the collection set, make sure it knows about + // current pinning status. Also, this allows trashing more regions that + // now have their pinning status dropped. + { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::sync_pinned); + sync_pinned_region_status(); + } + + // Trash the collection set left over from previous cycle, if any. 
+ { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::trash_cset); + trash_cset_regions(); + } + + { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::prepare_evac); + + ShenandoahHeapLocker locker(lock()); + _collection_set->clear(); + _free_set->clear(); + + heuristics()->choose_collection_set(_collection_set); + + _free_set->rebuild(); + } + + // If collection set has candidates, start evacuation. + // Otherwise, bypass the rest of the cycle. + if (!collection_set()->is_empty()) { + ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac); + + if (ShenandoahVerify) { + verifier()->verify_before_evacuation(); + } + + set_evacuation_in_progress(true); + // From here on, we need to update references. + set_has_forwarded_objects(true); + + if (!is_degenerated_gc_in_progress()) { + evacuate_and_update_roots(); + } + + if (ShenandoahPacing) { + pacer()->setup_for_evac(); + } + + if (ShenandoahVerify) { + verifier()->verify_roots_no_forwarded(); + verifier()->verify_during_evacuation(); + } + } else { + if (ShenandoahVerify) { + verifier()->verify_after_concmark(); + } + + if (VerifyAfterGC) { + Universe::verify(); + } + } + + } else { + concurrent_mark()->cancel(); + stop_concurrent_marking(); + + if (process_references()) { + // Abandon reference processing right away: pre-cleaning must have failed. + ReferenceProcessor *rp = ref_processor(); + rp->disable_discovery(); + rp->abandon_partial_discovery(); + rp->verify_no_references_recorded(); + } + } +} + +void ShenandoahHeap::op_final_evac() { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint"); + + set_evacuation_in_progress(false); + + { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac_retire_gclabs); + retire_and_reset_gclabs(); + } + + if (ShenandoahVerify) { + verifier()->verify_after_evacuation(); + } + + if (VerifyAfterGC) { + Universe::verify(); + } +} + +void ShenandoahHeap::op_conc_evac() { + ShenandoahEvacuationTask task(this, _collection_set, true); + workers()->run_task(&task); +} + +void ShenandoahHeap::op_stw_evac() { + ShenandoahEvacuationTask task(this, _collection_set, false); + workers()->run_task(&task); +} + +void ShenandoahHeap::op_updaterefs() { + update_heap_references(true); +} + +void ShenandoahHeap::op_cleanup() { + free_set()->recycle_trash(); +} + +void ShenandoahHeap::op_reset() { + reset_mark_bitmap(); +} + +void ShenandoahHeap::op_preclean() { + concurrent_mark()->preclean_weak_refs(); +} + +void ShenandoahHeap::op_init_traversal() { + traversal_gc()->init_traversal_collection(); +} + +void ShenandoahHeap::op_traversal() { + traversal_gc()->concurrent_traversal_collection(); +} + +void ShenandoahHeap::op_final_traversal() { + traversal_gc()->final_traversal_collection(); +} + +void ShenandoahHeap::op_full(GCCause::Cause cause) { + ShenandoahMetricsSnapshot metrics; + metrics.snap_before(); + + full_gc()->do_it(cause); + if (UseTLAB) { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs); + resize_all_tlabs(); + } + + metrics.snap_after(); + + if (metrics.is_good_progress()) { + _progress_last_gc.set(); + } else { + // Nothing to do. Tell the allocation path that we have failed to make + // progress, and it can finally fail. + _progress_last_gc.unset(); + } +} + +void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) { + // Degenerated GC is STW, but it can also fail. Current mechanics communicates + // GC failure via cancelled_concgc() flag. 
So, if we detect the failure after + // some phase, we have to upgrade the Degenerate GC to Full GC. + + clear_cancelled_gc(); + + ShenandoahMetricsSnapshot metrics; + metrics.snap_before(); + + switch (point) { + case _degenerated_traversal: + { + // Drop the collection set. Note: this leaves some already forwarded objects + // behind, which may be problematic, see comments for ShenandoahEvacAssist + // workarounds in ShenandoahTraversalHeuristics. + + ShenandoahHeapLocker locker(lock()); + collection_set()->clear_current_index(); + for (size_t i = 0; i < collection_set()->count(); i++) { + ShenandoahHeapRegion* r = collection_set()->next(); + r->make_regular_bypass(); + } + collection_set()->clear(); + } + op_final_traversal(); + op_cleanup(); + return; + + // The cases below form the Duff's-like device: it describes the actual GC cycle, + // but enters it at different points, depending on which concurrent phase had + // degenerated. + + case _degenerated_outside_cycle: + // We have degenerated from outside the cycle, which means something is bad with + // the heap, most probably heavy humongous fragmentation, or we are very low on free + // space. It makes little sense to wait for Full GC to reclaim as much as it can, when + // we can do the most aggressive degen cycle, which includes processing references and + // class unloading, unless those features are explicitly disabled. + // + // Note that we can only do this for "outside-cycle" degens, otherwise we would risk + // changing the cycle parameters mid-cycle during concurrent -> degenerated handover. + set_process_references(heuristics()->can_process_references()); + set_unload_classes(heuristics()->can_unload_classes()); + + if (is_traversal_mode()) { + // Not possible to degenerate from here, upgrade to Full GC right away. + cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc); + op_degenerated_fail(); + return; + } + + op_reset(); + + op_init_mark(); + if (cancelled_gc()) { + op_degenerated_fail(); + return; + } + + case _degenerated_mark: + op_final_mark(); + if (cancelled_gc()) { + op_degenerated_fail(); + return; + } + + op_cleanup(); + + case _degenerated_evac: + // If heuristics thinks we should do the cycle, this flag would be set, + // and we can do evacuation. Otherwise, it would be the shortcut cycle. + if (is_evacuation_in_progress()) { + + // Degeneration under oom-evac protocol might have left some objects in + // collection set un-evacuated. Restart evacuation from the beginning to + // capture all objects. For all the objects that are already evacuated, + // it would be a simple check, which is supposed to be fast. This is also + // safe to do even without degeneration, as CSet iterator is at beginning + // in preparation for evacuation anyway. + // + // Before doing that, we need to make sure we never had any cset-pinned + // regions. This may happen if allocation failure happened when evacuating + // the about-to-be-pinned object, oom-evac protocol left the object in + // the collection set, and then the pin reached the cset region. If we continue + // the cycle here, we would trash the cset and alive objects in it. To avoid + // it, we fail degeneration right away and slide into Full GC to recover. 
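+      // Concretely: sync the pin statuses below, then walk the collection set;
+      // if any cset region turns out to be pinned, upgrade to Full GC instead of
+      // continuing the degenerated cycle.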
+ + { + sync_pinned_region_status(); + collection_set()->clear_current_index(); + + ShenandoahHeapRegion* r; + while ((r = collection_set()->next()) != NULL) { + if (r->is_pinned()) { + cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc); + op_degenerated_fail(); + return; + } + } + + collection_set()->clear_current_index(); + } + + op_stw_evac(); + if (cancelled_gc()) { + op_degenerated_fail(); + return; + } + } + + // If heuristics thinks we should do the cycle, this flag would be set, + // and we need to do update-refs. Otherwise, it would be the shortcut cycle. + if (has_forwarded_objects()) { + op_init_updaterefs(); + if (cancelled_gc()) { + op_degenerated_fail(); + return; + } + } + + case _degenerated_updaterefs: + if (has_forwarded_objects()) { + op_final_updaterefs(); + if (cancelled_gc()) { + op_degenerated_fail(); + return; + } + } + + op_cleanup(); + break; + + default: + ShouldNotReachHere(); + } + + if (ShenandoahVerify) { + verifier()->verify_after_degenerated(); + } + + if (VerifyAfterGC) { + Universe::verify(); + } + + metrics.snap_after(); + + // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles, + // because that probably means the heap is overloaded and/or fragmented. + if (!metrics.is_good_progress()) { + _progress_last_gc.unset(); + cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc); + op_degenerated_futile(); + } else { + _progress_last_gc.set(); + } +} + +void ShenandoahHeap::op_degenerated_fail() { + log_info(gc)("Cannot finish degeneration, upgrading to Full GC"); + shenandoah_policy()->record_degenerated_upgrade_to_full(); + op_full(GCCause::_shenandoah_upgrade_to_full_gc); +} + +void ShenandoahHeap::op_degenerated_futile() { + shenandoah_policy()->record_degenerated_upgrade_to_full(); + op_full(GCCause::_shenandoah_upgrade_to_full_gc); +} + +void ShenandoahHeap::stop_concurrent_marking() { + assert(is_concurrent_mark_in_progress(), "How else could we get here?"); + set_concurrent_mark_in_progress(false); + if (!cancelled_gc()) { + // If we needed to update refs, and concurrent marking has been cancelled, + // we need to finish updating references. + set_has_forwarded_objects(false); + mark_complete_marking_context(); + } +} + +void ShenandoahHeap::force_satb_flush_all_threads() { + if (!is_concurrent_mark_in_progress() && !is_concurrent_traversal_in_progress()) { + // No need to flush SATBs + return; + } + + for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) { + ShenandoahThreadLocalData::set_force_satb_flush(t, true); + } + // The threads are not "acquiring" their thread-local data, but it does not + // hurt to "release" the updates here anyway. 
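+ // (OrderAccess::fence() is a full two-way fence, which covers the release
+ // semantics described above.)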
+ OrderAccess::fence(); +} + +void ShenandoahHeap::set_gc_state_all_threads(char state) { + for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) { + ShenandoahThreadLocalData::set_gc_state(t, state); + } +} + +void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint"); + _gc_state.set_cond(mask, value); + set_gc_state_all_threads(_gc_state.raw_value()); +} + +void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) { + if (has_forwarded_objects()) { + set_gc_state_mask(MARKING | UPDATEREFS, in_progress); + } else { + set_gc_state_mask(MARKING, in_progress); + } + ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress); +} + +void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) { + set_gc_state_mask(TRAVERSAL | HAS_FORWARDED | UPDATEREFS, in_progress); + ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress); +} + +void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint"); + set_gc_state_mask(EVACUATION, in_progress); +} + +void ShenandoahHeap::ref_processing_init() { + assert(_max_workers > 0, "Sanity"); + + _ref_processor = + new ReferenceProcessor(&_subject_to_discovery, // is_subject_to_discovery + ParallelRefProcEnabled, // MT processing + _max_workers, // Degree of MT processing + true, // MT discovery + _max_workers, // Degree of MT discovery + false, // Reference discovery is not atomic + NULL, // No closure, should be installed before use + true); // Scale worker threads + + shenandoah_assert_rp_isalive_not_installed(); +} + +GCTracer* ShenandoahHeap::tracer() { + return shenandoah_policy()->tracer(); +} + +size_t ShenandoahHeap::tlab_used(Thread* thread) const { + return _free_set->used(); +} + +void ShenandoahHeap::cancel_gc(GCCause::Cause cause) { + if (try_cancel_gc()) { + FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause)); + log_info(gc)("%s", msg.buffer()); + Events::log(Thread::current(), "%s", msg.buffer()); + } +} + +uint ShenandoahHeap::max_workers() { + return _max_workers; +} + +void ShenandoahHeap::stop() { + // The shutdown sequence should be able to terminate when GC is running. + + // Step 0. Notify policy to disable event recording. + _shenandoah_policy->record_shutdown(); + + // Step 1. Notify control thread that we are in shutdown. + // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown. + // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below. + control_thread()->prepare_for_graceful_shutdown(); + + // Step 2. Notify GC workers that we are cancelling GC. + cancel_gc(GCCause::_shenandoah_stop_vm); + + // Step 3. Wait until GC worker exits normally. + control_thread()->stop(); + + // Step 4. Stop String Dedup thread if it is active + if (ShenandoahStringDedup::is_enabled()) { + ShenandoahStringDedup::stop(); + } +} + +void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) { + assert(heuristics()->can_unload_classes(), "Class unloading should be enabled"); + + ShenandoahGCPhase root_phase(full_gc ? 
+ ShenandoahPhaseTimings::full_gc_purge : + ShenandoahPhaseTimings::purge); + + ShenandoahIsAliveSelector alive; + BoolObjectClosure* is_alive = alive.is_alive_closure(); + + bool purged_class; + + // Unload classes and purge SystemDictionary. + { + ShenandoahGCPhase phase(full_gc ? + ShenandoahPhaseTimings::full_gc_purge_class_unload : + ShenandoahPhaseTimings::purge_class_unload); + purged_class = SystemDictionary::do_unloading(gc_timer(), + full_gc /* do_cleaning*/ ); + } + + { + ShenandoahGCPhase phase(full_gc ? + ShenandoahPhaseTimings::full_gc_purge_par : + ShenandoahPhaseTimings::purge_par); + uint active = _workers->active_workers(); + ParallelCleaningTask unlink_task(is_alive, true, true, active, purged_class); + _workers->run_task(&unlink_task); + } + + if (ShenandoahStringDedup::is_enabled()) { + ShenandoahGCPhase phase(full_gc ? + ShenandoahPhaseTimings::full_gc_purge_string_dedup : + ShenandoahPhaseTimings::purge_string_dedup); + ShenandoahStringDedup::parallel_cleanup(); + } + + { + ShenandoahGCPhase phase(full_gc ? + ShenandoahPhaseTimings::full_gc_purge_cldg : + ShenandoahPhaseTimings::purge_cldg); + ClassLoaderDataGraph::purge(); + } +} + +void ShenandoahHeap::set_has_forwarded_objects(bool cond) { + set_gc_state_mask(HAS_FORWARDED, cond); +} + +void ShenandoahHeap::set_process_references(bool pr) { + _process_references.set_cond(pr); +} + +void ShenandoahHeap::set_unload_classes(bool uc) { + _unload_classes.set_cond(uc); +} + +bool ShenandoahHeap::process_references() const { + return _process_references.is_set(); +} + +bool ShenandoahHeap::unload_classes() const { + return _unload_classes.is_set(); +} + +address ShenandoahHeap::in_cset_fast_test_addr() { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + assert(heap->collection_set() != NULL, "Sanity"); + return (address) heap->collection_set()->biased_map_address(); +} + +address ShenandoahHeap::cancelled_gc_addr() { + return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of(); +} + +address ShenandoahHeap::gc_state_addr() { + return (address) ShenandoahHeap::heap()->_gc_state.addr_of(); +} + +size_t ShenandoahHeap::bytes_allocated_since_gc_start() { + return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start); +} + +void ShenandoahHeap::reset_bytes_allocated_since_gc_start() { + OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0); +} + +void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) { + _degenerated_gc_in_progress.set_cond(in_progress); +} + +void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) { + _full_gc_in_progress.set_cond(in_progress); +} + +void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) { + assert (is_full_gc_in_progress(), "should be"); + _full_gc_move_in_progress.set_cond(in_progress); +} + +void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) { + set_gc_state_mask(UPDATEREFS, in_progress); +} + +void ShenandoahHeap::register_nmethod(nmethod* nm) { + ShenandoahCodeRoots::add_nmethod(nm); +} + +void ShenandoahHeap::unregister_nmethod(nmethod* nm) { + ShenandoahCodeRoots::remove_nmethod(nm); +} + +oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) { + heap_region_containing(o)->record_pin(); + return o; +} + +void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) { + heap_region_containing(o)->record_unpin(); +} + +void ShenandoahHeap::sync_pinned_region_status() { + ShenandoahHeapLocker locker(lock()); + + for (size_t i = 0; i < num_regions(); i++) { + ShenandoahHeapRegion *r = get_region(i); + if 
(r->is_active()) { + if (r->is_pinned()) { + if (r->pin_count() == 0) { + r->make_unpinned(); + } + } else { + if (r->pin_count() > 0) { + r->make_pinned(); + } + } + } + } + + assert_pinned_region_status(); +} + +#ifdef ASSERT +void ShenandoahHeap::assert_pinned_region_status() { + for (size_t i = 0; i < num_regions(); i++) { + ShenandoahHeapRegion* r = get_region(i); + assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0), + "Region " SIZE_FORMAT " pinning status is inconsistent", i); + } +} +#endif + +GCTimer* ShenandoahHeap::gc_timer() const { + return _gc_timer; +} + +#ifdef ASSERT +void ShenandoahHeap::assert_gc_workers(uint nworkers) { + assert(nworkers > 0 && nworkers <= max_workers(), "Sanity"); + + if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) { + if (UseDynamicNumberOfGCThreads || + (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) { + assert(nworkers <= ParallelGCThreads, "Cannot use more than it has"); + } else { + // Use ParallelGCThreads inside safepoints + assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints"); + } + } else { + if (UseDynamicNumberOfGCThreads || + (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) { + assert(nworkers <= ConcGCThreads, "Cannot use more than it has"); + } else { + // Use ConcGCThreads outside safepoints + assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints"); + } + } +} +#endif + +ShenandoahVerifier* ShenandoahHeap::verifier() { + guarantee(ShenandoahVerify, "Should be enabled"); + assert (_verifier != NULL, "sanity"); + return _verifier; +} + +template<class T> +class ShenandoahUpdateHeapRefsTask : public AbstractGangTask { +private: + T cl; + ShenandoahHeap* _heap; + ShenandoahRegionIterator* _regions; + bool _concurrent; +public: + ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) : + AbstractGangTask("Concurrent Update References Task"), + cl(T()), + _heap(ShenandoahHeap::heap()), + _regions(regions), + _concurrent(concurrent) { + } + + void work(uint worker_id) { + if (_concurrent) { + ShenandoahConcurrentWorkerSession worker_session(worker_id); + ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers); + do_work(); + } else { + ShenandoahParallelWorkerSession worker_session(worker_id); + do_work(); + } + } + +private: + void do_work() { + ShenandoahHeapRegion* r = _regions->next(); + ShenandoahMarkingContext* const ctx = _heap->complete_marking_context(); + while (r != NULL) { + HeapWord* top_at_start_ur = r->concurrent_iteration_safe_limit(); + assert (top_at_start_ur >= r->bottom(), "sanity"); + if (r->is_active() && !r->is_cset()) { + _heap->marked_object_oop_iterate(r, &cl, top_at_start_ur); + } + if (ShenandoahPacing) { + _heap->pacer()->report_updaterefs(pointer_delta(top_at_start_ur, r->bottom())); + } + if (_heap->check_cancelled_gc_and_yield(_concurrent)) { + return; + } + r = _regions->next(); + } + } +}; + +void ShenandoahHeap::update_heap_references(bool concurrent) { + ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent); + workers()->run_task(&task); +} + +void ShenandoahHeap::op_init_updaterefs() { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint"); + + set_evacuation_in_progress(false); + + { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_retire_gclabs); + retire_and_reset_gclabs(); + } + + if (ShenandoahVerify) { + if (!is_degenerated_gc_in_progress()) { +
verifier()->verify_roots_no_forwarded_except(ShenandoahRootVerifier::ThreadRoots); + } + verifier()->verify_before_updaterefs(); + } + + set_update_refs_in_progress(true); + + { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_prepare); + + make_parsable(true); + for (uint i = 0; i < num_regions(); i++) { + ShenandoahHeapRegion* r = get_region(i); + r->set_concurrent_iteration_safe_limit(r->top()); + } + + // Reset iterator. + _update_refs_iterator.reset(); + } + + if (ShenandoahPacing) { + pacer()->setup_for_updaterefs(); + } +} + +void ShenandoahHeap::op_final_updaterefs() { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint"); + + // Check if there is left-over work, and finish it + if (_update_refs_iterator.has_next()) { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_finish_work); + + // Finish updating references where we left off. + clear_cancelled_gc(); + update_heap_references(false); + } + + // Clear cancelled GC, if set. On cancellation path, the block before would handle + // everything. On degenerated paths, cancelled gc would not be set anyway. + if (cancelled_gc()) { + clear_cancelled_gc(); + } + assert(!cancelled_gc(), "Should have been done right before"); + + if (ShenandoahVerify && !is_degenerated_gc_in_progress()) { + verifier()->verify_roots_no_forwarded_except(ShenandoahRootVerifier::ThreadRoots); + } + + if (is_degenerated_gc_in_progress()) { + concurrent_mark()->update_roots(ShenandoahPhaseTimings::degen_gc_update_roots); + } else { + concurrent_mark()->update_thread_roots(ShenandoahPhaseTimings::final_update_refs_roots); + } + + // Has to be done before the cset is cleared + if (ShenandoahVerify) { + verifier()->verify_roots_in_to_space(); + } + + // Drop unnecessary "pinned" state from regions that do not have CP marks + // anymore, as this would allow trashing them below.
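+ // Pin counts are maintained by pin_object()/unpin_object(); sync_pinned_region_status()
+ // reconciles each region's pinned state with its current pin count under the heap lock.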
+ { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_sync_pinned); + sync_pinned_region_status(); + } + + { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_trash_cset); + trash_cset_regions(); + } + + set_has_forwarded_objects(false); + set_update_refs_in_progress(false); + + if (ShenandoahVerify) { + verifier()->verify_after_updaterefs(); + } + + if (VerifyAfterGC) { + Universe::verify(); + } + + { + ShenandoahHeapLocker locker(lock()); + _free_set->rebuild(); + } +} + +#ifdef ASSERT +void ShenandoahHeap::assert_heaplock_owned_by_current_thread() { + _lock.assert_owned_by_current_thread(); +} + +void ShenandoahHeap::assert_heaplock_not_owned_by_current_thread() { + _lock.assert_not_owned_by_current_thread(); +} + +void ShenandoahHeap::assert_heaplock_or_safepoint() { + _lock.assert_owned_by_current_thread_or_safepoint(); +} +#endif + +void ShenandoahHeap::print_extended_on(outputStream *st) const { + print_on(st); + print_heap_regions_on(st); +} + +bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) { + size_t slice = r->region_number() / _bitmap_regions_per_slice; + + size_t regions_from = _bitmap_regions_per_slice * slice; + size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1)); + for (size_t g = regions_from; g < regions_to; g++) { + assert (g / _bitmap_regions_per_slice == slice, "same slice"); + if (skip_self && g == r->region_number()) continue; + if (get_region(g)->is_committed()) { + return true; + } + } + return false; +} + +bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) { + assert_heaplock_owned_by_current_thread(); + + // Bitmaps in special regions do not need commits + if (_bitmap_region_special) { + return true; + } + + if (is_bitmap_slice_committed(r, true)) { + // Some other region from the group is already committed, meaning the bitmap + // slice is already committed, so we exit right away. + return true; + } + + // Commit the bitmap slice: + size_t slice = r->region_number() / _bitmap_regions_per_slice; + size_t off = _bitmap_bytes_per_slice * slice; + size_t len = _bitmap_bytes_per_slice; + if (!os::commit_memory((char*)_bitmap_region.start() + off, len, false)) { + return false; + } + return true; +} + +bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) { + assert_heaplock_owned_by_current_thread(); + + // Bitmaps in special regions do not need uncommits + if (_bitmap_region_special) { + return true; + } + + if (is_bitmap_slice_committed(r, true)) { + // Some other region from the group is still committed, meaning the bitmap + // slice should stay committed; exit right away.
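+ // For example, assuming _bitmap_regions_per_slice == 8, regions 16..23 all map to
+ // slice 2, so slice 2 must stay committed while any one of those regions is committed.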
+ return true; + } + + // Uncommit the bitmap slice: + size_t slice = r->region_number() / _bitmap_regions_per_slice; + size_t off = _bitmap_bytes_per_slice * slice; + size_t len = _bitmap_bytes_per_slice; + if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) { + return false; + } + return true; +} + +void ShenandoahHeap::safepoint_synchronize_begin() { + if (ShenandoahSuspendibleWorkers || UseStringDeduplication) { + SuspendibleThreadSet::synchronize(); + } +} + +void ShenandoahHeap::safepoint_synchronize_end() { + if (ShenandoahSuspendibleWorkers || UseStringDeduplication) { + SuspendibleThreadSet::desynchronize(); + } +} + +void ShenandoahHeap::vmop_entry_init_mark() { + TraceCollectorStats tcs(monitoring_support()->stw_collection_counters()); + ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross); + ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross); + + try_inject_alloc_failure(); + VM_ShenandoahInitMark op; + VMThread::execute(&op); // jump to entry_init_mark() under safepoint +} + +void ShenandoahHeap::vmop_entry_final_mark() { + TraceCollectorStats tcs(monitoring_support()->stw_collection_counters()); + ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross); + ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross); + + try_inject_alloc_failure(); + VM_ShenandoahFinalMarkStartEvac op; + VMThread::execute(&op); // jump to entry_final_mark under safepoint +} + +void ShenandoahHeap::vmop_entry_final_evac() { + TraceCollectorStats tcs(monitoring_support()->stw_collection_counters()); + ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross); + ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac_gross); + + VM_ShenandoahFinalEvac op; + VMThread::execute(&op); // jump to entry_final_evac under safepoint +} + +void ShenandoahHeap::vmop_entry_init_updaterefs() { + TraceCollectorStats tcs(monitoring_support()->stw_collection_counters()); + ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross); + ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross); + + try_inject_alloc_failure(); + VM_ShenandoahInitUpdateRefs op; + VMThread::execute(&op); +} + +void ShenandoahHeap::vmop_entry_final_updaterefs() { + TraceCollectorStats tcs(monitoring_support()->stw_collection_counters()); + ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross); + ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross); + + try_inject_alloc_failure(); + VM_ShenandoahFinalUpdateRefs op; + VMThread::execute(&op); +} + +void ShenandoahHeap::vmop_entry_init_traversal() { + TraceCollectorStats tcs(monitoring_support()->stw_collection_counters()); + ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross); + ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc_gross); + + try_inject_alloc_failure(); + VM_ShenandoahInitTraversalGC op; + VMThread::execute(&op); +} + +void ShenandoahHeap::vmop_entry_final_traversal() { + TraceCollectorStats tcs(monitoring_support()->stw_collection_counters()); + ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross); + ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc_gross); + + try_inject_alloc_failure(); + VM_ShenandoahFinalTraversalGC op; + VMThread::execute(&op); +} + +void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) { + TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters()); + ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross); + ShenandoahGCPhase 
phase(ShenandoahPhaseTimings::full_gc_gross); + + try_inject_alloc_failure(); + VM_ShenandoahFullGC op(cause); + VMThread::execute(&op); +} + +void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) { + TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters()); + ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross); + ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross); + + VM_ShenandoahDegeneratedGC degenerated_gc((int)point); + VMThread::execute(&degenerated_gc); +} + +void ShenandoahHeap::entry_init_mark() { + ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause); + ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark); + const char* msg = init_mark_event_message(); + GCTraceTime(Info, gc) time(msg, gc_timer()); + EventMark em("%s", msg); + + ShenandoahWorkerScope scope(workers(), + ShenandoahWorkerPolicy::calc_workers_for_init_marking(), + "init marking"); + + op_init_mark(); +} + +void ShenandoahHeap::entry_final_mark() { + ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause); + ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark); + const char* msg = final_mark_event_message(); + GCTraceTime(Info, gc) time(msg, gc_timer()); + EventMark em("%s", msg); + + ShenandoahWorkerScope scope(workers(), + ShenandoahWorkerPolicy::calc_workers_for_final_marking(), + "final marking"); + + op_final_mark(); +} + +void ShenandoahHeap::entry_final_evac() { + ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause); + ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac); + static const char* msg = "Pause Final Evac"; + GCTraceTime(Info, gc) time(msg, gc_timer()); + EventMark em("%s", msg); + + op_final_evac(); +} + +void ShenandoahHeap::entry_init_updaterefs() { + ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause); + ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs); + + static const char* msg = "Pause Init Update Refs"; + GCTraceTime(Info, gc) time(msg, gc_timer()); + EventMark em("%s", msg); + + // No workers used in this phase, no setup required + + op_init_updaterefs(); +} + +void ShenandoahHeap::entry_final_updaterefs() { + ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause); + ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs); + + static const char* msg = "Pause Final Update Refs"; + GCTraceTime(Info, gc) time(msg, gc_timer()); + EventMark em("%s", msg); + + ShenandoahWorkerScope scope(workers(), + ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(), + "final reference update"); + + op_final_updaterefs(); +} + +void ShenandoahHeap::entry_init_traversal() { + ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause); + ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc); + + static const char* msg = "Pause Init Traversal"; + GCTraceTime(Info, gc) time(msg, gc_timer()); + EventMark em("%s", msg); + + ShenandoahWorkerScope scope(workers(), + ShenandoahWorkerPolicy::calc_workers_for_stw_traversal(), + "init traversal"); + + op_init_traversal(); +} + +void ShenandoahHeap::entry_final_traversal() { + ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause); + ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc); + + static const char* msg = "Pause Final Traversal"; + GCTraceTime(Info, gc) time(msg, gc_timer()); + EventMark em("%s", msg); + + ShenandoahWorkerScope scope(workers(), + ShenandoahWorkerPolicy::calc_workers_for_stw_traversal(), + "final traversal"); +
op_final_traversal(); +} + +void ShenandoahHeap::entry_full(GCCause::Cause cause) { + ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause); + ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc); + + static const char* msg = "Pause Full"; + GCTraceTime(Info, gc) time(msg, gc_timer(), cause, true); + EventMark em("%s", msg); + + ShenandoahWorkerScope scope(workers(), + ShenandoahWorkerPolicy::calc_workers_for_fullgc(), + "full gc"); + + op_full(cause); +} + +void ShenandoahHeap::entry_degenerated(int point) { + ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause); + ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc); + + ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point; + const char* msg = degen_event_message(dpoint); + GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true); + EventMark em("%s", msg); + + ShenandoahWorkerScope scope(workers(), + ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(), + "stw degenerated gc"); + + set_degenerated_gc_in_progress(true); + op_degenerated(dpoint); + set_degenerated_gc_in_progress(false); +} + +void ShenandoahHeap::entry_mark() { + TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters()); + + const char* msg = conc_mark_event_message(); + GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true); + EventMark em("%s", msg); + + ShenandoahWorkerScope scope(workers(), + ShenandoahWorkerPolicy::calc_workers_for_conc_marking(), + "concurrent marking"); + + try_inject_alloc_failure(); + op_mark(); +} + +void ShenandoahHeap::entry_evac() { + ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac); + TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters()); + + static const char* msg = "Concurrent evacuation"; + GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true); + EventMark em("%s", msg); + + ShenandoahWorkerScope scope(workers(), + ShenandoahWorkerPolicy::calc_workers_for_conc_evac(), + "concurrent evacuation"); + + try_inject_alloc_failure(); + op_conc_evac(); +} + +void ShenandoahHeap::entry_updaterefs() { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs); + + static const char* msg = "Concurrent update references"; + GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true); + EventMark em("%s", msg); + + ShenandoahWorkerScope scope(workers(), + ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(), + "concurrent reference update"); + + try_inject_alloc_failure(); + op_updaterefs(); +} +void ShenandoahHeap::entry_cleanup() { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup); + + static const char* msg = "Concurrent cleanup"; + GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true); + EventMark em("%s", msg); + + // This phase does not use workers, no need for setup + + try_inject_alloc_failure(); + op_cleanup(); +} + +void ShenandoahHeap::entry_reset() { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_reset); + + static const char* msg = "Concurrent reset"; + GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true); + EventMark em("%s", msg); + + ShenandoahWorkerScope scope(workers(), + ShenandoahWorkerPolicy::calc_workers_for_conc_reset(), + "concurrent reset"); + + try_inject_alloc_failure(); + op_reset(); +} + +void ShenandoahHeap::entry_preclean() { + if (ShenandoahPreclean && process_references()) { + static const char* msg = "Concurrent precleaning"; + GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true); + EventMark em("%s", msg); + + ShenandoahGCPhase 
conc_preclean(ShenandoahPhaseTimings::conc_preclean); + + ShenandoahWorkerScope scope(workers(), + ShenandoahWorkerPolicy::calc_workers_for_conc_preclean(), + "concurrent preclean", + /* check_workers = */ false); + + try_inject_alloc_failure(); + op_preclean(); + } +} + +void ShenandoahHeap::entry_traversal() { + static const char* msg = "Concurrent traversal"; + GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true); + EventMark em("%s", msg); + + TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters()); + + ShenandoahWorkerScope scope(workers(), + ShenandoahWorkerPolicy::calc_workers_for_conc_traversal(), + "concurrent traversal"); + + try_inject_alloc_failure(); + op_traversal(); +} + +void ShenandoahHeap::entry_uncommit(double shrink_before) { + static const char *msg = "Concurrent uncommit"; + GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true); + EventMark em("%s", msg); + + ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_uncommit); + + op_uncommit(shrink_before); +} + +void ShenandoahHeap::try_inject_alloc_failure() { + if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) { + _inject_alloc_failure.set(); + os::naked_short_sleep(1); + if (cancelled_gc()) { + log_info(gc)("Allocation failure was successfully injected"); + } + } +} + +bool ShenandoahHeap::should_inject_alloc_failure() { + return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset(); +} + +void ShenandoahHeap::initialize_serviceability() { + _memory_pool = new ShenandoahMemoryPool(this); + _cycle_memory_manager.add_pool(_memory_pool); + _stw_memory_manager.add_pool(_memory_pool); +} + +GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() { + GrowableArray<GCMemoryManager*> memory_managers(2); + memory_managers.append(&_cycle_memory_manager); + memory_managers.append(&_stw_memory_manager); + return memory_managers; +} + +GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() { + GrowableArray<MemoryPool*> memory_pools(1); + memory_pools.append(_memory_pool); + return memory_pools; +} + +MemoryUsage ShenandoahHeap::memory_usage() { + return _memory_pool->get_memory_usage(); +} + +void ShenandoahHeap::enter_evacuation() { + _oom_evac_handler.enter_evacuation(); +} + +void ShenandoahHeap::leave_evacuation() { + _oom_evac_handler.leave_evacuation(); +} + +ShenandoahRegionIterator::ShenandoahRegionIterator() : + _heap(ShenandoahHeap::heap()), + _index(0) {} + +ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) : + _heap(heap), + _index(0) {} + +void ShenandoahRegionIterator::reset() { + _index = 0; +} + +bool ShenandoahRegionIterator::has_next() const { + return _index < _heap->num_regions(); +} + +char ShenandoahHeap::gc_state() const { + return _gc_state.raw_value(); +} + +void ShenandoahHeap::deduplicate_string(oop str) { + assert(java_lang_String::is_instance(str), "invariant"); + + if (ShenandoahStringDedup::is_enabled()) { + ShenandoahStringDedup::deduplicate(str); + } +} + +const char* ShenandoahHeap::init_mark_event_message() const { + bool update_refs = has_forwarded_objects(); + bool proc_refs = process_references(); + bool unload_cls = unload_classes(); + + if (update_refs && proc_refs && unload_cls) { + return "Pause Init Mark (update refs) (process weakrefs) (unload classes)"; + } else if (update_refs && proc_refs) { + return "Pause Init Mark (update refs) (process weakrefs)"; + } else if (update_refs && unload_cls) { + return "Pause Init Mark (update refs) (unload classes)"; + } else if (proc_refs && unload_cls) { + return "Pause Init Mark (process
weakrefs) (unload classes)"; + } else if (update_refs) { + return "Pause Init Mark (update refs)"; + } else if (proc_refs) { + return "Pause Init Mark (process weakrefs)"; + } else if (unload_cls) { + return "Pause Init Mark (unload classes)"; + } else { + return "Pause Init Mark"; + } +} + +const char* ShenandoahHeap::final_mark_event_message() const { + bool update_refs = has_forwarded_objects(); + bool proc_refs = process_references(); + bool unload_cls = unload_classes(); + + if (update_refs && proc_refs && unload_cls) { + return "Pause Final Mark (update refs) (process weakrefs) (unload classes)"; + } else if (update_refs && proc_refs) { + return "Pause Final Mark (update refs) (process weakrefs)"; + } else if (update_refs && unload_cls) { + return "Pause Final Mark (update refs) (unload classes)"; + } else if (proc_refs && unload_cls) { + return "Pause Final Mark (process weakrefs) (unload classes)"; + } else if (update_refs) { + return "Pause Final Mark (update refs)"; + } else if (proc_refs) { + return "Pause Final Mark (process weakrefs)"; + } else if (unload_cls) { + return "Pause Final Mark (unload classes)"; + } else { + return "Pause Final Mark"; + } +} + +const char* ShenandoahHeap::conc_mark_event_message() const { + bool update_refs = has_forwarded_objects(); + bool proc_refs = process_references(); + bool unload_cls = unload_classes(); + + if (update_refs && proc_refs && unload_cls) { + return "Concurrent marking (update refs) (process weakrefs) (unload classes)"; + } else if (update_refs && proc_refs) { + return "Concurrent marking (update refs) (process weakrefs)"; + } else if (update_refs && unload_cls) { + return "Concurrent marking (update refs) (unload classes)"; + } else if (proc_refs && unload_cls) { + return "Concurrent marking (process weakrefs) (unload classes)"; + } else if (update_refs) { + return "Concurrent marking (update refs)"; + } else if (proc_refs) { + return "Concurrent marking (process weakrefs)"; + } else if (unload_cls) { + return "Concurrent marking (unload classes)"; + } else { + return "Concurrent marking"; + } +} + +const char* ShenandoahHeap::degen_event_message(ShenandoahDegenPoint point) const { + switch (point) { + case _degenerated_unset: + return "Pause Degenerated GC (<UNSET>)"; + case _degenerated_traversal: + return "Pause Degenerated GC (Traversal)"; + case _degenerated_outside_cycle: + return "Pause Degenerated GC (Outside of Cycle)"; + case _degenerated_mark: + return "Pause Degenerated GC (Mark)"; + case _degenerated_evac: + return "Pause Degenerated GC (Evacuation)"; + case _degenerated_updaterefs: + return "Pause Degenerated GC (Update Refs)"; + default: + ShouldNotReachHere(); + return "ERROR"; + } +} + +jushort* ShenandoahHeap::get_liveness_cache(uint worker_id) { +#ifdef ASSERT + assert(_liveness_cache != NULL, "sanity"); + assert(worker_id < _max_workers, "sanity"); + for (uint i = 0; i < num_regions(); i++) { + assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty"); + } +#endif + return _liveness_cache[worker_id]; +} + +void ShenandoahHeap::flush_liveness_cache(uint worker_id) { + assert(worker_id < _max_workers, "sanity"); + assert(_liveness_cache != NULL, "sanity"); + jushort* ld = _liveness_cache[worker_id]; + for (uint i = 0; i < num_regions(); i++) { + ShenandoahHeapRegion* r = get_region(i); + jushort live = ld[i]; + if (live > 0) { + r->increase_live_data_gc_words(live); + ld[i] = 0; + } + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
2020-01-17 17:10:07.382130629 +0100 @@ -0,0 +1,724 @@ +/* + * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP + +#include "gc/shared/markBitMap.hpp" +#include "gc/shared/softRefPolicy.hpp" +#include "gc/shared/collectedHeap.hpp" +#include "gc/shenandoah/shenandoahAsserts.hpp" +#include "gc/shenandoah/shenandoahAllocRequest.hpp" +#include "gc/shenandoah/shenandoahLock.hpp" +#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp" +#include "gc/shenandoah/shenandoahSharedVariables.hpp" +#include "services/memoryManager.hpp" + +class ConcurrentGCTimer; +class ReferenceProcessor; +class ShenandoahAllocTracker; +class ShenandoahCollectorPolicy; +class ShenandoahControlThread; +class ShenandoahGCSession; +class ShenandoahGCStateResetter; +class ShenandoahHeuristics; +class ShenandoahMarkingContext; +class ShenandoahMarkCompact; +class ShenandoahMode; +class ShenandoahPhaseTimings; +class ShenandoahHeap; +class ShenandoahHeapRegion; +class ShenandoahHeapRegionClosure; +class ShenandoahCollectionSet; +class ShenandoahFreeSet; +class ShenandoahConcurrentMark; +class ShenandoahMonitoringSupport; +class ShenandoahPacer; +class ShenandoahTraversalGC; +class ShenandoahVerifier; +class ShenandoahWorkGang; +class VMStructs; + +class ShenandoahRegionIterator : public StackObj { +private: + ShenandoahHeap* _heap; + + DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t)); + volatile size_t _index; + DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0); + + // No implicit copying: iterators should be passed by reference to capture the state + ShenandoahRegionIterator(const ShenandoahRegionIterator& that); + ShenandoahRegionIterator& operator=(const ShenandoahRegionIterator& o); + +public: + ShenandoahRegionIterator(); + ShenandoahRegionIterator(ShenandoahHeap* heap); + + // Reset iterator to default state + void reset(); + + // Returns next region, or NULL if there are no more regions. + // This is multi-thread-safe. + inline ShenandoahHeapRegion* next(); + + // This is *not* MT safe. However, in the absence of multithreaded access, it + // can be used to determine if there is more work to do.
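+ // Single-threaded sketch (illustrative only):
+ //   while (iter.has_next()) { process(iter.next()); }
+ // Concurrent callers must rely on the NULL return from next() instead.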
+ bool has_next() const; +}; + +class ShenandoahHeapRegionClosure : public StackObj { +public: + virtual void heap_region_do(ShenandoahHeapRegion* r) = 0; + virtual bool is_thread_safe() { return false; } +}; + +#ifdef ASSERT +class ShenandoahAssertToSpaceClosure : public OopClosure { +private: + template <class T> + void do_oop_work(T* p); +public: + void do_oop(narrowOop* p); + void do_oop(oop* p); +}; +#endif + +typedef ShenandoahLock ShenandoahHeapLock; +typedef ShenandoahLocker ShenandoahHeapLocker; + +// Shenandoah GC is a low-pause concurrent GC that uses Brooks forwarding pointers +// to encode forwarding data. See BrooksPointer for details on forwarding data encoding. +// See ShenandoahControlThread for GC cycle structure. +// +class ShenandoahHeap : public CollectedHeap { + friend class ShenandoahAsserts; + friend class VMStructs; + friend class ShenandoahGCSession; + friend class ShenandoahGCStateResetter; + +// ---------- Locks that guard important data structures in Heap +// +private: + ShenandoahHeapLock _lock; + +public: + ShenandoahHeapLock* lock() { + return &_lock; + } + + void assert_heaplock_owned_by_current_thread() NOT_DEBUG_RETURN; + void assert_heaplock_not_owned_by_current_thread() NOT_DEBUG_RETURN; + void assert_heaplock_or_safepoint() NOT_DEBUG_RETURN; + +// ---------- Initialization, termination, identification, printing routines +// +public: + static ShenandoahHeap* heap(); + static ShenandoahHeap* heap_no_check(); + + const char* name() const { return "Shenandoah"; } + ShenandoahHeap::Name kind() const { return CollectedHeap::Shenandoah; } + + ShenandoahHeap(ShenandoahCollectorPolicy* policy); + jint initialize(); + void post_initialize(); + void initialize_heuristics(); + + void initialize_serviceability(); + + void print_on(outputStream* st) const; + void print_extended_on(outputStream *st) const; + void print_tracing_info() const; + void print_gc_threads_on(outputStream* st) const; + void print_heap_regions_on(outputStream* st) const; + + void stop(); + + void prepare_for_verify(); + void verify(VerifyOption vo); + +// ---------- Heap counters and metrics +// +private: + size_t _initial_size; + size_t _minimum_size; + DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t)); + volatile size_t _used; + volatile size_t _committed; + volatile size_t _bytes_allocated_since_gc_start; + DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0); + +public: + void increase_used(size_t bytes); + void decrease_used(size_t bytes); + void set_used(size_t bytes); + + void increase_committed(size_t bytes); + void decrease_committed(size_t bytes); + void increase_allocated(size_t bytes); + + size_t bytes_allocated_since_gc_start(); + void reset_bytes_allocated_since_gc_start(); + + size_t min_capacity() const; + size_t max_capacity() const; + size_t initial_capacity() const; + size_t capacity() const; + size_t used() const; + size_t committed() const; + +// ---------- Workers handling +// +private: + uint _max_workers; + ShenandoahWorkGang* _workers; + ShenandoahWorkGang* _safepoint_workers; + +public: + uint max_workers(); + void assert_gc_workers(uint nworker) NOT_DEBUG_RETURN; + + WorkGang* workers() const; + WorkGang* get_safepoint_workers(); + + void gc_threads_do(ThreadClosure* tcl) const; + +// ---------- Heap regions handling machinery +// +private: + MemRegion _heap_region; + bool _heap_region_special; + size_t _num_regions; + ShenandoahHeapRegion** _regions; + ShenandoahRegionIterator _update_refs_iterator; + +public: + inline size_t num_regions() const {
return _num_regions; } + inline bool is_heap_region_special() { return _heap_region_special; } + + inline ShenandoahHeapRegion* const heap_region_containing(const void* addr) const; + inline size_t heap_region_index_containing(const void* addr) const; + + inline ShenandoahHeapRegion* const get_region(size_t region_idx) const; + + void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const; + void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const; + +// ---------- GC state machinery +// +// GC state describes the important parts of collector state, that may be +// used to make barrier selection decisions in the native and generated code. +// Multiple bits can be set at once. +// +// Important invariant: when GC state is zero, the heap is stable, and no barriers +// are required. +// +public: + enum GCStateBitPos { + // Heap has forwarded objects: needs LRB barriers. + HAS_FORWARDED_BITPOS = 0, + + // Heap is under marking: needs SATB barriers. + MARKING_BITPOS = 1, + + // Heap is under evacuation: needs LRB barriers. (Set together with HAS_FORWARDED) + EVACUATION_BITPOS = 2, + + // Heap is under updating: needs no additional barriers. + UPDATEREFS_BITPOS = 3, + + // Heap is under traversal collection + TRAVERSAL_BITPOS = 4 + }; + + enum GCState { + STABLE = 0, + HAS_FORWARDED = 1 << HAS_FORWARDED_BITPOS, + MARKING = 1 << MARKING_BITPOS, + EVACUATION = 1 << EVACUATION_BITPOS, + UPDATEREFS = 1 << UPDATEREFS_BITPOS, + TRAVERSAL = 1 << TRAVERSAL_BITPOS + }; + +private: + ShenandoahSharedBitmap _gc_state; + ShenandoahSharedFlag _degenerated_gc_in_progress; + ShenandoahSharedFlag _full_gc_in_progress; + ShenandoahSharedFlag _full_gc_move_in_progress; + ShenandoahSharedFlag _progress_last_gc; + + void set_gc_state_all_threads(char state); + void set_gc_state_mask(uint mask, bool value); + +public: + char gc_state() const; + static address gc_state_addr(); + + void set_concurrent_mark_in_progress(bool in_progress); + void set_evacuation_in_progress(bool in_progress); + void set_update_refs_in_progress(bool in_progress); + void set_degenerated_gc_in_progress(bool in_progress); + void set_full_gc_in_progress(bool in_progress); + void set_full_gc_move_in_progress(bool in_progress); + void set_concurrent_traversal_in_progress(bool in_progress); + void set_has_forwarded_objects(bool cond); + + inline bool is_stable() const; + inline bool is_idle() const; + inline bool is_concurrent_mark_in_progress() const; + inline bool is_update_refs_in_progress() const; + inline bool is_evacuation_in_progress() const; + inline bool is_degenerated_gc_in_progress() const; + inline bool is_full_gc_in_progress() const; + inline bool is_full_gc_move_in_progress() const; + inline bool is_concurrent_traversal_in_progress() const; + inline bool has_forwarded_objects() const; + inline bool is_gc_in_progress_mask(uint mask) const; + +// ---------- GC cancellation and degeneration machinery +// +// Cancelled GC flag is used to notify concurrent phases that they should terminate. 
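+// The degen points below mark where a concurrent cycle can be abandoned; op_degenerated()
+// resumes the work from the matching point via its Duff's-device-like switch.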
+// +public: + enum ShenandoahDegenPoint { + _degenerated_unset, + _degenerated_traversal, + _degenerated_outside_cycle, + _degenerated_mark, + _degenerated_evac, + _degenerated_updaterefs, + _DEGENERATED_LIMIT + }; + + static const char* degen_point_to_string(ShenandoahDegenPoint point) { + switch (point) { + case _degenerated_unset: + return "<UNSET>"; + case _degenerated_traversal: + return "Traversal"; + case _degenerated_outside_cycle: + return "Outside of Cycle"; + case _degenerated_mark: + return "Mark"; + case _degenerated_evac: + return "Evacuation"; + case _degenerated_updaterefs: + return "Update Refs"; + default: + ShouldNotReachHere(); + return "ERROR"; + } + }; + +private: + enum CancelState { + // Normal state. GC has not been cancelled and is open for cancellation. + // Worker threads can suspend for safepoint. + CANCELLABLE, + + // GC has been cancelled. Worker threads can not suspend for + // safepoint but must finish their work as soon as possible. + CANCELLED, + + // GC has not been cancelled and must not be cancelled. At least + // one worker thread checks for pending safepoint and may suspend + // if a safepoint is pending. + NOT_CANCELLED + }; + + ShenandoahSharedEnumFlag<CancelState> _cancelled_gc; + inline bool try_cancel_gc(); + +public: + static address cancelled_gc_addr(); + + inline bool cancelled_gc() const; + inline bool check_cancelled_gc_and_yield(bool sts_active = true); + + inline void clear_cancelled_gc(); + + void cancel_gc(GCCause::Cause cause); + +// ---------- GC operations entry points +// +public: + // Entry points to STW GC operations, these cause a related safepoint, that then + // call the entry method below + void vmop_entry_init_mark(); + void vmop_entry_final_mark(); + void vmop_entry_final_evac(); + void vmop_entry_init_updaterefs(); + void vmop_entry_final_updaterefs(); + void vmop_entry_init_traversal(); + void vmop_entry_final_traversal(); + void vmop_entry_full(GCCause::Cause cause); + void vmop_degenerated(ShenandoahDegenPoint point); + + // Entry methods to normally STW GC operations. These set up logging, monitoring + // and workers for net VM operation + void entry_init_mark(); + void entry_final_mark(); + void entry_final_evac(); + void entry_init_updaterefs(); + void entry_final_updaterefs(); + void entry_init_traversal(); + void entry_final_traversal(); + void entry_full(GCCause::Cause cause); + void entry_degenerated(int point); + + // Entry methods to normally concurrent GC operations. These set up logging, monitoring + // for concurrent operation.
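+ //
+ // Overall flow: vmop_entry_*() submits a VM operation that runs entry_*() at a
+ // safepoint, which calls the matching op_*(); concurrent phases reach their
+ // entry_*() from ShenandoahControlThread instead.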
+ void entry_reset(); + void entry_mark(); + void entry_preclean(); + void entry_cleanup(); + void entry_evac(); + void entry_updaterefs(); + void entry_traversal(); + void entry_uncommit(double shrink_before); + +private: + // Actual work for the phases + void op_init_mark(); + void op_final_mark(); + void op_final_evac(); + void op_init_updaterefs(); + void op_final_updaterefs(); + void op_init_traversal(); + void op_final_traversal(); + void op_full(GCCause::Cause cause); + void op_degenerated(ShenandoahDegenPoint point); + void op_degenerated_fail(); + void op_degenerated_futile(); + + void op_reset(); + void op_mark(); + void op_preclean(); + void op_cleanup(); + void op_conc_evac(); + void op_stw_evac(); + void op_updaterefs(); + void op_traversal(); + void op_uncommit(double shrink_before); + + // Messages for GC trace events, they have to be immortal for + // passing around the logging/tracing systems + const char* init_mark_event_message() const; + const char* final_mark_event_message() const; + const char* conc_mark_event_message() const; + const char* degen_event_message(ShenandoahDegenPoint point) const; + +// ---------- GC subsystems +// +private: + ShenandoahControlThread* _control_thread; + ShenandoahCollectorPolicy* _shenandoah_policy; + ShenandoahMode* _gc_mode; + ShenandoahHeuristics* _heuristics; + ShenandoahFreeSet* _free_set; + ShenandoahConcurrentMark* _scm; + ShenandoahTraversalGC* _traversal_gc; + ShenandoahMarkCompact* _full_gc; + ShenandoahPacer* _pacer; + ShenandoahVerifier* _verifier; + + ShenandoahAllocTracker* _alloc_tracker; + ShenandoahPhaseTimings* _phase_timings; + + ShenandoahControlThread* control_thread() { return _control_thread; } + ShenandoahMarkCompact* full_gc() { return _full_gc; } + +public: + ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; } + ShenandoahHeuristics* heuristics() const { return _heuristics; } + ShenandoahFreeSet* free_set() const { return _free_set; } + ShenandoahConcurrentMark* concurrent_mark() { return _scm; } + ShenandoahTraversalGC* traversal_gc() const { return _traversal_gc; } + bool is_traversal_mode() const { return _traversal_gc != NULL; } + ShenandoahPacer* pacer() const { return _pacer; } + + ShenandoahPhaseTimings* phase_timings() const { return _phase_timings; } + ShenandoahAllocTracker* alloc_tracker() const { return _alloc_tracker; } + + ShenandoahVerifier* verifier(); + +// ---------- VM subsystem bindings +// +private: + ShenandoahMonitoringSupport* _monitoring_support; + MemoryPool* _memory_pool; + GCMemoryManager _stw_memory_manager; + GCMemoryManager _cycle_memory_manager; + ConcurrentGCTimer* _gc_timer; + SoftRefPolicy _soft_ref_policy; + +public: + ShenandoahMonitoringSupport* monitoring_support() { return _monitoring_support; } + GCMemoryManager* cycle_memory_manager() { return &_cycle_memory_manager; } + GCMemoryManager* stw_memory_manager() { return &_stw_memory_manager; } + SoftRefPolicy* soft_ref_policy() { return &_soft_ref_policy; } + + GrowableArray<GCMemoryManager*> memory_managers(); + GrowableArray<MemoryPool*> memory_pools(); + MemoryUsage memory_usage(); + GCTracer* tracer(); + GCTimer* gc_timer() const; + CollectorPolicy* collector_policy() const; + +// ---------- Reference processing +// +private: + AlwaysTrueClosure _subject_to_discovery; + ReferenceProcessor* _ref_processor; + ShenandoahSharedFlag _process_references; + + void ref_processing_init(); + +public: + ReferenceProcessor* ref_processor() { return _ref_processor; } + void set_process_references(bool pr); + bool
process_references() const; + +// ---------- Class Unloading +// +private: + ShenandoahSharedFlag _unload_classes; + +public: + void set_unload_classes(bool uc); + bool unload_classes() const; + + // Delete entries for dead interned string and clean up unreferenced symbols + // in symbol table, possibly in parallel. + void unload_classes_and_cleanup_tables(bool full_gc); + +// ---------- Generic interface hooks +// Minor things that super-interface expects us to implement to play nice with +// the rest of runtime. Some of the things here are not required to be implemented, +// and can be stubbed out. +// +public: + AdaptiveSizePolicy* size_policy() shenandoah_not_implemented_return(NULL); + bool is_maximal_no_gc() const shenandoah_not_implemented_return(false); + + bool is_in(const void* p) const; + + // All objects can potentially move + bool is_scavengable(oop obj) { return true; }; + + void collect(GCCause::Cause cause); + void do_full_collection(bool clear_all_soft_refs); + + // Used for parsing heap during error printing + HeapWord* block_start(const void* addr) const; + size_t block_size(const HeapWord* addr) const; + bool block_is_obj(const HeapWord* addr) const; + + // Used for native heap walkers: heap dumpers, mostly + void object_iterate(ObjectClosure* cl); + void safe_object_iterate(ObjectClosure* cl); + + // Used by RMI + jlong millis_since_last_gc(); + +// ---------- Safepoint interface hooks +// +public: + void safepoint_synchronize_begin(); + void safepoint_synchronize_end(); + +// ---------- Code roots handling hooks +// +public: + void register_nmethod(nmethod* nm); + void unregister_nmethod(nmethod* nm); + +// ---------- Pinning hooks +// +public: + // Shenandoah supports per-object (per-region) pinning + bool supports_object_pinning() const { return true; } + + oop pin_object(JavaThread* thread, oop obj); + void unpin_object(JavaThread* thread, oop obj); + + void sync_pinned_region_status(); + void assert_pinned_region_status() NOT_DEBUG_RETURN; + +// ---------- Allocation support +// +private: + HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region); + inline HeapWord* allocate_from_gclab(Thread* thread, size_t size); + HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size); + HeapWord* allocate_new_gclab(size_t min_size, size_t word_size, size_t* actual_size); + void retire_and_reset_gclabs(); + +public: + HeapWord* allocate_memory(ShenandoahAllocRequest& request); + HeapWord* mem_allocate(size_t size, bool* what); + MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data, + size_t size, + Metaspace::MetadataType mdtype); + + void notify_mutator_alloc_words(size_t words, bool waste); + + // Shenandoah supports TLAB allocation + bool supports_tlab_allocation() const { return true; } + + HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size); + size_t tlab_capacity(Thread *thr) const; + size_t unsafe_max_tlab_alloc(Thread *thread) const; + size_t max_tlab_size() const; + size_t tlab_used(Thread* ignored) const; + + void accumulate_statistics_tlabs(); + void resize_tlabs(); + + void ensure_parsability(bool retire_tlabs); + void make_parsable(bool retire_tlabs); + +// ---------- Marking support +// +private: + ShenandoahMarkingContext* _marking_context; + MemRegion _bitmap_region; + MemRegion _aux_bitmap_region; + MarkBitMap _verification_bit_map; + MarkBitMap _aux_bit_map; + + size_t _bitmap_size; + size_t _bitmap_regions_per_slice; + size_t _bitmap_bytes_per_slice; + + bool 
_bitmap_region_special; + bool _aux_bitmap_region_special; + + // Used for buffering per-region liveness data. + // Needed since ShenandoahHeapRegion uses atomics to update liveness. + // + // The array has max-workers elements, each of which is an array of + // jushort * max_regions. The choice of jushort is not accidental: + // there is a tradeoff between static/dynamic footprint that translates + // into cache pressure (which is already high during marking), and + // too many atomic updates. size_t/jint is too large, jbyte is too small. + jushort** _liveness_cache; + +public: + inline ShenandoahMarkingContext* complete_marking_context() const; + inline ShenandoahMarkingContext* marking_context() const; + inline void mark_complete_marking_context(); + inline void mark_incomplete_marking_context(); + + template <class T> + inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl); + + template <class T> + inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit); + + template <class T> + inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit); + + void reset_mark_bitmap(); + + // SATB barriers hooks + inline bool requires_marking(const void* entry) const; + void force_satb_flush_all_threads(); + + // Support for bitmap uncommits + bool commit_bitmap_slice(ShenandoahHeapRegion *r); + bool uncommit_bitmap_slice(ShenandoahHeapRegion *r); + bool is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self = false); + + // Liveness caching support + jushort* get_liveness_cache(uint worker_id); + void flush_liveness_cache(uint worker_id); + +// ---------- Evacuation support +// +private: + ShenandoahCollectionSet* _collection_set; + ShenandoahEvacOOMHandler _oom_evac_handler; + + void evacuate_and_update_roots(); + +public: + static address in_cset_fast_test_addr(); + + ShenandoahCollectionSet* collection_set() const { return _collection_set; } + + template <class T> + inline bool in_collection_set(T obj) const; + + // Avoid accidentally calling the method above with ShenandoahHeapRegion*, which would be *wrong*. + inline bool in_collection_set(ShenandoahHeapRegion* r) shenandoah_not_implemented_return(false); + + // Evacuates object src. Returns the evacuated object, either evacuated + // by this thread, or by some other thread. + inline oop evacuate_object(oop src, Thread* thread); + + // Call before/after evacuation.
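+ // These bracket the evac-OOM protocol scope handled by _oom_evac_handler above; see
+ // the "oom-evac protocol" comments in shenandoahHeap.cpp for the failure handling.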
+ void enter_evacuation(); + void leave_evacuation(); + +// ---------- Helper functions +// +public: + template <class T> + inline oop evac_update_with_forwarded(T* p); + + template <class T> + inline oop maybe_update_with_forwarded(T* p); + + template <class T> + inline oop maybe_update_with_forwarded_not_null(T* p, oop obj); + + template <class T> + inline oop update_with_forwarded_not_null(T* p, oop obj); + + static inline oop cas_oop(oop n, narrowOop* addr, oop c); + static inline oop cas_oop(oop n, oop* addr, oop c); + static inline oop cas_oop(oop n, narrowOop* addr, narrowOop c); + + void trash_humongous_region_at(ShenandoahHeapRegion *r); + + void deduplicate_string(oop str); + + void stop_concurrent_marking(); + +private: + void trash_cset_regions(); + void update_heap_references(bool concurrent); + +// ---------- Testing helper functions +// +private: + ShenandoahSharedFlag _inject_alloc_failure; + + void try_inject_alloc_failure(); + bool should_inject_alloc_failure(); +}; + +#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp 2020-01-17 17:10:07.988130596 +0100 @@ -0,0 +1,546 @@ +/* + * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions.
+ * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP + +#include "classfile/javaClasses.inline.hpp" +#include "gc/shared/markBitMap.inline.hpp" +#include "gc/shared/threadLocalAllocBuffer.inline.hpp" +#include "gc/shared/suspendibleThreadSet.hpp" +#include "gc/shenandoah/shenandoahAsserts.hpp" +#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp" +#include "gc/shenandoah/shenandoahCollectionSet.hpp" +#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp" +#include "gc/shenandoah/shenandoahForwarding.inline.hpp" +#include "gc/shenandoah/shenandoahWorkGroup.hpp" +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" +#include "gc/shenandoah/shenandoahControlThread.hpp" +#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp" +#include "gc/shenandoah/shenandoahThreadLocalData.hpp" +#include "oops/oop.inline.hpp" +#include "runtime/atomic.hpp" +#include "runtime/interfaceSupport.inline.hpp" +#include "runtime/prefetch.hpp" +#include "runtime/prefetch.inline.hpp" +#include "runtime/thread.hpp" +#include "utilities/copy.hpp" +#include "utilities/globalDefinitions.hpp" + + +inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() { + size_t new_index = Atomic::add((size_t) 1, &_index); + // get_region() provides the bounds-check and returns NULL on OOB. + return _heap->get_region(new_index - 1); +} + +inline bool ShenandoahHeap::has_forwarded_objects() const { + return _gc_state.is_set(HAS_FORWARDED); +} + +inline WorkGang* ShenandoahHeap::workers() const { + return _workers; +} + +inline WorkGang* ShenandoahHeap::get_safepoint_workers() { + return _safepoint_workers; +} + +inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const { + uintptr_t region_start = ((uintptr_t) addr); + uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift(); + assert(index < num_regions(), "Region index is in bounds: " PTR_FORMAT, p2i(addr)); + return index; +} + +inline ShenandoahHeapRegion* const ShenandoahHeap::heap_region_containing(const void* addr) const { + size_t index = heap_region_index_containing(addr); + ShenandoahHeapRegion* const result = get_region(index); + assert(addr >= result->bottom() && addr < result->end(), "Heap region contains the address: " PTR_FORMAT, p2i(addr)); + return result; +} + +template <class T> +inline oop ShenandoahHeap::update_with_forwarded_not_null(T* p, oop obj) { + if (in_collection_set(obj)) { + shenandoah_assert_forwarded_except(p, obj, is_full_gc_in_progress() || cancelled_gc() || is_degenerated_gc_in_progress()); + obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); + RawAccess<IS_NOT_NULL>::oop_store(p, obj); + } +#ifdef ASSERT + else { + shenandoah_assert_not_forwarded(p, obj); + } +#endif + return obj; +} + +template <class T> +inline oop ShenandoahHeap::maybe_update_with_forwarded(T* p) { + T o = RawAccess<>::oop_load(p); + if (!CompressedOops::is_null(o)) { + oop obj = CompressedOops::decode_not_null(o); + return maybe_update_with_forwarded_not_null(p, obj); + } else { + return NULL; + } +} + +template <class T> +inline oop ShenandoahHeap::evac_update_with_forwarded(T* p) { + T o = RawAccess<>::oop_load(p); + if (!CompressedOops::is_null(o)) { + oop heap_oop = CompressedOops::decode_not_null(o); + if (in_collection_set(heap_oop)) { + oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop); + if
(forwarded_oop == heap_oop) { + forwarded_oop = evacuate_object(heap_oop, Thread::current()); + } + oop prev = cas_oop(forwarded_oop, p, heap_oop); + if (prev == heap_oop) { + return forwarded_oop; + } else { + return NULL; + } + } + return heap_oop; + } else { + return NULL; + } +} + +inline oop ShenandoahHeap::cas_oop(oop n, oop* addr, oop c) { + assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr)); + return (oop) Atomic::cmpxchg(n, addr, c); +} + +inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, narrowOop c) { + narrowOop val = CompressedOops::encode(n); + return CompressedOops::decode((narrowOop) Atomic::cmpxchg(val, addr, c)); +} + +inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, oop c) { + assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr)); + narrowOop cmp = CompressedOops::encode(c); + narrowOop val = CompressedOops::encode(n); + return CompressedOops::decode((narrowOop) Atomic::cmpxchg(val, addr, cmp)); +} + +template <class T> +inline oop ShenandoahHeap::maybe_update_with_forwarded_not_null(T* p, oop heap_oop) { + shenandoah_assert_not_in_cset_loc_except(p, !is_in(p) || is_full_gc_in_progress() || is_degenerated_gc_in_progress()); + shenandoah_assert_correct(p, heap_oop); + + if (in_collection_set(heap_oop)) { + oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop); + if (forwarded_oop == heap_oop) { + // E.g. during evacuation. + return forwarded_oop; + } + + shenandoah_assert_forwarded_except(p, heap_oop, is_full_gc_in_progress() || is_degenerated_gc_in_progress()); + shenandoah_assert_not_forwarded(p, forwarded_oop); + shenandoah_assert_not_in_cset_except(p, forwarded_oop, cancelled_gc()); + + // If this fails, another thread wrote to p before us; the write will be logged in SATB and the + // reference will be updated later. + oop witness = cas_oop(forwarded_oop, p, heap_oop); + + if (witness != heap_oop) { + // CAS failed, someone beat us to it. Normally, we would return the failure witness, + // because that would be the proper write of the to-space object, enforced by strong barriers. + // However, there is a corner case with arraycopy. It can happen that a Java thread + // beats us with an arraycopy, which first copies the array, which potentially contains + // from-space refs, and only afterwards updates all from-space refs to to-space refs, + // which leaves a short window where the new array elements can be from-space. + // In this case, we can just resolve the result again. As we resolve, we need to consider + // that the contended write might have been NULL. + oop result = ShenandoahBarrierSet::resolve_forwarded(witness); + shenandoah_assert_not_forwarded_except(p, result, (result == NULL)); + shenandoah_assert_not_in_cset_except(p, result, (result == NULL) || cancelled_gc()); + return result; + } else { + // Success! We have updated with a known to-space copy. We have already asserted it is sane. + return forwarded_oop; + } + } else { + shenandoah_assert_not_forwarded(p, heap_oop); + return heap_oop; + } +} + +inline bool ShenandoahHeap::cancelled_gc() const { + return _cancelled_gc.get() == CANCELLED; +} + +inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) { + if (!
(sts_active && ShenandoahSuspendibleWorkers)) { + return cancelled_gc(); + } + + jbyte prev = _cancelled_gc.cmpxchg(NOT_CANCELLED, CANCELLABLE); + if (prev == CANCELLABLE || prev == NOT_CANCELLED) { + if (SuspendibleThreadSet::should_yield()) { + SuspendibleThreadSet::yield(); + } + + // Back to CANCELLABLE. The thread that poked NOT_CANCELLED first gets + // to restore to CANCELLABLE. + if (prev == CANCELLABLE) { + _cancelled_gc.set(CANCELLABLE); + } + return false; + } else { + return true; + } +} + +inline bool ShenandoahHeap::try_cancel_gc() { + while (true) { + jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE); + if (prev == CANCELLABLE) return true; + else if (prev == CANCELLED) return false; + assert(ShenandoahSuspendibleWorkers, "should not get here when not using suspendible workers"); + assert(prev == NOT_CANCELLED, "must be NOT_CANCELLED"); + { + // We need to provide a safepoint here, otherwise we might + // spin forever if a SP is pending. + ThreadBlockInVM sp(JavaThread::current()); + SpinPause(); + } + } +} + +inline void ShenandoahHeap::clear_cancelled_gc() { + _cancelled_gc.set(CANCELLABLE); + _oom_evac_handler.clear(); +} + +inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) { + assert(UseTLAB, "TLABs should be enabled"); + + PLAB* gclab = ShenandoahThreadLocalData::gclab(thread); + if (gclab == NULL) { + assert(!thread->is_Java_thread() && !thread->is_Worker_thread(), + "Performance: thread should have GCLAB: %s", thread->name()); + // No GCLABs in this thread, fallback to shared allocation + return NULL; + } + HeapWord* obj = gclab->allocate(size); + if (obj != NULL) { + return obj; + } + // Otherwise... + return allocate_from_gclab_slow(thread, size); +} + +inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) { + if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) { + // This thread went through the OOM during evac protocol and it is safe to return + // the forward pointer. It must not attempt to evacuate any more. + return ShenandoahBarrierSet::resolve_forwarded(p); + } + + assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope"); + + size_t size = p->size(); + + assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects"); + + bool alloc_from_gclab = true; + HeapWord* copy = NULL; + +#ifdef ASSERT + if (ShenandoahOOMDuringEvacALot && + (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call + copy = NULL; + } else { +#endif + if (UseTLAB) { + copy = allocate_from_gclab(thread, size); + } + if (copy == NULL) { + ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size); + copy = allocate_memory(req); + alloc_from_gclab = false; + } +#ifdef ASSERT + } +#endif + + if (copy == NULL) { + control_thread()->handle_alloc_failure_evac(size); + + _oom_evac_handler.handle_out_of_memory_during_evacuation(); + + return ShenandoahBarrierSet::resolve_forwarded(p); + } + + // Copy the object: + Copy::aligned_disjoint_words((HeapWord*) p, copy, size); + + // Try to install the new forwarding pointer. + oop copy_val = oop(copy); + oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val); + if (result == copy_val) { + // Successfully evacuated. Our copy is now the public one! + shenandoah_assert_correct(NULL, copy_val); + return copy_val; + } else { + // Failed to evacuate. We need to deal with the object that is left behind. 
Since this + // new allocation is certainly after TAMS, it will be considered live in the next cycle. + // But if it happens to contain references to evacuated regions, those references would + // not get updated for this stale copy during this cycle, and we will crash while scanning + // it the next cycle. + // + // For GCLAB allocations, it is enough to roll back the allocation ptr. Either the next + // object will overwrite this stale copy, or the filler object on LAB retirement will + // do this. For non-GCLAB allocations, we have no way to retract the allocation, and + // have to explicitly overwrite the copy with the filler object. With that overwrite, + // we have to keep the fwdptr initialized and pointing to our (stale) copy. + if (alloc_from_gclab) { + ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size); + } else { + fill_with_object(copy, size); + shenandoah_assert_correct(NULL, copy_val); + } + shenandoah_assert_correct(NULL, result); + return result; + } +} + +inline bool ShenandoahHeap::requires_marking(const void* entry) const { + return !_marking_context->is_marked(oop(entry)); +} + +template <class T> +inline bool ShenandoahHeap::in_collection_set(T p) const { + HeapWord* obj = (HeapWord*) p; + assert(collection_set() != NULL, "Sanity"); + assert(is_in(obj), "should be in heap"); + + return collection_set()->is_in(obj); +} + +inline bool ShenandoahHeap::is_stable() const { + return _gc_state.is_clear(); +} + +inline bool ShenandoahHeap::is_idle() const { + return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS | TRAVERSAL); +} + +inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const { + return _gc_state.is_set(MARKING); +} + +inline bool ShenandoahHeap::is_concurrent_traversal_in_progress() const { + return _gc_state.is_set(TRAVERSAL); +} + +inline bool ShenandoahHeap::is_evacuation_in_progress() const { + return _gc_state.is_set(EVACUATION); +} + +inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const { + return _gc_state.is_set(mask); +} + +inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const { + return _degenerated_gc_in_progress.is_set(); +} + +inline bool ShenandoahHeap::is_full_gc_in_progress() const { + return _full_gc_in_progress.is_set(); +} + +inline bool ShenandoahHeap::is_full_gc_move_in_progress() const { + return _full_gc_move_in_progress.is_set(); +} + +inline bool ShenandoahHeap::is_update_refs_in_progress() const { + return _gc_state.is_set(UPDATEREFS); +} + +template <class T> +inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) { + marked_object_iterate(region, cl, region->top()); +} + +template <class T> +inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) { + assert(! region->is_humongous_continuation(), "no humongous continuation regions here"); + + ShenandoahMarkingContext* const ctx = complete_marking_context(); + assert(ctx->is_complete(), "sanity"); + + MarkBitMap* mark_bit_map = ctx->mark_bit_map(); + HeapWord* tams = ctx->top_at_mark_start(region); + + size_t skip_bitmap_delta = 1; + HeapWord* start = region->bottom(); + HeapWord* end = MIN2(tams, region->end()); + + // Step 1. Scan below the TAMS based on bitmap data. + HeapWord* limit_bitmap = MIN2(limit, tams); + + // Try to scan the initial candidate. If the candidate is above the TAMS, it would + // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
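To see why the split around TAMS works, here is a minimal standalone sketch (all types and names are illustrative, not HotSpot code): objects at or above top-at-mark-start were allocated during the current cycle and are implicitly live, so only the part of the region below TAMS needs to consult the mark bitmap. For simplicity the sketch probes the bitmap linearly, where the real Step 1 skips ahead with getNextMarkedWordAddress().

#include <cstddef>
#include <cstdio>
#include <vector>

// Toy model of the two-step marked-object walk. A "word" holds the object
// size at object starts; a bool vector stands in for the mark bitmap.
struct ToyRegion {
  std::vector<size_t> words;   // words[i] == object size, at object starts
  std::vector<bool>   marked;  // mark "bitmap", only valid below tams
  size_t tams;                 // top-at-mark-start
  size_t top;                  // current allocation top
};

template <class Closure>
void marked_object_iterate(const ToyRegion& r, Closure cl) {
  // Step 1: below TAMS, trust only the mark bitmap.
  for (size_t i = 0; i < r.tams; i++) {
    if (r.marked[i]) cl(i, r.words[i]);
  }
  // Step 2: at/above TAMS, every object is implicitly live; walk by size.
  for (size_t i = r.tams; i < r.top; i += r.words[i]) {
    cl(i, r.words[i]);
  }
}

int main() {
  // One marked old object at 0, one dead old object at 3, one new object at 5.
  ToyRegion r = { {3, 0, 0, 2, 0, 2, 0},
                  {true, false, false, false, false, false, false},
                  5, 7 };
  marked_object_iterate(r, [](size_t at, size_t size) {
    std::printf("live object at %zu, %zu words\n", at, size);
  });
  return 0;
}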
+ HeapWord* cb = mark_bit_map->getNextMarkedWordAddress(start, end); + + intx dist = ShenandoahMarkScanPrefetch; + if (dist > 0) { + // Batched scan that prefetches the oop data, anticipating the access to + // either header, oop field, or forwarding pointer. Note that we cannot + // touch anything in the oop while it is still being prefetched, to get enough + // time for the prefetch to work. This is why we try to scan the bitmap linearly, + // disregarding the object size. However, since we know the forwarding pointer + // precedes the object, we can skip over it. Once we cannot trust the bitmap, + // there is no point in prefetching the oop contents, as oop->size() will + // touch it prematurely. + + // No variable-length arrays in standard C++, so have enough slots to fit + // the prefetch distance. + static const int SLOT_COUNT = 256; + guarantee(dist <= SLOT_COUNT, "adjust slot count"); + HeapWord* slots[SLOT_COUNT]; + + int avail; + do { + avail = 0; + for (int c = 0; (c < dist) && (cb < limit_bitmap); c++) { + Prefetch::read(cb, oopDesc::mark_offset_in_bytes()); + slots[avail++] = cb; + cb += skip_bitmap_delta; + if (cb < limit_bitmap) { + cb = mark_bit_map->getNextMarkedWordAddress(cb, limit_bitmap); + } + } + + for (int c = 0; c < avail; c++) { + assert (slots[c] < tams, "only objects below TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(tams)); + assert (slots[c] < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(limit)); + oop obj = oop(slots[c]); + assert(oopDesc::is_oop(obj), "sanity"); + assert(ctx->is_marked(obj), "object expected to be marked"); + cl->do_object(obj); + } + } while (avail > 0); + } else { + while (cb < limit_bitmap) { + assert (cb < tams, "only objects below TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(tams)); + assert (cb < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(limit)); + oop obj = oop(cb); + assert(oopDesc::is_oop(obj), "sanity"); + assert(ctx->is_marked(obj), "object expected to be marked"); + cl->do_object(obj); + cb += skip_bitmap_delta; + if (cb < limit_bitmap) { + cb = mark_bit_map->getNextMarkedWordAddress(cb, limit_bitmap); + } + } + } + + // Step 2. Accurate size-based traversal, happens past the TAMS. + // This restarts the scan at TAMS, which makes sure we traverse all objects, + // regardless of what happened at Step 1.
+ HeapWord* cs = tams; + while (cs < limit) { + assert (cs >= tams, "only objects past TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams)); + assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit)); + oop obj = oop(cs); + assert(oopDesc::is_oop(obj), "sanity"); + assert(ctx->is_marked(obj), "object expected to be marked"); + int size = obj->size(); + cl->do_object(obj); + cs += size; + } +} + +template <class T> +class ShenandoahObjectToOopClosure : public ObjectClosure { + T* _cl; +public: + ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {} + + void do_object(oop obj) { + obj->oop_iterate(_cl); + } +}; + +template <class T> +class ShenandoahObjectToOopBoundedClosure : public ObjectClosure { + T* _cl; + MemRegion _bounds; +public: + ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) : + _cl(cl), _bounds(bottom, top) {} + + void do_object(oop obj) { + obj->oop_iterate(_cl, _bounds); + } +}; + +template <class T> +inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* top) { + if (region->is_humongous()) { + HeapWord* bottom = region->bottom(); + if (top > bottom) { + region = region->humongous_start_region(); + ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top); + marked_object_iterate(region, &objs); + } + } else { + ShenandoahObjectToOopClosure<T> objs(cl); + marked_object_iterate(region, &objs, top); + } +} + +inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const { + if (region_idx < _num_regions) { + return _regions[region_idx]; + } else { + return NULL; + } +} + +inline void ShenandoahHeap::mark_complete_marking_context() { + _marking_context->mark_complete(); +} + +inline void ShenandoahHeap::mark_incomplete_marking_context() { + _marking_context->mark_incomplete(); +} + +inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const { + assert (_marking_context->is_complete(), "sanity"); + return _marking_context; +} + +inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const { + return _marking_context; +} + +#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp 2020-01-17 17:10:08.594130563 +0100 @@ -0,0 +1,700 @@ +/* + * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions.
+ * + */ + +#include "precompiled.hpp" +#include "memory/allocation.hpp" +#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.hpp" +#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp" +#include "gc/shenandoah/shenandoahTraversalGC.hpp" +#include "gc/shared/space.inline.hpp" +#include "jfr/jfrEvents.hpp" +#include "memory/iterator.inline.hpp" +#include "memory/resourceArea.hpp" +#include "memory/universe.hpp" +#include "oops/oop.inline.hpp" +#include "runtime/java.hpp" +#include "runtime/mutexLocker.hpp" +#include "runtime/os.hpp" +#include "runtime/safepoint.hpp" + +size_t ShenandoahHeapRegion::RegionCount = 0; +size_t ShenandoahHeapRegion::RegionSizeBytes = 0; +size_t ShenandoahHeapRegion::RegionSizeWords = 0; +size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0; +size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0; +size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0; +size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0; +size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0; +size_t ShenandoahHeapRegion::HumongousThresholdWords = 0; +size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0; +size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0; + +ShenandoahHeapRegion::PaddedAllocSeqNum ShenandoahHeapRegion::_alloc_seq_num; + +ShenandoahHeapRegion::ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start, + size_t size_words, size_t index, bool committed) : + _heap(heap), + _reserved(MemRegion(start, size_words)), + _region_number(index), + _new_top(NULL), + _empty_time(os::elapsedTime()), + _state(committed ? _empty_committed : _empty_uncommitted), + _tlab_allocs(0), + _gclab_allocs(0), + _shared_allocs(0), + _seqnum_first_alloc_mutator(0), + _seqnum_first_alloc_gc(0), + _seqnum_last_alloc_mutator(0), + _seqnum_last_alloc_gc(0), + _live_data(0), + _critical_pins(0) { + + ContiguousSpace::initialize(_reserved, true, committed); +} + +size_t ShenandoahHeapRegion::region_number() const { + return _region_number; +} + +void ShenandoahHeapRegion::report_illegal_transition(const char *method) { + ResourceMark rm; + stringStream ss; + ss.print("Illegal region state transition from \"%s\", at %s\n ", region_state_to_string(_state), method); + print_on(&ss); + fatal("%s", ss.as_string()); +} + +void ShenandoahHeapRegion::make_regular_allocation() { + _heap->assert_heaplock_owned_by_current_thread(); + + switch (_state) { + case _empty_uncommitted: + do_commit(); + case _empty_committed: + set_state(_regular); + case _regular: + case _pinned: + return; + default: + report_illegal_transition("regular allocation"); + } +} + +void ShenandoahHeapRegion::make_regular_bypass() { + _heap->assert_heaplock_owned_by_current_thread(); + assert (_heap->is_full_gc_in_progress() || _heap->is_degenerated_gc_in_progress(), + "only for full or degen GC"); + + switch (_state) { + case _empty_uncommitted: + do_commit(); + case _empty_committed: + case _cset: + case _humongous_start: + case _humongous_cont: + set_state(_regular); + return; + case _pinned_cset: + set_state(_pinned); + return; + case _regular: + case _pinned: + return; + default: + report_illegal_transition("regular bypass"); + } +} + +void ShenandoahHeapRegion::make_humongous_start() { + _heap->assert_heaplock_owned_by_current_thread(); + switch (_state) { + case _empty_uncommitted: + do_commit(); + case _empty_committed: + set_state(_humongous_start); + return; + default: + report_illegal_transition("humongous start allocation"); + } 
+} + +void ShenandoahHeapRegion::make_humongous_start_bypass() { + _heap->assert_heaplock_owned_by_current_thread(); + assert (_heap->is_full_gc_in_progress(), "only for full GC"); + + switch (_state) { + case _empty_committed: + case _regular: + case _humongous_start: + case _humongous_cont: + set_state(_humongous_start); + return; + default: + report_illegal_transition("humongous start bypass"); + } +} + +void ShenandoahHeapRegion::make_humongous_cont() { + _heap->assert_heaplock_owned_by_current_thread(); + switch (_state) { + case _empty_uncommitted: + do_commit(); + case _empty_committed: + set_state(_humongous_cont); + return; + default: + report_illegal_transition("humongous continuation allocation"); + } +} + +void ShenandoahHeapRegion::make_humongous_cont_bypass() { + _heap->assert_heaplock_owned_by_current_thread(); + assert (_heap->is_full_gc_in_progress(), "only for full GC"); + + switch (_state) { + case _empty_committed: + case _regular: + case _humongous_start: + case _humongous_cont: + set_state(_humongous_cont); + return; + default: + report_illegal_transition("humongous continuation bypass"); + } +} + +void ShenandoahHeapRegion::make_pinned() { + _heap->assert_heaplock_owned_by_current_thread(); + assert(pin_count() > 0, "Should have pins: " SIZE_FORMAT, pin_count()); + + switch (_state) { + case _regular: + set_state(_pinned); + case _pinned_cset: + case _pinned: + return; + case _humongous_start: + set_state(_pinned_humongous_start); + case _pinned_humongous_start: + return; + case _cset: + _state = _pinned_cset; + return; + default: + report_illegal_transition("pinning"); + } +} + +void ShenandoahHeapRegion::make_unpinned() { + _heap->assert_heaplock_owned_by_current_thread(); + assert(pin_count() == 0, "Should not have pins: " SIZE_FORMAT, pin_count()); + + switch (_state) { + case _pinned: + set_state(_regular); + return; + case _regular: + case _humongous_start: + return; + case _pinned_cset: + set_state(_cset); + return; + case _pinned_humongous_start: + set_state(_humongous_start); + return; + default: + report_illegal_transition("unpinning"); + } +} + +void ShenandoahHeapRegion::make_cset() { + _heap->assert_heaplock_owned_by_current_thread(); + switch (_state) { + case _regular: + set_state(_cset); + case _cset: + return; + default: + report_illegal_transition("cset"); + } +} + +void ShenandoahHeapRegion::make_trash() { + _heap->assert_heaplock_owned_by_current_thread(); + switch (_state) { + case _cset: + // Reclaiming cset regions + case _humongous_start: + case _humongous_cont: + // Reclaiming humongous regions + case _regular: + // Immediate region reclaim + set_state(_trash); + return; + default: + report_illegal_transition("trashing"); + } +} + +void ShenandoahHeapRegion::make_trash_immediate() { + make_trash(); + + // On this path, we know there are no marked objects in the region, + // tell marking context about it to bypass bitmap resets. 
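All of the make_* transitions above share one shape: assert the heap lock, switch on the current state, admit a short whitelist of source states (with deliberate fall-throughs where several sources share a target), and report anything else as a fatal logic error. A minimal standalone model of that shape, with invented names and abort() standing in for fatal():

#include <cstdio>
#include <cstdlib>

enum RegionState { EMPTY, REGULAR, CSET, TRASH };

struct Region {
  RegionState state = EMPTY;

  void report_illegal_transition(const char* method) {
    std::fprintf(stderr, "illegal transition from %d at %s\n", state, method);
    std::abort();   // stands in for fatal()
  }

  // Mirrors the shape of make_trash(): several active states may collapse
  // into TRASH; anything else is a logic bug caught immediately.
  void make_trash() {
    switch (state) {
      case REGULAR:   // immediate reclaim
      case CSET:      // normal cycle reclaim
        state = TRASH;
        return;
      default:
        report_illegal_transition("trashing");
    }
  }
};

int main() {
  Region r;
  r.state = CSET;
  r.make_trash();   // ok: CSET -> TRASH
  r.make_trash();   // TRASH -> TRASH is not whitelisted; aborts
}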
+ _heap->complete_marking_context()->reset_top_bitmap(this); +} + +void ShenandoahHeapRegion::make_empty() { + _heap->assert_heaplock_owned_by_current_thread(); + switch (_state) { + case _trash: + set_state(_empty_committed); + _empty_time = os::elapsedTime(); + return; + default: + report_illegal_transition("emptying"); + } +} + +void ShenandoahHeapRegion::make_uncommitted() { + _heap->assert_heaplock_owned_by_current_thread(); + switch (_state) { + case _empty_committed: + do_uncommit(); + set_state(_empty_uncommitted); + return; + default: + report_illegal_transition("uncommitting"); + } +} + +void ShenandoahHeapRegion::make_committed_bypass() { + _heap->assert_heaplock_owned_by_current_thread(); + assert (_heap->is_full_gc_in_progress(), "only for full GC"); + + switch (_state) { + case _empty_uncommitted: + do_commit(); + set_state(_empty_committed); + return; + default: + report_illegal_transition("commit bypass"); + } +} + +void ShenandoahHeapRegion::clear_live_data() { + OrderAccess::release_store_fence(&_live_data, 0); +} + +void ShenandoahHeapRegion::reset_alloc_metadata() { + _tlab_allocs = 0; + _gclab_allocs = 0; + _shared_allocs = 0; + _seqnum_first_alloc_mutator = 0; + _seqnum_last_alloc_mutator = 0; + _seqnum_first_alloc_gc = 0; + _seqnum_last_alloc_gc = 0; +} + +void ShenandoahHeapRegion::reset_alloc_metadata_to_shared() { + if (used() > 0) { + _tlab_allocs = 0; + _gclab_allocs = 0; + _shared_allocs = used() >> LogHeapWordSize; + uint64_t next = _alloc_seq_num.value++; + _seqnum_first_alloc_mutator = next; + _seqnum_last_alloc_mutator = next; + _seqnum_first_alloc_gc = 0; + _seqnum_last_alloc_gc = 0; + } else { + reset_alloc_metadata(); + } +} + +size_t ShenandoahHeapRegion::get_shared_allocs() const { + return _shared_allocs * HeapWordSize; +} + +size_t ShenandoahHeapRegion::get_tlab_allocs() const { + return _tlab_allocs * HeapWordSize; +} + +size_t ShenandoahHeapRegion::get_gclab_allocs() const { + return _gclab_allocs * HeapWordSize; +} + +void ShenandoahHeapRegion::set_live_data(size_t s) { + assert(Thread::current()->is_VM_thread(), "by VM thread"); + _live_data = (s >> LogHeapWordSize); +} + +size_t ShenandoahHeapRegion::get_live_data_words() const { + return OrderAccess::load_acquire(&_live_data); +} + +size_t ShenandoahHeapRegion::get_live_data_bytes() const { + return get_live_data_words() * HeapWordSize; +} + +bool ShenandoahHeapRegion::has_live() const { + return get_live_data_words() != 0; +} + +size_t ShenandoahHeapRegion::garbage() const { + assert(used() >= get_live_data_bytes(), "Live Data must be a subset of used() live: " SIZE_FORMAT " used: " SIZE_FORMAT, + get_live_data_bytes(), used()); + + size_t result = used() - get_live_data_bytes(); + return result; +} + +void ShenandoahHeapRegion::print_on(outputStream* st) const { + st->print("|"); + st->print(SIZE_FORMAT_W(5), this->_region_number); + + switch (_state) { + case _empty_uncommitted: + st->print("|EU "); + break; + case _empty_committed: + st->print("|EC "); + break; + case _regular: + st->print("|R "); + break; + case _humongous_start: + st->print("|H "); + break; + case _pinned_humongous_start: + st->print("|HP "); + break; + case _humongous_cont: + st->print("|HC "); + break; + case _cset: + st->print("|CS "); + break; + case _trash: + st->print("|T "); + break; + case _pinned: + st->print("|P "); + break; + case _pinned_cset: + st->print("|CSP"); + break; + default: + ShouldNotReachHere(); + } + st->print("|BTE " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12), +
p2i(bottom()), p2i(top()), p2i(end())); + st->print("|TAMS " INTPTR_FORMAT_W(12), + p2i(_heap->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this)))); + st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used())); + st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()), proper_unit_for_byte_size(get_tlab_allocs())); + st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()), proper_unit_for_byte_size(get_gclab_allocs())); + st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()), proper_unit_for_byte_size(get_shared_allocs())); + st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes())); + st->print("|CP " SIZE_FORMAT_W(3), pin_count()); + st->print("|SN " UINT64_FORMAT_X_W(12) ", " UINT64_FORMAT_X_W(8) ", " UINT64_FORMAT_X_W(8) ", " UINT64_FORMAT_X_W(8), + seqnum_first_alloc_mutator(), seqnum_last_alloc_mutator(), + seqnum_first_alloc_gc(), seqnum_last_alloc_gc()); + st->cr(); +} + +void ShenandoahHeapRegion::oop_iterate(OopIterateClosure* blk) { + if (!is_active()) return; + if (is_humongous()) { + oop_iterate_humongous(blk); + } else { + oop_iterate_objects(blk); + } +} + +void ShenandoahHeapRegion::oop_iterate_objects(OopIterateClosure* blk) { + assert(! is_humongous(), "no humongous region here"); + HeapWord* obj_addr = bottom(); + HeapWord* t = top(); + // Could call objects iterate, but this is easier. + while (obj_addr < t) { + oop obj = oop(obj_addr); + obj_addr += obj->oop_iterate_size(blk); + } +} + +void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk) { + assert(is_humongous(), "only humongous region here"); + // Find head. + ShenandoahHeapRegion* r = humongous_start_region(); + assert(r->is_humongous_start(), "need humongous head here"); + oop obj = oop(r->bottom()); + obj->oop_iterate(blk, MemRegion(bottom(), top())); +} + +ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const { + assert(is_humongous(), "Must be a part of the humongous region"); + size_t reg_num = region_number(); + ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this); + while (!r->is_humongous_start()) { + assert(reg_num > 0, "Sanity"); + reg_num --; + r = _heap->get_region(reg_num); + assert(r->is_humongous(), "Must be a part of the humongous region"); + } + assert(r->is_humongous_start(), "Must be"); + return r; +} + +void ShenandoahHeapRegion::recycle() { + ContiguousSpace::clear(false); + if (ZapUnusedHeapArea) { + ContiguousSpace::mangle_unused_area_complete(); + } + clear_live_data(); + + reset_alloc_metadata(); + + _heap->marking_context()->reset_top_at_mark_start(this); + + make_empty(); +} + +HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const { + assert(MemRegion(bottom(), end()).contains(p), + "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")", + p2i(p), p2i(bottom()), p2i(end())); + if (p >= top()) { + return top(); + } else { + HeapWord* last = bottom(); + HeapWord* cur = last; + while (cur <= p) { + last = cur; + cur += oop(cur)->size(); + } + shenandoah_assert_correct(NULL, oop(last)); + return last; + } +} + +void ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) { + // Absolute minimums we should not ever break.
+ static const size_t MIN_REGION_SIZE = 256*K; + + if (FLAG_IS_DEFAULT(ShenandoahMinRegionSize)) { + FLAG_SET_DEFAULT(ShenandoahMinRegionSize, MIN_REGION_SIZE); + } + + size_t region_size; + if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) { + if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) { + err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number " + "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "%s).", + byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size), + MIN_NUM_REGIONS, + byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize)); + vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message); + } + if (ShenandoahMinRegionSize < MIN_REGION_SIZE) { + err_msg message("" SIZE_FORMAT "%s should not be lower than minimum region size (" SIZE_FORMAT "%s).", + byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize), + byte_size_in_proper_unit(MIN_REGION_SIZE), proper_unit_for_byte_size(MIN_REGION_SIZE)); + vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message); + } + if (ShenandoahMinRegionSize < MinTLABSize) { + err_msg message("" SIZE_FORMAT "%s should not be lower than TLAB size (" SIZE_FORMAT "%s).", + byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize), + byte_size_in_proper_unit(MinTLABSize), proper_unit_for_byte_size(MinTLABSize)); + vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message); + } + if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) { + err_msg message("" SIZE_FORMAT "%s should not be lower than min region size (" SIZE_FORMAT "%s).", + byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize), + byte_size_in_proper_unit(MIN_REGION_SIZE), proper_unit_for_byte_size(MIN_REGION_SIZE)); + vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message); + } + if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) { + err_msg message("Minimum (" SIZE_FORMAT "%s) should not be larger than maximum (" SIZE_FORMAT "%s).", + byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize), + byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize)); + vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message); + } + + // We rapidly expand to max_heap_size in most scenarios, so that is the measure + // for usual heap sizes. Do not depend on initial_heap_size here. + region_size = max_heap_size / ShenandoahTargetNumRegions; + + // Now make sure that we don't go over or under our limits.
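As a worked instance of the sizing arithmetic above and the clamping that follows, the sketch below reruns it with plausible values; the defaults mirrored here (2048 target regions, a 256K..32M clamp) are assumptions taken from typical Shenandoah flag defaults, not values read from this patch. A 4G heap lands on 2M regions, while a 512M heap sits exactly at the 256K floor.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Standalone rerun of the sizing arithmetic. The constants are assumed
// stand-ins for ShenandoahTargetNumRegions and the min/max region flags.
static size_t region_size_for(size_t max_heap_size) {
  const size_t K = 1024, M = K * K;
  const size_t target_num_regions = 2048;
  const size_t min_region = 256 * K, max_region = 32 * M;

  size_t region_size = max_heap_size / target_num_regions;
  region_size = std::max(min_region, region_size);
  region_size = std::min(max_region, region_size);

  // Round down to a power of two, like the log2 recalculation further below.
  size_t pow2 = 1;
  while (pow2 * 2 <= region_size) pow2 *= 2;
  return pow2;
}

int main() {
  const size_t G = 1024ull * 1024 * 1024;
  std::printf("4G heap   -> %zuK regions\n", region_size_for(4 * G) / 1024);           // 2048K
  std::printf("512M heap -> %zuK regions\n", region_size_for(512 * 1024 * 1024) / 1024); // 256K
}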
+ region_size = MAX2(ShenandoahMinRegionSize, region_size); + region_size = MIN2(ShenandoahMaxRegionSize, region_size); + + } else { + if (ShenandoahHeapRegionSize > max_heap_size / MIN_NUM_REGIONS) { + err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number " + "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "%s).", + byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size), + MIN_NUM_REGIONS, + byte_size_in_proper_unit(ShenandoahHeapRegionSize), proper_unit_for_byte_size(ShenandoahHeapRegionSize)); + vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message); + } + if (ShenandoahHeapRegionSize < ShenandoahMinRegionSize) { + err_msg message("Heap region size (" SIZE_FORMAT "%s) should be larger than min region size (" SIZE_FORMAT "%s).", + byte_size_in_proper_unit(ShenandoahHeapRegionSize), proper_unit_for_byte_size(ShenandoahHeapRegionSize), + byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize)); + vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message); + } + if (ShenandoahHeapRegionSize > ShenandoahMaxRegionSize) { + err_msg message("Heap region size (" SIZE_FORMAT "%s) should be lower than max region size (" SIZE_FORMAT "%s).", + byte_size_in_proper_unit(ShenandoahHeapRegionSize), proper_unit_for_byte_size(ShenandoahHeapRegionSize), + byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize)); + vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message); + } + region_size = ShenandoahHeapRegionSize; + } + + // Make sure region size is at least one large page, if enabled. + // Otherwise, uncommitting one region may falsely uncommit the adjacent + // regions too. + // Also see shenandoahArguments.cpp, where it handles UseLargePages. + if (UseLargePages && ShenandoahUncommit) { + region_size = MAX2(region_size, os::large_page_size()); + } + + int region_size_log = log2_long((jlong) region_size); + // Recalculate the region size to make sure it's a power of + // 2. This means that region_size is the largest power of 2 that's + // <= what we've calculated so far. + region_size = size_t(1) << region_size_log; + + // Now, set up the globals. 
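The point of the shift and mask globals set up below is that, with a power-of-two region size, finding the region index for an address (as heap_region_index_containing() does earlier in shenandoahHeap.inline.hpp) is a subtract-and-shift, and the in-region offset is a mask. A self-contained illustration with made-up numbers:

#include <cassert>
#include <cstdint>
#include <cstdio>

// With a power-of-two region size, the containing-region index and the
// offset inside a region reduce to a shift and a mask, exactly like the
// RegionSize*Shift / RegionSize*Mask globals set up here. The addresses
// and sizes below are invented for illustration.
int main() {
  const uintptr_t region_size = 2 * 1024 * 1024;   // 2M, a power of two
  const uintptr_t shift       = 21;                // log2(2M)
  const uintptr_t mask        = region_size - 1;
  const uintptr_t heap_base   = 0x80000000;

  uintptr_t addr   = heap_base + 5 * region_size + 0x1234;
  uintptr_t index  = (addr - heap_base) >> shift;  // which region
  uintptr_t offset = (addr - heap_base) & mask;    // where inside it

  assert(index == 5 && offset == 0x1234);
  std::printf("region %lu, offset 0x%lx\n", (unsigned long) index, (unsigned long) offset);
}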
+ guarantee(RegionSizeBytesShift == 0, "we should only set it once"); + RegionSizeBytesShift = (size_t)region_size_log; + + guarantee(RegionSizeWordsShift == 0, "we should only set it once"); + RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize; + + guarantee(RegionSizeBytes == 0, "we should only set it once"); + RegionSizeBytes = region_size; + RegionSizeWords = RegionSizeBytes >> LogHeapWordSize; + assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity"); + + guarantee(RegionSizeWordsMask == 0, "we should only set it once"); + RegionSizeWordsMask = RegionSizeWords - 1; + + guarantee(RegionSizeBytesMask == 0, "we should only set it once"); + RegionSizeBytesMask = RegionSizeBytes - 1; + + guarantee(RegionCount == 0, "we should only set it once"); + RegionCount = max_heap_size / RegionSizeBytes; + guarantee(RegionCount >= MIN_NUM_REGIONS, "Should have at least minimum regions"); + + guarantee(HumongousThresholdWords == 0, "we should only set it once"); + HumongousThresholdWords = RegionSizeWords * ShenandoahHumongousThreshold / 100; + HumongousThresholdWords = align_down(HumongousThresholdWords, MinObjAlignment); + assert (HumongousThresholdWords <= RegionSizeWords, "sanity"); + + guarantee(HumongousThresholdBytes == 0, "we should only set it once"); + HumongousThresholdBytes = HumongousThresholdWords * HeapWordSize; + assert (HumongousThresholdBytes <= RegionSizeBytes, "sanity"); + + // The rationale for trimming the TLAB sizes has to do with the raciness in + // the TLAB allocation machinery. It may happen that the TLAB sizing policy polls Shenandoah + // about the next free size, gets the answer for region #N, goes away for a while, then + // tries to allocate in region #N, and fails because some other thread has claimed part + // of region #N; the freeset allocation code then has to retire region #N + // before moving the allocation to region #N+1. + // + // The worst case occurs when the "answer" is "region size", which means it could + // prematurely retire an entire region. Having smaller TLABs does not fix that + // completely, but reduces the probability of too wasteful region retirement. + // With the current divisor, we will waste no more than 1/8 of the region size in the worst + // case. This also has a secondary effect on collection set selection: even under + // the race, the regions would be at least 7/8 used, which allows relying on + // "used" - "live" for cset selection. Otherwise, we can get a fragmented region + // below the garbage threshold that would never be considered for collection. + // + // The whole thing is mitigated if Elastic TLABs are enabled. + // + guarantee(MaxTLABSizeWords == 0, "we should only set it once"); + MaxTLABSizeWords = MIN2(ShenandoahElasticTLAB ?
RegionSizeWords : (RegionSizeWords / 8), HumongousThresholdWords); + MaxTLABSizeWords = align_down(MaxTLABSizeWords, MinObjAlignment); + + guarantee(MaxTLABSizeBytes == 0, "we should only set it once"); + MaxTLABSizeBytes = MaxTLABSizeWords * HeapWordSize; + assert (MaxTLABSizeBytes > MinTLABSize, "should be larger"); + + log_info(gc, init)("Regions: " SIZE_FORMAT " x " SIZE_FORMAT "%s", + RegionCount, byte_size_in_proper_unit(RegionSizeBytes), proper_unit_for_byte_size(RegionSizeBytes)); + log_info(gc, init)("Humongous object threshold: " SIZE_FORMAT "%s", + byte_size_in_proper_unit(HumongousThresholdBytes), proper_unit_for_byte_size(HumongousThresholdBytes)); + log_info(gc, init)("Max TLAB size: " SIZE_FORMAT "%s", + byte_size_in_proper_unit(MaxTLABSizeBytes), proper_unit_for_byte_size(MaxTLABSizeBytes)); +} + +void ShenandoahHeapRegion::do_commit() { + if (!_heap->is_heap_region_special() && !os::commit_memory((char *) _reserved.start(), _reserved.byte_size(), false)) { + report_java_out_of_memory("Unable to commit region"); + } + if (!_heap->commit_bitmap_slice(this)) { + report_java_out_of_memory("Unable to commit bitmaps for region"); + } + _heap->increase_committed(ShenandoahHeapRegion::region_size_bytes()); +} + +void ShenandoahHeapRegion::do_uncommit() { + if (!_heap->is_heap_region_special() && !os::uncommit_memory((char *) _reserved.start(), _reserved.byte_size())) { + report_java_out_of_memory("Unable to uncommit region"); + } + if (!_heap->uncommit_bitmap_slice(this)) { + report_java_out_of_memory("Unable to uncommit bitmaps for region"); + } + _heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes()); +} + +void ShenandoahHeapRegion::set_state(RegionState to) { + EventShenandoahHeapRegionStateChange evt; + if (evt.should_commit()){ + evt.set_index(region_number()); + evt.set_start((uintptr_t)bottom()); + evt.set_used(used()); + evt.set_from(_state); + evt.set_to(to); + evt.commit(); + } + _state = to; +} + +void ShenandoahHeapRegion::record_pin() { + Atomic::add((size_t)1, &_critical_pins); +} + +void ShenandoahHeapRegion::record_unpin() { + assert(pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", region_number()); + Atomic::sub((size_t)1, &_critical_pins); +} + +size_t ShenandoahHeapRegion::pin_count() const { + return Atomic::load(&_critical_pins); +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp 2020-01-17 17:10:09.201130529 +0100 @@ -0,0 +1,443 @@ +/* + * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP + +#include "gc/shared/space.hpp" +#include "gc/shenandoah/shenandoahAllocRequest.hpp" +#include "gc/shenandoah/shenandoahAsserts.hpp" +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahPacer.hpp" +#include "memory/universe.hpp" +#include "utilities/sizes.hpp" + +class VMStructs; +class ShenandoahHeapRegionStateConstant; + +class ShenandoahHeapRegion : public ContiguousSpace { + friend class VMStructs; + friend class ShenandoahHeapRegionStateConstant; +private: + /* + Region state is described by a state machine. Transitions are guarded by + heap lock, which allows changing the state of several regions atomically. + Region states can be logically aggregated in groups. + + "Empty": + ................................................................. + . . + . . + . Uncommitted <------- Committed <------------------------\ + . | | . | + . \---------v-----------/ . | + . | . | + .........................|....................................... | + | | + "Active": | | + .........................|....................................... | + . | . | + . /-----------------^-------------------\ . | + . | | . | + . v v "Humongous": . | + . Regular ---\-----\ ..................O................ . | + . | ^ | | . | . . | + . | | | | . *---------\ . . | + . v | | | . v v . . | + . Pinned Cset | . HStart <--> H/Start H/Cont . . | + . ^ / | | . Pinned v | . . | + . | / | | . *<--------/ . . | + . | v | | . | . . | + . CsetPinned | | ..................O................ . | + . | | | . | + . \-----\---v-------------------/ . | + . | . | + .........................|....................................... | + | | + "Trash": | | + .........................|....................................... | + . | . | + . v . | + . Trash ---------------------------------------/ + . . + . . + ................................................................. + + Transition from "Empty" to "Active" is first allocation. It can go from {Uncommitted, Committed} + to {Regular, "Humongous"}. The allocation may happen in Regular regions too, but not in Humongous. + + Transition from "Active" to "Trash" is reclamation. It can go from CSet during the normal cycle, + and from {Regular, "Humongous"} for immediate reclamation. The existence of Trash state allows + quick reclamation without actual cleaning up. + + Transition from "Trash" to "Empty" is recycling. It cleans up the regions and corresponding metadata. + Can be done asynchronously and in bulk. + + Note how internal transitions disallow logic bugs: + a) No region can go Empty, unless properly reclaimed/recycled; + b) No region can go Uncommitted, unless reclaimed/recycled first; + c) Only Regular regions can go to CSet; + d) Pinned cannot go Trash, thus it could never be reclaimed until unpinned; + e) Pinned cannot go CSet, thus it never moves; + f) Humongous cannot be used for regular allocations; + g) Humongous cannot go CSet, thus it never moves; + h) Humongous start can go pinned, and thus can be protected from moves (humongous continuations should + follow associated humongous starts, not pinnable/movable by themselves); + i) Empty cannot go Trash, avoiding useless work; + j) ... 
+ */ + + enum RegionState { + _empty_uncommitted, // region is empty and has memory uncommitted + _empty_committed, // region is empty and has memory committed + _regular, // region is for regular allocations + _humongous_start, // region is the humongous start + _humongous_cont, // region is the humongous continuation + _pinned_humongous_start, // region is both humongous start and pinned + _cset, // region is in collection set + _pinned, // region is pinned + _pinned_cset, // region is pinned and in cset (evac failure path) + _trash, // region contains only trash + _REGION_STATES_NUM // last + }; + + static const char* region_state_to_string(RegionState s) { + switch (s) { + case _empty_uncommitted: return "Empty Uncommitted"; + case _empty_committed: return "Empty Committed"; + case _regular: return "Regular"; + case _humongous_start: return "Humongous Start"; + case _humongous_cont: return "Humongous Continuation"; + case _pinned_humongous_start: return "Humongous Start, Pinned"; + case _cset: return "Collection Set"; + case _pinned: return "Pinned"; + case _pinned_cset: return "Collection Set, Pinned"; + case _trash: return "Trash"; + default: + ShouldNotReachHere(); + return ""; + } + } + + // This method protects from accidental changes in enum order: + int region_state_to_ordinal(RegionState s) const { + switch (s) { + case _empty_uncommitted: return 0; + case _empty_committed: return 1; + case _regular: return 2; + case _humongous_start: return 3; + case _humongous_cont: return 4; + case _cset: return 5; + case _pinned: return 6; + case _trash: return 7; + case _pinned_cset: return 8; + case _pinned_humongous_start: return 9; + default: + ShouldNotReachHere(); + return -1; + } + } + + void report_illegal_transition(const char* method); + +public: + static const int region_states_num() { + return _REGION_STATES_NUM; + } + + // Allowed transitions from the outside code: + void make_regular_allocation(); + void make_regular_bypass(); + void make_humongous_start(); + void make_humongous_cont(); + void make_humongous_start_bypass(); + void make_humongous_cont_bypass(); + void make_pinned(); + void make_unpinned(); + void make_cset(); + void make_trash(); + void make_trash_immediate(); + void make_empty(); + void make_uncommitted(); + void make_committed_bypass(); + + // Individual states: + bool is_empty_uncommitted() const { return _state == _empty_uncommitted; } + bool is_empty_committed() const { return _state == _empty_committed; } + bool is_regular() const { return _state == _regular; } + bool is_humongous_continuation() const { return _state == _humongous_cont; } + + // Participation in logical groups: + bool is_empty() const { return is_empty_committed() || is_empty_uncommitted(); } + bool is_active() const { return !is_empty() && !is_trash(); } + bool is_trash() const { return _state == _trash; } + bool is_humongous_start() const { return _state == _humongous_start || _state == _pinned_humongous_start; } + bool is_humongous() const { return is_humongous_start() || is_humongous_continuation(); } + bool is_committed() const { return !is_empty_uncommitted(); } + bool is_cset() const { return _state == _cset || _state == _pinned_cset; } + bool is_pinned() const { return _state == _pinned || _state == _pinned_cset || _state == _pinned_humongous_start; } + + // Macro-properties: + bool is_alloc_allowed() const { return is_empty() || is_regular() || _state == _pinned; } + bool is_stw_move_allowed() const { return is_regular() || _state == _cset || (ShenandoahHumongousMoves && _state 
== _humongous_start); } + + RegionState state() const { return _state; } + int state_ordinal() const { return region_state_to_ordinal(_state); } + + void record_pin(); + void record_unpin(); + size_t pin_count() const; + +private: + static size_t RegionCount; + static size_t RegionSizeBytes; + static size_t RegionSizeWords; + static size_t RegionSizeBytesShift; + static size_t RegionSizeWordsShift; + static size_t RegionSizeBytesMask; + static size_t RegionSizeWordsMask; + static size_t HumongousThresholdBytes; + static size_t HumongousThresholdWords; + static size_t MaxTLABSizeBytes; + static size_t MaxTLABSizeWords; + + // Global allocation counter, increased for each allocation under Shenandoah heap lock. + // Padded to avoid false sharing with the read-only fields above. + struct PaddedAllocSeqNum { + DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(uint64_t)); + uint64_t value; + DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0); + + PaddedAllocSeqNum() { + // start with 1, reserve 0 for uninitialized value + value = 1; + } + }; + + static PaddedAllocSeqNum _alloc_seq_num; + + // Never updated fields + ShenandoahHeap* _heap; + MemRegion _reserved; + size_t _region_number; + + // Rarely updated fields + HeapWord* _new_top; + double _empty_time; + + // Seldom updated fields + RegionState _state; + + // Frequently updated fields + size_t _tlab_allocs; + size_t _gclab_allocs; + size_t _shared_allocs; + + uint64_t _seqnum_first_alloc_mutator; + uint64_t _seqnum_first_alloc_gc; + uint64_t _seqnum_last_alloc_mutator; + uint64_t _seqnum_last_alloc_gc; + + volatile size_t _live_data; + volatile size_t _critical_pins; + + // Claim some space at the end to protect next region + DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0); + +public: + ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start, size_t size_words, size_t index, bool committed); + + static const size_t MIN_NUM_REGIONS = 10; + + static void setup_sizes(size_t max_heap_size); + + double empty_time() { + return _empty_time; + } + + inline static size_t required_regions(size_t bytes) { + return (bytes + ShenandoahHeapRegion::region_size_bytes() - 1) >> ShenandoahHeapRegion::region_size_bytes_shift(); + } + + inline static size_t region_count() { + return ShenandoahHeapRegion::RegionCount; + } + + inline static size_t region_size_bytes() { + return ShenandoahHeapRegion::RegionSizeBytes; + } + + inline static size_t region_size_words() { + return ShenandoahHeapRegion::RegionSizeWords; + } + + inline static size_t region_size_bytes_shift() { + return ShenandoahHeapRegion::RegionSizeBytesShift; + } + + inline static size_t region_size_words_shift() { + return ShenandoahHeapRegion::RegionSizeWordsShift; + } + + inline static size_t region_size_bytes_mask() { + return ShenandoahHeapRegion::RegionSizeBytesMask; + } + + inline static size_t region_size_words_mask() { + return ShenandoahHeapRegion::RegionSizeWordsMask; + } + + // Convert to jint with sanity checking + inline static jint region_size_bytes_jint() { + assert (ShenandoahHeapRegion::RegionSizeBytes <= (size_t)max_jint, "sanity"); + return (jint)ShenandoahHeapRegion::RegionSizeBytes; + } + + // Convert to jint with sanity checking + inline static jint region_size_words_jint() { + assert (ShenandoahHeapRegion::RegionSizeWords <= (size_t)max_jint, "sanity"); + return (jint)ShenandoahHeapRegion::RegionSizeWords; + } + + // Convert to jint with sanity checking + inline static jint region_size_bytes_shift_jint() { + assert (ShenandoahHeapRegion::RegionSizeBytesShift <= 
(size_t)max_jint, "sanity"); + return (jint)ShenandoahHeapRegion::RegionSizeBytesShift; + } + + // Convert to jint with sanity checking + inline static jint region_size_words_shift_jint() { + assert (ShenandoahHeapRegion::RegionSizeWordsShift <= (size_t)max_jint, "sanity"); + return (jint)ShenandoahHeapRegion::RegionSizeWordsShift; + } + + inline static size_t humongous_threshold_bytes() { + return ShenandoahHeapRegion::HumongousThresholdBytes; + } + + inline static size_t humongous_threshold_words() { + return ShenandoahHeapRegion::HumongousThresholdWords; + } + + inline static size_t max_tlab_size_bytes() { + return ShenandoahHeapRegion::MaxTLABSizeBytes; + } + + inline static size_t max_tlab_size_words() { + return ShenandoahHeapRegion::MaxTLABSizeWords; + } + + static uint64_t seqnum_current_alloc() { + // Last used seq number + return _alloc_seq_num.value - 1; + } + + size_t region_number() const; + + // Allocation (return NULL if full) + inline HeapWord* allocate(size_t word_size, ShenandoahAllocRequest::Type type); + + HeapWord* allocate(size_t word_size) shenandoah_not_implemented_return(NULL) + + void clear_live_data(); + void set_live_data(size_t s); + + // Increase live data for newly allocated region + inline void increase_live_data_alloc_words(size_t s); + + // Increase live data for region scanned with GC + inline void increase_live_data_gc_words(size_t s); + + bool has_live() const; + size_t get_live_data_bytes() const; + size_t get_live_data_words() const; + + void print_on(outputStream* st) const; + + size_t garbage() const; + + void recycle(); + + void oop_iterate(OopIterateClosure* cl); + + HeapWord* block_start_const(const void* p) const; + + bool in_collection_set() const; + + // Find humongous start region that this region belongs to + ShenandoahHeapRegion* humongous_start_region() const; + + CompactibleSpace* next_compaction_space() const shenandoah_not_implemented_return(NULL); + void prepare_for_compaction(CompactPoint* cp) shenandoah_not_implemented; + void adjust_pointers() shenandoah_not_implemented; + void compact() shenandoah_not_implemented; + + void set_new_top(HeapWord* new_top) { _new_top = new_top; } + HeapWord* new_top() const { return _new_top; } + + inline void adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t); + void reset_alloc_metadata_to_shared(); + void reset_alloc_metadata(); + size_t get_shared_allocs() const; + size_t get_tlab_allocs() const; + size_t get_gclab_allocs() const; + + uint64_t seqnum_first_alloc() const { + if (_seqnum_first_alloc_mutator == 0) return _seqnum_first_alloc_gc; + if (_seqnum_first_alloc_gc == 0) return _seqnum_first_alloc_mutator; + return MIN2(_seqnum_first_alloc_mutator, _seqnum_first_alloc_gc); + } + + uint64_t seqnum_last_alloc() const { + return MAX2(_seqnum_last_alloc_mutator, _seqnum_last_alloc_gc); + } + + uint64_t seqnum_first_alloc_mutator() const { + return _seqnum_first_alloc_mutator; + } + + uint64_t seqnum_last_alloc_mutator() const { + return _seqnum_last_alloc_mutator; + } + + uint64_t seqnum_first_alloc_gc() const { + return _seqnum_first_alloc_gc; + } + + uint64_t seqnum_last_alloc_gc() const { + return _seqnum_last_alloc_gc; + } + +private: + void do_commit(); + void do_uncommit(); + + void oop_iterate_objects(OopIterateClosure* cl); + void oop_iterate_humongous(OopIterateClosure* cl); + + inline void internal_increase_live_data(size_t s); + + void set_state(RegionState to); +}; + +#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP --- /dev/null 2020-01-17 11:46:19.065201212 
+0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp 2020-01-17 17:10:09.814130495 +0100 @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP + +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.hpp" +#include "gc/shenandoah/shenandoahPacer.inline.hpp" +#include "runtime/atomic.hpp" + +HeapWord* ShenandoahHeapRegion::allocate(size_t size, ShenandoahAllocRequest::Type type) { + _heap->assert_heaplock_or_safepoint(); + + assert(is_object_aligned(size), "alloc size breaks alignment: " SIZE_FORMAT, size); + + HeapWord* obj = top(); + if (pointer_delta(end(), obj) >= size) { + make_regular_allocation(); + adjust_alloc_metadata(type, size); + + HeapWord* new_top = obj + size; + set_top(new_top); + + assert(is_object_aligned(new_top), "new top breaks alignment: " PTR_FORMAT, p2i(new_top)); + assert(is_object_aligned(obj), "obj is not aligned: " PTR_FORMAT, p2i(obj)); + + return obj; + } else { + return NULL; + } +} + +inline void ShenandoahHeapRegion::adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t size) { + bool is_first_alloc = (top() == bottom()); + + switch (type) { + case ShenandoahAllocRequest::_alloc_shared: + case ShenandoahAllocRequest::_alloc_tlab: + _seqnum_last_alloc_mutator = _alloc_seq_num.value++; + if (is_first_alloc) { + assert (_seqnum_first_alloc_mutator == 0, "Region " SIZE_FORMAT " metadata is correct", _region_number); + _seqnum_first_alloc_mutator = _seqnum_last_alloc_mutator; + } + break; + case ShenandoahAllocRequest::_alloc_shared_gc: + case ShenandoahAllocRequest::_alloc_gclab: + _seqnum_last_alloc_gc = _alloc_seq_num.value++; + if (is_first_alloc) { + assert (_seqnum_first_alloc_gc == 0, "Region " SIZE_FORMAT " metadata is correct", _region_number); + _seqnum_first_alloc_gc = _seqnum_last_alloc_gc; + } + break; + default: + ShouldNotReachHere(); + } + + switch (type) { + case ShenandoahAllocRequest::_alloc_shared: + case ShenandoahAllocRequest::_alloc_shared_gc: + _shared_allocs += size; + break; + case ShenandoahAllocRequest::_alloc_tlab: + _tlab_allocs += size; + break; + case ShenandoahAllocRequest::_alloc_gclab: + _gclab_allocs += size; + break; + default: + ShouldNotReachHere(); + } +} + +inline void ShenandoahHeapRegion::increase_live_data_alloc_words(size_t s) { + internal_increase_live_data(s); +} + +inline void ShenandoahHeapRegion::increase_live_data_gc_words(size_t s) { + 
internal_increase_live_data(s); + if (ShenandoahPacing) { + _heap->pacer()->report_mark(s); + } +} + +inline void ShenandoahHeapRegion::internal_increase_live_data(size_t s) { + size_t new_live_data = Atomic::add(s, &_live_data); +#ifdef ASSERT + size_t live_bytes = new_live_data * HeapWordSize; + size_t used_bytes = used(); + assert(live_bytes <= used_bytes, + "can't have more live data than used: " SIZE_FORMAT ", " SIZE_FORMAT, live_bytes, used_bytes); +#endif +} + +#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp 2020-01-17 17:10:10.421130462 +0100 @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.hpp" +#include "gc/shenandoah/shenandoahHeapRegionSet.hpp" +#include "gc/shenandoah/shenandoahHeapRegionCounters.hpp" +#include "memory/resourceArea.hpp" +#include "runtime/perfData.inline.hpp" + +ShenandoahHeapRegionCounters::ShenandoahHeapRegionCounters() : + _name_space(NULL), + _last_sample_millis(0) +{ + if (UsePerfData && ShenandoahRegionSampling) { + EXCEPTION_MARK; + ResourceMark rm; + ShenandoahHeap* heap = ShenandoahHeap::heap(); + size_t num_regions = heap->num_regions(); + const char* cns = PerfDataManager::name_space("shenandoah", "regions"); + _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC); + strcpy(_name_space, cns); + + const char* cname = PerfDataManager::counter_name(_name_space, "timestamp"); + _timestamp = PerfDataManager::create_long_variable(SUN_GC, cname, PerfData::U_None, CHECK); + + cname = PerfDataManager::counter_name(_name_space, "max_regions"); + PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None, num_regions, CHECK); + + cname = PerfDataManager::counter_name(_name_space, "region_size"); + PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None, ShenandoahHeapRegion::region_size_bytes() >> 10, CHECK); + + cname = PerfDataManager::counter_name(_name_space, "status"); + _status = PerfDataManager::create_long_variable(SUN_GC, cname, + PerfData::U_None, CHECK); + + _regions_data = NEW_C_HEAP_ARRAY(PerfVariable*, num_regions, mtGC); + for (uint i = 0; i < num_regions; i++) { + const char* reg_name = PerfDataManager::name_space(_name_space, "region", i); + const char* data_name = PerfDataManager::counter_name(reg_name, "data"); + const char* ns = PerfDataManager::ns_to_string(SUN_GC); + const char* fullname = PerfDataManager::counter_name(ns,
data_name); + assert(!PerfDataManager::exists(fullname), "must not exist"); + _regions_data[i] = PerfDataManager::create_long_variable(SUN_GC, data_name, + PerfData::U_None, CHECK); + } + } +} + +ShenandoahHeapRegionCounters::~ShenandoahHeapRegionCounters() { + if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space); +} + +void ShenandoahHeapRegionCounters::update() { + if (ShenandoahRegionSampling) { + jlong current = os::javaTimeMillis(); + jlong last = _last_sample_millis; + if (current - last > ShenandoahRegionSamplingRate && + Atomic::cmpxchg(current, &_last_sample_millis, last) == last) { + + ShenandoahHeap* heap = ShenandoahHeap::heap(); + jlong status = 0; + if (heap->is_concurrent_mark_in_progress()) status |= 1 << 0; + if (heap->is_evacuation_in_progress()) status |= 1 << 1; + if (heap->is_update_refs_in_progress()) status |= 1 << 2; + if (heap->is_concurrent_traversal_in_progress()) status |= 1 << 3; + _status->set_value(status); + + _timestamp->set_value(os::elapsed_counter()); + + size_t num_regions = heap->num_regions(); + + { + ShenandoahHeapLocker locker(heap->lock()); + size_t rs = ShenandoahHeapRegion::region_size_bytes(); + for (uint i = 0; i < num_regions; i++) { + ShenandoahHeapRegion* r = heap->get_region(i); + jlong data = 0; + data |= ((100 * r->used() / rs) & PERCENT_MASK) << USED_SHIFT; + data |= ((100 * r->get_live_data_bytes() / rs) & PERCENT_MASK) << LIVE_SHIFT; + data |= ((100 * r->get_tlab_allocs() / rs) & PERCENT_MASK) << TLAB_SHIFT; + data |= ((100 * r->get_gclab_allocs() / rs) & PERCENT_MASK) << GCLAB_SHIFT; + data |= ((100 * r->get_shared_allocs() / rs) & PERCENT_MASK) << SHARED_SHIFT; + data |= (r->state_ordinal() & STATUS_MASK) << STATUS_SHIFT; + _regions_data[i]->set_value(data); + } + } + + } + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.hpp 2020-01-17 17:10:11.033130428 +0100 @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONCOUNTERS_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONCOUNTERS_HPP + +#include "memory/allocation.hpp" + +/** + * This provides the following in JVMStat: + * + * constants: + * - sun.gc.shenandoah.regions.timestamp the timestamp for this sample + * - sun.gc.shenandoah.regions.max_regions maximum number of regions + * - sun.gc.shenandoah.regions.region_size size per region, in kilobytes + * + * variables: + * - sun.gc.shenandoah.regions.status current GC status: + * - bit 0 set when marking in progress + * - bit 1 set when evacuation in progress + * - bit 2 set when update refs in progress + * - bit 3 set when traversal in progress + * + * one variable counter per region, with $max_regions (see above) counters: + * - sun.gc.shenandoah.regions.region.$i.data + * where $i is the region number, 0 <= $i < $max_regions + * + * .data is in the following format: + * - bits 0-6 used memory in percent + * - bits 7-13 live memory in percent + * - bits 14-20 tlab allocated memory in percent + * - bits 21-27 gclab allocated memory in percent + * - bits 28-34 shared allocated memory in percent + * - bits 35-41 <reserved> + * - bits 42-50 <reserved> + * - bits 51-57 <reserved> + * - bits 58-63 status + * - bits describe the state as recorded in ShenandoahHeapRegion + */ +class ShenandoahHeapRegionCounters : public CHeapObj<mtGC> { +private: + static const jlong PERCENT_MASK = 0x7f; + static const jlong STATUS_MASK = 0x3f; + + static const jlong USED_SHIFT = 0; + static const jlong LIVE_SHIFT = 7; + static const jlong TLAB_SHIFT = 14; + static const jlong GCLAB_SHIFT = 21; + static const jlong SHARED_SHIFT = 28; + + static const jlong STATUS_SHIFT = 58; + + char* _name_space; + PerfLongVariable** _regions_data; + PerfLongVariable* _timestamp; + PerfLongVariable* _status; + volatile jlong _last_sample_millis; + +public: + ShenandoahHeapRegionCounters(); + ~ShenandoahHeapRegionCounters(); + void update(); +}; + +#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONCOUNTERS_HPP
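The packed .data layout above is easiest to sanity-check from the consumer side. Below is a minimal sketch of a jvmstat-client-side decoder for one sun.gc.shenandoah.regions.region.$i.data sample, written against the masks and shifts declared in this header; the helper function itself is hypothetical and not part of the patch.

#include <stdint.h>
#include <stdio.h>

// Hypothetical decoder, mirroring PERCENT_MASK/STATUS_MASK and the *_SHIFT
// constants declared in shenandoahHeapRegionCounters.hpp above.
static void decode_region_data(uint64_t data) {
  const uint64_t PERCENT_MASK = 0x7f;
  const uint64_t STATUS_MASK  = 0x3f;
  printf("used:   %u%%\n", (unsigned)((data >> 0)  & PERCENT_MASK));
  printf("live:   %u%%\n", (unsigned)((data >> 7)  & PERCENT_MASK));
  printf("tlab:   %u%%\n", (unsigned)((data >> 14) & PERCENT_MASK));
  printf("gclab:  %u%%\n", (unsigned)((data >> 21) & PERCENT_MASK));
  printf("shared: %u%%\n", (unsigned)((data >> 28) & PERCENT_MASK));
  printf("state:  %u\n",   (unsigned)((data >> 58) & STATUS_MASK)); // RegionState ordinal
}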
--- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp 2020-01-17 17:10:11.645130394 +0100 @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.hpp" +#include "gc/shenandoah/shenandoahUtils.hpp" +#include "runtime/atomic.hpp" +#include "utilities/copy.hpp" + +ShenandoahHeapRegionSetIterator::ShenandoahHeapRegionSetIterator(const ShenandoahHeapRegionSet* const set) : + _set(set), _heap(ShenandoahHeap::heap()), _current_index(0) {} + +void ShenandoahHeapRegionSetIterator::reset(const ShenandoahHeapRegionSet* const set) { + _set = set; + _current_index = 0; +} + +ShenandoahHeapRegionSet::ShenandoahHeapRegionSet() : + _heap(ShenandoahHeap::heap()), + _map_size(_heap->num_regions()), + _region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()), + _set_map(NEW_C_HEAP_ARRAY(jbyte, _map_size, mtGC)), + _biased_set_map(_set_map - ((uintx)_heap->base() >> _region_size_bytes_shift)), + _region_count(0) +{ + // Use 1-byte data type + STATIC_ASSERT(sizeof(jbyte) == 1); + + // Initialize cset map + Copy::zero_to_bytes(_set_map, _map_size); +} + +ShenandoahHeapRegionSet::~ShenandoahHeapRegionSet() { + FREE_C_HEAP_ARRAY(jbyte, _set_map); +} + +void ShenandoahHeapRegionSet::add_region(ShenandoahHeapRegion* r) { + assert(!is_in(r), "Already in collection set"); + _set_map[r->region_number()] = 1; + _region_count++; +} + +bool ShenandoahHeapRegionSet::add_region_check_for_duplicates(ShenandoahHeapRegion* r) { + if (!is_in(r)) { + add_region(r); + return true; + } else { + return false; + } +} + +void ShenandoahHeapRegionSet::remove_region(ShenandoahHeapRegion* r) { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); + assert(Thread::current()->is_VM_thread(), "Must be VMThread"); + assert(is_in(r), "Not in region set"); + _set_map[r->region_number()] = 0; + _region_count--; +} + +void ShenandoahHeapRegionSet::clear() { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); + Copy::zero_to_bytes(_set_map, _map_size); + + _region_count = 0; +} + +ShenandoahHeapRegion* ShenandoahHeapRegionSetIterator::claim_next() { + size_t num_regions = _heap->num_regions(); + if (_current_index >= (jint)num_regions) { + return NULL; + } + + jint saved_current = _current_index; + size_t index = (size_t)saved_current; + + while (index < num_regions) { + if (_set->is_in(index)) { + jint cur = Atomic::cmpxchg((jint)(index + 1), &_current_index, saved_current); + assert(cur >= (jint)saved_current, "Must move forward"); + if (cur == saved_current) { + assert(_set->is_in(index), "Invariant"); + return _heap->get_region(index); + } else { + index = (size_t)cur; + saved_current = cur; + } + } else { + index++; + } + } + return NULL; +} + +ShenandoahHeapRegion* ShenandoahHeapRegionSetIterator::next() { + size_t num_regions = _heap->num_regions(); + for (size_t index = (size_t)_current_index; index < num_regions; index++) { + if (_set->is_in(index)) { + _current_index = (jint)(index + 1); + return _heap->get_region(index); + } + } + + return NULL; +} + +void ShenandoahHeapRegionSet::print_on(outputStream* out) const { + out->print_cr("Region Set : " SIZE_FORMAT "", count()); + + debug_only(size_t regions = 0;) + for (size_t index = 0; index < _heap->num_regions(); index++) { + if (is_in(index)) { + _heap->get_region(index)->print_on(out); + debug_only(regions++;) + } + } + assert(regions == count(), "Must match"); +}
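claim_next() above is the multi-threaded draining path: GC workers share one cursor and advance it with a CAS, so each in-set region is handed to exactly one thread. A standalone model of that protocol, using std::atomic in place of HotSpot's Atomic::cmpxchg; the function name and the bool map are illustrative, not from the patch.

#include <atomic>

// Model of ShenandoahHeapRegionSetIterator::claim_next(): several threads
// share 'cursor'; a candidate index is claimed by CASing the cursor past it,
// so exactly one thread returns each in-set index.
static std::atomic<int> cursor{0};

int claim_next(const bool* in_set, int num_regions) {
  int saved = cursor.load();
  int index = saved;
  while (index < num_regions) {
    if (in_set[index]) {
      if (cursor.compare_exchange_strong(saved, index + 1)) {
        return index;   // this thread won region 'index'
      }
      index = saved;    // lost the race: 'saved' now holds the winner's cursor
    } else {
      index++;          // skip non-members without touching the shared cursor
    }
  }
  return -1;            // set exhausted
}

Each worker would then loop calling claim_next() until it returns -1. The single-threaded next() variant in the patch skips the CAS entirely, because only one thread ever moves the cursor there.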
--- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.hpp 2020-01-17 17:10:12.248130361 +0100 @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_HPP + +#include "memory/allocation.hpp" +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.hpp" + +class ShenandoahHeapRegionSet; + +class ShenandoahHeapRegionSetIterator : public StackObj { +private: + const ShenandoahHeapRegionSet* _set; + ShenandoahHeap* const _heap; + + DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile jint)); + volatile jint _current_index; + DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0); + + // No implicit copying: iterators should be passed by reference to capture the state + ShenandoahHeapRegionSetIterator(const ShenandoahHeapRegionSetIterator& that); + ShenandoahHeapRegionSetIterator& operator=(const ShenandoahHeapRegionSetIterator& o); + +public: + ShenandoahHeapRegionSetIterator(const ShenandoahHeapRegionSet* const set); + + // Reset existing iterator to new set + void reset(const ShenandoahHeapRegionSet* const set); + + // MT version + ShenandoahHeapRegion* claim_next(); + + // Single-thread version + ShenandoahHeapRegion* next(); +}; + +class ShenandoahHeapRegionSet : public CHeapObj<mtGC> { + friend class ShenandoahHeap; +private: + ShenandoahHeap* const _heap; + size_t const _map_size; + size_t const _region_size_bytes_shift; + jbyte* const _set_map; + // Bias set map's base address for fast test if an oop is in set + jbyte* const _biased_set_map; + size_t _region_count; + +public: + ShenandoahHeapRegionSet(); + ~ShenandoahHeapRegionSet(); + + // Add region to set + void add_region(ShenandoahHeapRegion* r); + bool add_region_check_for_duplicates(ShenandoahHeapRegion* r); + + // Remove region from set + void remove_region(ShenandoahHeapRegion* r); + + size_t count() const { return _region_count; } + bool is_empty() const { return _region_count == 0; } + + inline bool is_in(ShenandoahHeapRegion* r) const; + inline bool is_in(size_t region_number) const; + inline bool is_in(HeapWord* p) const; + + void print_on(outputStream* out) const; + + void clear(); + +private: + jbyte* biased_map_address() const { + return _biased_set_map; + } +}; + +#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_HPP
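Note how the iterator above pads _current_index on both sides with DEFINE_PAD_MINUS_SIZE, the same idiom used for ShenandoahHeapRegion::_alloc_seq_num earlier in this patch: the hot CAS target gets a cache line to itself so cursor traffic does not false-share with neighboring fields. A generic standalone rendering of the idiom, not HotSpot's macro; 64 is an assumed cache line size standing in for DEFAULT_CACHE_LINE_SIZE.

#include <atomic>

// A cursor that owns its cache line: the pad arrays keep neighboring data
// off the line holding 'value', so contended CAS traffic on the cursor does
// not invalidate unrelated fields (and vice versa).
struct PaddedCursor {
  char pad0[64 - sizeof(std::atomic<int>)];  // pad before, minus field size
  std::atomic<int> value;
  char pad1[64];                             // full line of padding after
};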
--- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.inline.hpp 2020-01-17 17:10:12.846130328 +0100 @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_INLINE_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_INLINE_HPP + +#include "gc/shenandoah/shenandoahHeapRegionSet.hpp" +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.hpp" + +bool ShenandoahHeapRegionSet::is_in(size_t region_number) const { + assert(region_number < _heap->num_regions(), "Sanity"); + return _set_map[region_number] == 1; +} + +bool ShenandoahHeapRegionSet::is_in(ShenandoahHeapRegion* r) const { + return is_in(r->region_number()); +} + +bool ShenandoahHeapRegionSet::is_in(HeapWord* p) const { + assert(_heap->is_in(p), "Must be in the heap"); + uintx index = ((uintx) p) >> _region_size_bytes_shift; + // no need to subtract the bottom of the heap from p, + // _biased_set_map is biased + return _biased_set_map[index] == 1; +} + +#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_INLINE_HPP
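The biased-map trick above deserves a worked example. With an assumed heap base of 4 GB and 4 MB regions (shift 22), base >> shift is 1024, so _biased_set_map points 1024 bytes before _set_map; indexing it with p >> shift then lands exactly on _set_map[region_number(p)] without subtracting the heap base on every membership test. A standalone sketch with plain integers follows; the values are assumptions, and the out-of-bounds pointer bias is intentional, mirroring the constructor (HotSpot relies on a flat 64-bit address space here).

#include <assert.h>
#include <stdint.h>

int main() {
  // Assumed example values, not from the patch: heap based at 4 GB,
  // 4 MB regions, so region_size_bytes_shift == 22.
  const uintptr_t heap_base = 0x100000000ull;   // 4 GB
  const unsigned  shift     = 22;               // log2(4 MB)
  int8_t set_map[16] = {0};
  set_map[5] = 1;                               // pretend region 5 is in the set

  // Bias the map exactly like the ShenandoahHeapRegionSet constructor:
  //   _biased_set_map = _set_map - (heap->base() >> shift)
  int8_t* biased_set_map = set_map - (heap_base >> shift);   // minus 1024

  // Any address inside region 5 now tests membership with one shift + load,
  // which is all is_in(HeapWord* p) above does:
  uintptr_t p = heap_base + 5 * (1ull << shift) + 100;
  assert(biased_set_map[p >> shift] == 1);
  return 0;
}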
--- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahHeuristics.cpp 2020-01-17 17:10:13.449130295 +0100 @@ -0,0 +1,335 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "gc/shared/gcCause.hpp" +#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp" +#include "gc/shenandoah/shenandoahCollectorPolicy.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.hpp" +#include "gc/shenandoah/shenandoahHeuristics.hpp" +#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp" +#include "logging/log.hpp" +#include "logging/logTag.hpp" + +int ShenandoahHeuristics::compare_by_garbage(RegionData a, RegionData b) { + if (a._garbage > b._garbage) + return -1; + else if (a._garbage < b._garbage) + return 1; + else return 0; +} + +int ShenandoahHeuristics::compare_by_garbage_then_alloc_seq_ascending(RegionData a, RegionData b) { + int r = compare_by_garbage(a, b); + if (r != 0) { + return r; + } + return compare_by_alloc_seq_ascending(a, b); +} + +int ShenandoahHeuristics::compare_by_alloc_seq_ascending(RegionData a, RegionData b) { + if (a._seqnum_last_alloc == b._seqnum_last_alloc) + return 0; + else if (a._seqnum_last_alloc < b._seqnum_last_alloc) + return -1; + else return 1; +} + +int ShenandoahHeuristics::compare_by_alloc_seq_descending(RegionData a, RegionData b) { + return -compare_by_alloc_seq_ascending(a, b); +} + +ShenandoahHeuristics::ShenandoahHeuristics() : + _update_refs_early(false), + _update_refs_adaptive(false), + _region_data(NULL), + _region_data_size(0), + _degenerated_cycles_in_a_row(0), + _successful_cycles_in_a_row(0), + _bytes_in_cset(0), + _cycle_start(os::elapsedTime()), + _last_cycle_end(0), + _gc_times_learned(0), + _gc_time_penalties(0), + _gc_time_history(new TruncatedSeq(5)), + _metaspace_oom() +{ + if (strcmp(ShenandoahUpdateRefsEarly, "on") == 0 || + strcmp(ShenandoahUpdateRefsEarly, "true") == 0 ) { + _update_refs_early = true; + } else if (strcmp(ShenandoahUpdateRefsEarly, "off") == 0 || + strcmp(ShenandoahUpdateRefsEarly, "false") == 0 ) { + _update_refs_early = false; + } else if (strcmp(ShenandoahUpdateRefsEarly, "adaptive") == 0) { + _update_refs_adaptive = true; + _update_refs_early = true; + } else { + vm_exit_during_initialization("Unknown -XX:ShenandoahUpdateRefsEarly option: %s", ShenandoahUpdateRefsEarly); + } + + // No unloading during concurrent mark? Communicate that to heuristics + if (!ClassUnloadingWithConcurrentMark) { + FLAG_SET_DEFAULT(ShenandoahUnloadClassesFrequency, 0); + } +} + +ShenandoahHeuristics::~ShenandoahHeuristics() { + if (_region_data != NULL) { + FREE_C_HEAP_ARRAY(RegionData, _region_data); + } +} + +ShenandoahHeuristics::RegionData* ShenandoahHeuristics::get_region_data_cache(size_t num) { + RegionData* res = _region_data; + if (res == NULL) { + res = NEW_C_HEAP_ARRAY(RegionData, num, mtGC); + _region_data = res; + _region_data_size = num; + } else if (_region_data_size < num) { + res = REALLOC_C_HEAP_ARRAY(RegionData, _region_data, num, mtGC); + _region_data = res; + _region_data_size = num; + } + return res; +} + +void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) { + assert(collection_set->count() == 0, "Must be empty"); + + ShenandoahHeap* heap = ShenandoahHeap::heap(); + + // Check all pinned regions have updated status before choosing the collection set. + heap->assert_pinned_region_status(); + + // Step 1. Build up the region candidates we care about, rejecting losers and accepting winners right away.
+ + size_t num_regions = heap->num_regions(); + + RegionData* candidates = get_region_data_cache(num_regions); + + size_t cand_idx = 0; + + size_t total_garbage = 0; + + size_t immediate_garbage = 0; + size_t immediate_regions = 0; + + size_t free = 0; + size_t free_regions = 0; + + ShenandoahMarkingContext* const ctx = heap->complete_marking_context(); + + for (size_t i = 0; i < num_regions; i++) { + ShenandoahHeapRegion* region = heap->get_region(i); + + size_t garbage = region->garbage(); + total_garbage += garbage; + + if (region->is_empty()) { + free_regions++; + free += ShenandoahHeapRegion::region_size_bytes(); + } else if (region->is_regular()) { + if (!region->has_live()) { + // We can recycle it right away and put it in the free set. + immediate_regions++; + immediate_garbage += garbage; + region->make_trash_immediate(); + } else { + // This is our candidate for later consideration. + candidates[cand_idx]._region = region; + candidates[cand_idx]._garbage = garbage; + cand_idx++; + } + } else if (region->is_humongous_start()) { + // Reclaim humongous regions here, and count them as the immediate garbage +#ifdef ASSERT + bool reg_live = region->has_live(); + bool bm_live = ctx->is_marked(oop(region->bottom())); + assert(reg_live == bm_live, + "Humongous liveness and marks should agree. Region live: %s; Bitmap live: %s; Region Live Words: " SIZE_FORMAT, + BOOL_TO_STR(reg_live), BOOL_TO_STR(bm_live), region->get_live_data_words()); +#endif + if (!region->has_live()) { + heap->trash_humongous_region_at(region); + + // Count only the start. Continuations would be counted on "trash" path + immediate_regions++; + immediate_garbage += garbage; + } + } else if (region->is_trash()) { + // Count in just trashed collection set, during coalesced CM-with-UR + immediate_regions++; + immediate_garbage += garbage; + } + } + + // Step 2. Look back at garbage statistics, and decide if we want to collect anything, + // given the amount of immediately reclaimable garbage. If we do, figure out the collection set. + + assert (immediate_garbage <= total_garbage, + "Cannot have more immediate garbage than total garbage: " SIZE_FORMAT "%s vs " SIZE_FORMAT "%s", + byte_size_in_proper_unit(immediate_garbage), proper_unit_for_byte_size(immediate_garbage), + byte_size_in_proper_unit(total_garbage), proper_unit_for_byte_size(total_garbage)); + + size_t immediate_percent = total_garbage == 0 ? 0 : (immediate_garbage * 100 / total_garbage); + + if (immediate_percent <= ShenandoahImmediateThreshold) { + choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free); + collection_set->update_region_status(); + + size_t cset_percent = total_garbage == 0 ? 
0 : (collection_set->garbage() * 100 / total_garbage); + log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "%s (" SIZE_FORMAT "%% of total), " SIZE_FORMAT "%s CSet, " SIZE_FORMAT " CSet regions", + byte_size_in_proper_unit(collection_set->garbage()), proper_unit_for_byte_size(collection_set->garbage()), + cset_percent, + byte_size_in_proper_unit(collection_set->live_data()), proper_unit_for_byte_size(collection_set->live_data()), + collection_set->count()); + } + + log_info(gc, ergo)("Immediate Garbage: " SIZE_FORMAT "%s (" SIZE_FORMAT "%% of total), " SIZE_FORMAT " regions", + byte_size_in_proper_unit(immediate_garbage), proper_unit_for_byte_size(immediate_garbage), + immediate_percent, immediate_regions); +} + +void ShenandoahHeuristics::record_gc_start() { + // Do nothing +} + +void ShenandoahHeuristics::record_gc_end() { + // Do nothing +} + +void ShenandoahHeuristics::record_cycle_start() { + _cycle_start = os::elapsedTime(); +} + +void ShenandoahHeuristics::record_cycle_end() { + _last_cycle_end = os::elapsedTime(); +} + +void ShenandoahHeuristics::record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs) { + // Do nothing +} + +bool ShenandoahHeuristics::should_start_update_refs() { + return _update_refs_early; +} + +bool ShenandoahHeuristics::should_start_gc() const { + // Perform GC to cleanup metaspace + if (has_metaspace_oom()) { + // Some of vmTestbase/metaspace tests depend on following line to count GC cycles + log_info(gc)("Trigger: %s", GCCause::to_string(GCCause::_metadata_GC_threshold)); + return true; + } + + double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000; + bool periodic_gc = (last_time_ms > ShenandoahGuaranteedGCInterval); + if (periodic_gc) { + log_info(gc)("Trigger: Time since last GC (%.0f ms) is larger than guaranteed interval (" UINTX_FORMAT " ms)", + last_time_ms, ShenandoahGuaranteedGCInterval); + } + return periodic_gc; +} + +bool ShenandoahHeuristics::should_degenerate_cycle() { + return _degenerated_cycles_in_a_row <= ShenandoahFullGCThreshold; +} + +void ShenandoahHeuristics::record_success_concurrent() { + _degenerated_cycles_in_a_row = 0; + _successful_cycles_in_a_row++; + + _gc_time_history->add(time_since_last_gc()); + _gc_times_learned++; + _gc_time_penalties -= MIN2(_gc_time_penalties, Concurrent_Adjust); +} + +void ShenandoahHeuristics::record_success_degenerated() { + _degenerated_cycles_in_a_row++; + _successful_cycles_in_a_row = 0; + _gc_time_penalties += Degenerated_Penalty; +} + +void ShenandoahHeuristics::record_success_full() { + _degenerated_cycles_in_a_row = 0; + _successful_cycles_in_a_row++; + _gc_time_penalties += Full_Penalty; +} + +void ShenandoahHeuristics::record_allocation_failure_gc() { + _bytes_in_cset = 0; +} + +void ShenandoahHeuristics::record_requested_gc() { + _bytes_in_cset = 0; + + // Assume users call System.gc() when external state changes significantly, + // which forces us to re-learn the GC timings and allocation rates. + _gc_times_learned = 0; +} + +bool ShenandoahHeuristics::can_process_references() { + if (ShenandoahRefProcFrequency == 0) return false; + return true; +} + +bool ShenandoahHeuristics::should_process_references() { + if (!can_process_references()) return false; + size_t cycle = ShenandoahHeap::heap()->shenandoah_policy()->cycle_counter(); + // Process references every Nth GC cycle. 
+ return cycle % ShenandoahRefProcFrequency == 0; +} + +bool ShenandoahHeuristics::can_unload_classes() { + if (!ClassUnloading) return false; + return true; +} + +bool ShenandoahHeuristics::can_unload_classes_normal() { + if (!can_unload_classes()) return false; + if (has_metaspace_oom()) return true; + if (!ClassUnloadingWithConcurrentMark) return false; + if (ShenandoahUnloadClassesFrequency == 0) return false; + return true; +} + +bool ShenandoahHeuristics::should_unload_classes() { + if (!can_unload_classes_normal()) return false; + if (has_metaspace_oom()) return true; + size_t cycle = ShenandoahHeap::heap()->shenandoah_policy()->cycle_counter(); + // Unload classes every Nth GC cycle. + // This should not happen in the same cycle as process_references to amortize costs. + // Offsetting by one is enough to break the rendezvous when periods are equal. + // When periods are not equal, offsetting by one is just as good as any other guess. + return (cycle + 1) % ShenandoahUnloadClassesFrequency == 0; +} + +void ShenandoahHeuristics::initialize() { + // Nothing to do by default. +} + +double ShenandoahHeuristics::time_since_last_gc() const { + return os::elapsedTime() - _cycle_start; +}
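The (cycle + 1) offset in should_unload_classes() is a cheap way to keep reference processing and class unloading off the same cycle when their frequencies match, as the comment above argues. A quick standalone check of that claim, with assumed equal frequencies of 5:

#include <stdio.h>

int main() {
  const unsigned ref_freq = 5, unload_freq = 5;  // assumed equal periods
  for (unsigned cycle = 0; cycle < 100; cycle++) {
    int refs   = (cycle % ref_freq) == 0;        // should_process_references()
    int unload = ((cycle + 1) % unload_freq) == 0; // should_unload_classes()
    if (refs && unload) printf("clash at cycle %u\n", cycle); // never prints
  }
  return 0;
}

Reference processing fires at cycles 0, 5, 10, ...; unloading at 4, 9, 14, ...; the two never coincide, so their costs never stack in one cycle.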
--- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahHeuristics.hpp 2020-01-17 17:10:14.056130262 +0100 @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEURISTICS_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEURISTICS_HPP + +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahPhaseTimings.hpp" +#include "gc/shenandoah/shenandoahSharedVariables.hpp" +#include "memory/allocation.hpp" +#include "runtime/globals_extension.hpp" + +#define SHENANDOAH_ERGO_DISABLE_FLAG(name) \ + do { \ + if (FLAG_IS_DEFAULT(name) && (name)) { \ + log_info(gc)("Heuristics ergonomically sets -XX:-" #name); \ + FLAG_SET_DEFAULT(name, false); \ + } \ + } while (0) + +#define SHENANDOAH_ERGO_ENABLE_FLAG(name) \ + do { \ + if (FLAG_IS_DEFAULT(name) && !(name)) { \ + log_info(gc)("Heuristics ergonomically sets -XX:+" #name); \ + FLAG_SET_DEFAULT(name, true); \ + } \ + } while (0) + +#define SHENANDOAH_ERGO_OVERRIDE_DEFAULT(name, value) \ + do { \ + if (FLAG_IS_DEFAULT(name)) { \ + log_info(gc)("Heuristics ergonomically sets -XX:" #name "=" #value); \ + FLAG_SET_DEFAULT(name, value); \ + } \ + } while (0) + +#define SHENANDOAH_CHECK_FLAG_SET(name) \ + do { \ + if (!name) { \ + err_msg message("Heuristics needs -XX:+" #name " to work correctly"); \ + vm_exit_during_initialization("Error", message); \ + } \ + } while (0) + +class ShenandoahCollectionSet; +class ShenandoahHeapRegion; + +class ShenandoahHeuristics : public CHeapObj<mtGC> { + static const intx Concurrent_Adjust = 1; // recover from penalties + static const intx Degenerated_Penalty = 10; // how much to penalize average GC duration history on Degenerated GC + static const intx Full_Penalty = 20; // how much to penalize average GC duration history on Full GC + +protected: + typedef struct { + ShenandoahHeapRegion* _region; + size_t _garbage; + uint64_t _seqnum_last_alloc; + } RegionData; + + bool _update_refs_early; + bool _update_refs_adaptive; + + RegionData* _region_data; + size_t _region_data_size; + + uint _degenerated_cycles_in_a_row; + uint _successful_cycles_in_a_row; + + size_t _bytes_in_cset; + + double _cycle_start; + double _last_cycle_end; + + size_t _gc_times_learned; + intx _gc_time_penalties; + TruncatedSeq* _gc_time_history; + + // There may be many threads that contend to set this flag + ShenandoahSharedFlag _metaspace_oom; + + static int compare_by_garbage(RegionData a, RegionData b); + static int compare_by_garbage_then_alloc_seq_ascending(RegionData a, RegionData b); + static int compare_by_alloc_seq_ascending(RegionData a, RegionData b); + static int compare_by_alloc_seq_descending(RegionData a, RegionData b); + + RegionData* get_region_data_cache(size_t num); + + virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, + RegionData* data, size_t data_size, + size_t free) = 0; + +public: + ShenandoahHeuristics(); + virtual ~ShenandoahHeuristics(); + + void record_gc_start(); + + void record_gc_end(); + + void record_metaspace_oom() { _metaspace_oom.set(); } + void clear_metaspace_oom() { _metaspace_oom.unset(); } + bool has_metaspace_oom() const { return _metaspace_oom.is_set(); } + + virtual void record_cycle_start(); + + virtual void record_cycle_end(); + + virtual void record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs); + + virtual bool should_start_gc() const; + + virtual bool should_start_update_refs(); + + virtual bool should_degenerate_cycle(); + + virtual void record_success_concurrent(); + + virtual void record_success_degenerated(); + + virtual void record_success_full(); + + virtual void record_allocation_failure_gc(); + + virtual void record_requested_gc(); + + 
virtual void choose_collection_set(ShenandoahCollectionSet* collection_set); + + virtual bool can_process_references(); + virtual bool should_process_references(); + + virtual bool can_unload_classes(); + virtual bool can_unload_classes_normal(); + virtual bool should_unload_classes(); + + virtual const char* name() = 0; + virtual bool is_diagnostic() = 0; + virtual bool is_experimental() = 0; + virtual void initialize(); + + double time_since_last_gc() const; +}; + +#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEURISTICS_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahJfrSupport.cpp 2020-01-17 17:10:14.664130228 +0100 @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.hpp" +#include "gc/shenandoah/shenandoahJfrSupport.hpp" +#include "jfr/jfrEvents.hpp" +#if INCLUDE_JFR +#include "jfr/metadata/jfrSerializer.hpp" +#endif + +#if INCLUDE_JFR + +class ShenandoahHeapRegionStateConstant : public JfrSerializer { + friend class ShenandoahHeapRegion; +public: + virtual void serialize(JfrCheckpointWriter& writer) { + static const u4 nof_entries = ShenandoahHeapRegion::region_states_num(); + writer.write_count(nof_entries); + for (u4 i = 0; i < nof_entries; ++i) { + writer.write_key(i); + writer.write(ShenandoahHeapRegion::region_state_to_string((ShenandoahHeapRegion::RegionState)i)); + } + } +}; + +void ShenandoahJFRSupport::register_jfr_type_serializers() { + JfrSerializer::register_serializer(TYPE_SHENANDOAHHEAPREGIONSTATE, + false, + true, + new ShenandoahHeapRegionStateConstant()); +} +#endif + +class ShenandoahDumpHeapRegionInfoClosure : public ShenandoahHeapRegionClosure { +public: + virtual void heap_region_do(ShenandoahHeapRegion* r) { + EventShenandoahHeapRegionInformation evt; + evt.set_index(r->region_number()); + evt.set_state((u8)r->state()); + evt.set_start((uintptr_t)r->bottom()); + evt.set_used(r->used()); + evt.commit(); + } +}; + +void VM_ShenandoahSendHeapRegionInfoEvents::doit() { + ShenandoahDumpHeapRegionInfoClosure c; + ShenandoahHeap::heap()->heap_region_iterate(&c); +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahJfrSupport.hpp 2020-01-17 17:10:15.275130194 +0100 @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2019, Red Hat, Inc. All rights reserved. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHJFRSUPPORT_HPP +#define SHARE_GC_SHENANDOAH_SHENANDOAHJFRSUPPORT_HPP + +#include "runtime/vmOperations.hpp" + +class VM_ShenandoahSendHeapRegionInfoEvents : public VM_Operation { +public: + virtual void doit(); + virtual VMOp_Type type() const { return VMOp_HeapIterateOperation; } +}; + +class ShenandoahJFRSupport { +public: + static void register_jfr_type_serializers(); +}; + +#endif // SHARE_GC_SHENANDOAH_SHENANDOAHJFRSUPPORT_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahLock.hpp 2020-01-17 17:10:15.873130161 +0100 @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions.
+ * + */ + +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHLOCK_HPP +#define SHARE_GC_SHENANDOAH_SHENANDOAHLOCK_HPP + +#include "memory/allocation.hpp" +#include "runtime/thread.hpp" + +class ShenandoahLock { +private: + enum LockState { unlocked = 0, locked = 1 }; + + DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int)); + volatile int _state; + DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile Thread*)); + volatile Thread* _owner; + DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, 0); + +public: + ShenandoahLock() : _state(unlocked), _owner(NULL) {} + + void lock() { +#ifdef ASSERT + assert(_owner != Thread::current(), "reentrant locking attempt, would deadlock"); +#endif + Thread::SpinAcquire(&_state, "Shenandoah Heap Lock"); +#ifdef ASSERT + assert(_state == locked, "must be locked"); + assert(_owner == NULL, "must not be owned"); + _owner = Thread::current(); +#endif + } + + void unlock() { +#ifdef ASSERT + assert (_owner == Thread::current(), "sanity"); + _owner = NULL; +#endif + Thread::SpinRelease(&_state); + } + +#ifdef ASSERT + void assert_owned_by_current_thread() { + assert(_state == locked, "must be locked"); + assert(_owner == Thread::current(), "must be owned by current thread"); + } + + void assert_not_owned_by_current_thread() { + assert(_owner != Thread::current(), "must be not owned by current thread"); + } + + void assert_owned_by_current_thread_or_safepoint() { + Thread* thr = Thread::current(); + assert((_state == locked && _owner == thr) || + (SafepointSynchronize::is_at_safepoint() && thr->is_VM_thread()), + "must own heap lock or by VM thread at safepoint"); + } +#endif +}; + +class ShenandoahLocker : public StackObj { +private: + ShenandoahLock* const _lock; +public: + ShenandoahLocker(ShenandoahLock* lock) : _lock(lock) { + if (_lock != NULL) { + _lock->lock(); + } + } + + ~ShenandoahLocker() { + if (_lock != NULL) { + _lock->unlock(); + } + } +}; + +#endif // SHARE_GC_SHENANDOAH_SHENANDOAHLOCK_HPP
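ShenandoahLocker above is the RAII guard used throughout the rest of this patch: construction takes the lock, destruction releases it, so every return path out of the scope unlocks correctly (the heap-lock variant appears elsewhere in these files as ShenandoahHeapLocker, e.g. around region moves in Full GC and in the region sampler). A minimal usage sketch; the lock instance and function are hypothetical, assuming the header above is available.

// Hypothetical guarded update using the classes declared in shenandoahLock.hpp.
static ShenandoahLock _example_lock;

void update_example_state() {
  ShenandoahLocker locker(&_example_lock);  // lock() in the constructor
  // ... mutate state guarded by _example_lock; early returns are safe ...
}                                           // unlock() in ~ShenandoahLocker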
--- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp 2020-01-17 17:10:16.472130128 +0100 @@ -0,0 +1,881 @@ +/* + * Copyright (c) 2014, 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "code/codeCache.hpp" +#include "gc/shared/gcTraceTime.inline.hpp" +#include "gc/shared/preservedMarks.inline.hpp" +#include "gc/shenandoah/shenandoahForwarding.inline.hpp" +#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp" +#include "gc/shenandoah/shenandoahCollectionSet.hpp" +#include "gc/shenandoah/shenandoahFreeSet.hpp" +#include "gc/shenandoah/shenandoahPhaseTimings.hpp" +#include "gc/shenandoah/shenandoahMarkCompact.hpp" +#include "gc/shenandoah/shenandoahHeapRegionSet.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeuristics.hpp" +#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp" +#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp" +#include "gc/shenandoah/shenandoahTraversalGC.hpp" +#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp" +#include "gc/shenandoah/shenandoahUtils.hpp" +#include "gc/shenandoah/shenandoahVerifier.hpp" +#include "gc/shenandoah/shenandoahVMOperations.hpp" +#include "gc/shenandoah/shenandoahWorkerPolicy.hpp" +#include "memory/metaspace.hpp" +#include "oops/oop.inline.hpp" +#include "runtime/biasedLocking.hpp" +#include "runtime/thread.hpp" +#include "utilities/copy.hpp" +#include "utilities/growableArray.hpp" +#include "gc/shared/workgroup.hpp" + +ShenandoahMarkCompact::ShenandoahMarkCompact() : + _gc_timer(NULL), + _preserved_marks(new PreservedMarksSet(true)) {} + +void ShenandoahMarkCompact::initialize(GCTimer* gc_timer) { + _gc_timer = gc_timer; +} + +void ShenandoahMarkCompact::do_it(GCCause::Cause gc_cause) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + + if (ShenandoahVerify) { + heap->verifier()->verify_before_fullgc(); + } + + if (VerifyBeforeGC) { + Universe::verify(); + } + + heap->set_full_gc_in_progress(true); + + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint"); + assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped"); + + { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdumps); + heap->pre_full_gc_dump(_gc_timer); + } + + { + ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare); + // Full GC is supposed to recover from any GC state: + + // a0. Remember if we have forwarded objects + bool has_forwarded_objects = heap->has_forwarded_objects(); + + // a1. Cancel evacuation, if in progress + if (heap->is_evacuation_in_progress()) { + heap->set_evacuation_in_progress(false); + } + assert(!heap->is_evacuation_in_progress(), "sanity"); + + // a2. Cancel update-refs, if in progress + if (heap->is_update_refs_in_progress()) { + heap->set_update_refs_in_progress(false); + } + assert(!heap->is_update_refs_in_progress(), "sanity"); + + // a3. Cancel concurrent traversal GC, if in progress + if (heap->is_concurrent_traversal_in_progress()) { + heap->traversal_gc()->reset(); + heap->set_concurrent_traversal_in_progress(false); + } + + // b. Cancel concurrent mark, if in progress + if (heap->is_concurrent_mark_in_progress()) { + heap->concurrent_mark()->cancel(); + heap->stop_concurrent_marking(); + } + assert(!heap->is_concurrent_mark_in_progress(), "sanity"); + + // c. Reset the bitmaps for new marking + heap->reset_mark_bitmap(); + assert(heap->marking_context()->is_bitmap_clear(), "sanity"); + assert(!heap->marking_context()->is_complete(), "sanity"); + + // d. Abandon reference discovery and clear all discovered references.
+ ReferenceProcessor* rp = heap->ref_processor(); + rp->disable_discovery(); + rp->abandon_partial_discovery(); + rp->verify_no_references_recorded(); + + // e. Set back forwarded objects bit back, in case some steps above dropped it. + heap->set_has_forwarded_objects(has_forwarded_objects); + + // f. Sync pinned region status from the CP marks + heap->sync_pinned_region_status(); + + // The rest of prologue: + BiasedLocking::preserve_marks(); + _preserved_marks->init(heap->workers()->active_workers()); + } + + heap->make_parsable(true); + + CodeCache::gc_prologue(); + + OrderAccess::fence(); + + phase1_mark_heap(); + + // Once marking is done, which may have fixed up forwarded objects, we can drop it. + // Coming out of Full GC, we would not have any forwarded objects. + // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3. + heap->set_has_forwarded_objects(false); + + heap->set_full_gc_move_in_progress(true); + + // Setup workers for the rest + OrderAccess::fence(); + + // Initialize worker slices + ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC); + for (uint i = 0; i < heap->max_workers(); i++) { + worker_slices[i] = new ShenandoahHeapRegionSet(); + } + + { + // The rest of code performs region moves, where region status is undefined + // until all phases run together. + ShenandoahHeapLocker lock(heap->lock()); + + phase2_calculate_target_addresses(worker_slices); + + OrderAccess::fence(); + + phase3_update_references(); + + phase4_compact_objects(worker_slices); + } + + { + // Epilogue + SharedRestorePreservedMarksTaskExecutor exec(heap->workers()); + _preserved_marks->restore(&exec); + BiasedLocking::restore_marks(); + _preserved_marks->reclaim(); + + CodeCache::gc_epilogue(); + } + + // Resize metaspace + MetaspaceGC::compute_new_size(); + + // Free worker slices + for (uint i = 0; i < heap->max_workers(); i++) { + delete worker_slices[i]; + } + FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices); + + heap->set_full_gc_move_in_progress(false); + heap->set_full_gc_in_progress(false); + + if (ShenandoahVerify) { + heap->verifier()->verify_after_fullgc(); + } + + if (VerifyAfterGC) { + Universe::verify(); + } + + { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdumps); + heap->post_full_gc_dump(_gc_timer); + } +} + +class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure { +private: + ShenandoahMarkingContext* const _ctx; + +public: + ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {} + + void heap_region_do(ShenandoahHeapRegion *r) { + _ctx->capture_top_at_mark_start(r); + r->clear_live_data(); + r->set_concurrent_iteration_safe_limit(r->top()); + } +}; + +void ShenandoahMarkCompact::phase1_mark_heap() { + GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer); + ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark); + + ShenandoahHeap* heap = ShenandoahHeap::heap(); + + ShenandoahPrepareForMarkClosure cl; + heap->heap_region_iterate(&cl); + + ShenandoahConcurrentMark* cm = heap->concurrent_mark(); + + heap->set_process_references(heap->heuristics()->can_process_references()); + heap->set_unload_classes(heap->heuristics()->can_unload_classes()); + + ReferenceProcessor* rp = heap->ref_processor(); + // enable ("weak") refs discovery + rp->enable_discovery(true /*verify_no_refs*/); + rp->setup_policy(true); // forcefully purge all soft references + 
+  rp->set_active_mt_degree(heap->workers()->active_workers());
+
+  cm->update_roots(ShenandoahPhaseTimings::full_gc_roots);
+  cm->mark_roots(ShenandoahPhaseTimings::full_gc_roots);
+  cm->finish_mark_from_roots(/* full_gc = */ true);
+
+  heap->mark_complete_marking_context();
+}
+
+class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
+private:
+  PreservedMarks* const _preserved_marks;
+  ShenandoahHeap* const _heap;
+  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
+  int _empty_regions_pos;
+  ShenandoahHeapRegion* _to_region;
+  ShenandoahHeapRegion* _from_region;
+  HeapWord* _compact_point;
+
+public:
+  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
+                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
+                                              ShenandoahHeapRegion* to_region) :
+    _preserved_marks(preserved_marks),
+    _heap(ShenandoahHeap::heap()),
+    _empty_regions(empty_regions),
+    _empty_regions_pos(0),
+    _to_region(to_region),
+    _from_region(NULL),
+    _compact_point(to_region->bottom()) {}
+
+  void set_from_region(ShenandoahHeapRegion* from_region) {
+    _from_region = from_region;
+  }
+
+  void finish_region() {
+    assert(_to_region != NULL, "should not happen");
+    _to_region->set_new_top(_compact_point);
+  }
+
+  bool is_compact_same_region() {
+    return _from_region == _to_region;
+  }
+
+  int empty_regions_pos() {
+    return _empty_regions_pos;
+  }
+
+  void do_object(oop p) {
+    assert(_from_region != NULL, "must set before work");
+    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
+    assert(!_heap->complete_marking_context()->allocated_after_mark_start((HeapWord*) p), "must be truly marked");
+
+    size_t obj_size = p->size();
+    if (_compact_point + obj_size > _to_region->end()) {
+      finish_region();
+
+      // Object doesn't fit. Pick next empty region and start compacting there.
+      ShenandoahHeapRegion* new_to_region;
+      if (_empty_regions_pos < _empty_regions.length()) {
+        new_to_region = _empty_regions.at(_empty_regions_pos);
+        _empty_regions_pos++;
+      } else {
+        // Out of empty regions? Compact within the same region.
+        new_to_region = _from_region;
+      }
+
+      assert(new_to_region != _to_region, "must not reuse same to-region");
+      assert(new_to_region != NULL, "must not be NULL");
+      _to_region = new_to_region;
+      _compact_point = _to_region->bottom();
+    }
+
+    // Object fits into current region, record new location:
+    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
+    shenandoah_assert_not_forwarded(NULL, p);
+    _preserved_marks->push_if_necessary(p, p->mark_raw());
+    p->forward_to(oop(_compact_point));
+    _compact_point += obj_size;
+  }
+};
+
+class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
+private:
+  PreservedMarksSet* const _preserved_marks;
+  ShenandoahHeap* const _heap;
+  ShenandoahHeapRegionSet** const _worker_slices;
+  ShenandoahRegionIterator _heap_regions;
+
+  ShenandoahHeapRegion* next_from_region(ShenandoahHeapRegionSet* slice) {
+    ShenandoahHeapRegion* from_region = _heap_regions.next();
+
+    // Look for next candidate for this slice:
+    while (from_region != NULL) {
+      // Empty region: get it into the slice to defragment the slice itself.
+      // We could have skipped this without violating correctness, but we really
+      // want to compact all live regions to the start of the heap, which sometimes
+      // means moving them into the fully empty regions.
+      if (from_region->is_empty()) break;
+
+      // Can move the region, and this is not a humongous region. Humongous
+      // regions are skipped here, because their moves are handled separately.
+      if (from_region->is_stw_move_allowed() && !from_region->is_humongous()) break;
+
+      from_region = _heap_regions.next();
+    }
+
+    if (from_region != NULL) {
+      assert(slice != NULL, "sanity");
+      assert(!from_region->is_humongous(), "this path cannot handle humongous regions");
+      assert(from_region->is_empty() || from_region->is_stw_move_allowed(), "only regions that can be moved in mark-compact");
+      slice->add_region(from_region);
+    }
+
+    return from_region;
+  }
+
+public:
+  ShenandoahPrepareForCompactionTask(PreservedMarksSet* preserved_marks, ShenandoahHeapRegionSet** worker_slices) :
+    AbstractGangTask("Shenandoah Prepare For Compaction Task"),
+    _preserved_marks(preserved_marks),
+    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
+  }
+
+  void work(uint worker_id) {
+    ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
+    ShenandoahHeapRegion* from_region = next_from_region(slice);
+    // No work?
+    if (from_region == NULL) {
+      return;
+    }
+
+    // Sliding compaction. Walk all regions in the slice, and compact them.
+    // Remember empty regions and reuse them as needed.
+    ResourceMark rm;
+    GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());
+    ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
+    while (from_region != NULL) {
+      cl.set_from_region(from_region);
+      if (from_region->has_live()) {
+        _heap->marked_object_iterate(from_region, &cl);
+      }
+
+      // Compacted the region to somewhere else? From-region is empty then.
+      if (!cl.is_compact_same_region()) {
+        empty_regions.append(from_region);
+      }
+      from_region = next_from_region(slice);
+    }
+    cl.finish_region();
+
+    // Mark all remaining regions as empty
+    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
+      ShenandoahHeapRegion* r = empty_regions.at(pos);
+      r->set_new_top(r->bottom());
+    }
+  }
+};
+
+void ShenandoahMarkCompact::calculate_target_humongous_objects() {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  // Compute the new addresses for humongous objects. We need to do this after addresses
+  // for regular objects are calculated, and we know what regions in heap suffix are
+  // available for humongous moves.
+  //
+  // Scan the heap backwards, because we are compacting humongous regions towards the end.
+  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
+  // humongous start there.
+  //
+  // The complication is potential non-movable regions during the scan. If such a region is
+  // detected, then sliding restarts towards that non-movable region.
+
+  size_t to_begin = heap->num_regions();
+  size_t to_end = heap->num_regions();
+
+  for (size_t c = heap->num_regions(); c > 0; c--) {
+    ShenandoahHeapRegion *r = heap->get_region(c - 1);
+    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
+      // To-region candidate: record this, and continue scan
+      to_begin = r->region_number();
+      continue;
+    }
+
+    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
+      // From-region candidate: movable humongous region
+      oop old_obj = oop(r->bottom());
+      size_t words_size = old_obj->size();
+      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
+
+      size_t start = to_end - num_regions;
+
+      if (start >= to_begin && start != r->region_number()) {
+        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
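+        // forward_to() stores the new location in the mark word, so preserve any
+        // non-trivial mark first; preserved marks are restored in the GC epilogue.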
+        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark_raw());
+        old_obj->forward_to(oop(heap->get_region(start)->bottom()));
+        to_end = start;
+        continue;
+      }
+    }
+
+    // Failed to fit. Scan starting from current region.
+    to_begin = r->region_number();
+    to_end = r->region_number();
+  }
+}
+
+class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
+private:
+  ShenandoahHeap* const _heap;
+
+public:
+  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
+  void heap_region_do(ShenandoahHeapRegion* r) {
+    if (r->is_trash()) {
+      r->recycle();
+    }
+    if (r->is_cset()) {
+      r->make_regular_bypass();
+    }
+    if (r->is_empty_uncommitted()) {
+      r->make_committed_bypass();
+    }
+    assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->region_number());
+
+    // Record current region occupancy: this communicates to the rest of the
+    // Full GC code that empty regions are free to use.
+    r->set_new_top(r->top());
+  }
+};
+
+class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
+private:
+  ShenandoahHeap* const _heap;
+  ShenandoahMarkingContext* const _ctx;
+
+public:
+  ShenandoahTrashImmediateGarbageClosure() :
+    _heap(ShenandoahHeap::heap()),
+    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
+
+  void heap_region_do(ShenandoahHeapRegion* r) {
+    if (r->is_humongous_start()) {
+      oop humongous_obj = oop(r->bottom());
+      if (!_ctx->is_marked(humongous_obj)) {
+        assert(!r->has_live(),
+               "Region " SIZE_FORMAT " is not marked, should not have live", r->region_number());
+        _heap->trash_humongous_region_at(r);
+      } else {
+        assert(r->has_live(),
+               "Region " SIZE_FORMAT " should have live", r->region_number());
+      }
+    } else if (r->is_humongous_continuation()) {
+      // If we hit continuation, the non-live humongous starts should have been trashed already
+      assert(r->humongous_start_region()->has_live(),
+             "Region " SIZE_FORMAT " should have live", r->region_number());
+    } else if (r->is_regular()) {
+      if (!r->has_live()) {
+        r->make_trash_immediate();
+      }
+    }
+  }
+};
+
+void ShenandoahMarkCompact::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
+  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
+  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  // About to figure out which regions can be compacted, make sure pinning status
+  // has been updated in the GC prologue.
+  heap->assert_pinned_region_status();
+
+  {
+    // Trash the immediately collectible regions before computing addresses
+    ShenandoahTrashImmediateGarbageClosure tigcl;
+    heap->heap_region_iterate(&tigcl);
+
+    // Make sure regions are in good state: committed, active, clean.
+    // This is needed because we are potentially sliding the data through them.
+    ShenandoahEnsureHeapActiveClosure ecl;
+    heap->heap_region_iterate(&ecl);
+  }
+
+  // Compute the new addresses for regular objects
+  {
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
+    ShenandoahPrepareForCompactionTask prepare_task(_preserved_marks, worker_slices);
+    heap->workers()->run_task(&prepare_task);
+  }
+
+  // Compute the new addresses for humongous objects
+  {
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
+    calculate_target_humongous_objects();
+  }
+}
+
+class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
+private:
+  ShenandoahHeap* const _heap;
+  ShenandoahMarkingContext* const _ctx;
+
+  template <class T>
+  inline void do_oop_work(T* p) {
+    T o = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(o)) {
+      oop obj = CompressedOops::decode_not_null(o);
+      assert(_ctx->is_marked(obj), "must be marked");
+      if (obj->is_forwarded()) {
+        oop forw = obj->forwardee();
+        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
+      }
+    }
+  }
+
+public:
+  ShenandoahAdjustPointersClosure() :
+    _heap(ShenandoahHeap::heap()),
+    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
+
+  void do_oop(oop* p)       { do_oop_work(p); }
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+};
+
+class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
+private:
+  ShenandoahHeap* const _heap;
+  ShenandoahAdjustPointersClosure _cl;
+
+public:
+  ShenandoahAdjustPointersObjectClosure() :
+    _heap(ShenandoahHeap::heap()) {
+  }
+  void do_object(oop p) {
+    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
+    p->oop_iterate(&_cl);
+  }
+};
+
+class ShenandoahAdjustPointersTask : public AbstractGangTask {
+private:
+  ShenandoahHeap* const _heap;
+  ShenandoahRegionIterator _regions;
+
+public:
+  ShenandoahAdjustPointersTask() :
+    AbstractGangTask("Shenandoah Adjust Pointers Task"),
+    _heap(ShenandoahHeap::heap()) {
+  }
+
+  void work(uint worker_id) {
+    ShenandoahAdjustPointersObjectClosure obj_cl;
+    ShenandoahHeapRegion* r = _regions.next();
+    while (r != NULL) {
+      if (!r->is_humongous_continuation() && r->has_live()) {
+        _heap->marked_object_iterate(r, &obj_cl);
+      }
+      r = _regions.next();
+    }
+  }
+};
+
+class ShenandoahAdjustRootPointersTask : public AbstractGangTask {
+private:
+  ShenandoahRootAdjuster* _rp;
+  PreservedMarksSet* _preserved_marks;
+public:
+  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
+    AbstractGangTask("Shenandoah Adjust Root Pointers Task"),
+    _rp(rp),
+    _preserved_marks(preserved_marks) {}
+
+  void work(uint worker_id) {
+    ShenandoahAdjustPointersClosure cl;
+    _rp->roots_do(worker_id, &cl);
+    _preserved_marks->get(worker_id)->adjust_during_full_gc();
+  }
+};
+
+void ShenandoahMarkCompact::phase3_update_references() {
+  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
+  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  WorkGang* workers = heap->workers();
+  uint nworkers = workers->active_workers();
+  {
+#if COMPILER2_OR_JVMCI
+    DerivedPointerTable::clear();
+#endif
+    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_roots);
+    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
+    workers->run_task(&task);
+#if COMPILER2_OR_JVMCI
+    DerivedPointerTable::update_pointers();
+#endif
+  }
+
+  ShenandoahAdjustPointersTask adjust_pointers_task;
+  workers->run_task(&adjust_pointers_task);
+}
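+
+// For intuition: phase 3 is the classic Lisp-2 "adjust" step. Given an object A
+// that references B, with forwarding information installed by phase 2:
+//
+//   before:  A.field --> B    (B's mark word holds fwdptr B')
+//   after:   A.field --> B'
+//
+// Nothing has moved yet; only the edges of the object graph are rewritten, so
+// phase 4 below can copy each object's payload without consulting its references.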
+ +class ShenandoahCompactObjectsClosure : public ObjectClosure { +private: + ShenandoahHeap* const _heap; + uint const _worker_id; + +public: + ShenandoahCompactObjectsClosure(uint worker_id) : + _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {} + + void do_object(oop p) { + assert(_heap->complete_marking_context()->is_marked(p), "must be marked"); + size_t size = (size_t)p->size(); + if (p->is_forwarded()) { + HeapWord* compact_from = (HeapWord*) p; + HeapWord* compact_to = (HeapWord*) p->forwardee(); + Copy::aligned_conjoint_words(compact_from, compact_to, size); + oop new_obj = oop(compact_to); + new_obj->init_mark_raw(); + } + } +}; + +class ShenandoahCompactObjectsTask : public AbstractGangTask { +private: + ShenandoahHeap* const _heap; + ShenandoahHeapRegionSet** const _worker_slices; + +public: + ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) : + AbstractGangTask("Shenandoah Compact Objects Task"), + _heap(ShenandoahHeap::heap()), + _worker_slices(worker_slices) { + } + + void work(uint worker_id) { + ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]); + + ShenandoahCompactObjectsClosure cl(worker_id); + ShenandoahHeapRegion* r = slice.next(); + while (r != NULL) { + assert(!r->is_humongous(), "must not get humongous regions here"); + if (r->has_live()) { + _heap->marked_object_iterate(r, &cl); + } + r->set_top(r->new_top()); + r = slice.next(); + } + } +}; + +class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure { +private: + ShenandoahHeap* const _heap; + size_t _live; + +public: + ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) { + _heap->free_set()->clear(); + } + + void heap_region_do(ShenandoahHeapRegion* r) { + assert (!r->is_cset(), "cset regions should have been demoted already"); + + // Need to reset the complete-top-at-mark-start pointer here because + // the complete marking bitmap is no longer valid. This ensures + // size-based iteration in marked_object_iterate(). + // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip + // pinned regions. + if (!r->is_pinned()) { + _heap->complete_marking_context()->reset_top_at_mark_start(r); + } + + size_t live = r->used(); + + // Make empty regions that have been allocated into regular + if (r->is_empty() && live > 0) { + r->make_regular_bypass(); + } + + // Reclaim regular regions that became empty + if (r->is_regular() && live == 0) { + r->make_trash(); + } + + // Recycle all trash regions + if (r->is_trash()) { + live = 0; + r->recycle(); + } + + r->set_live_data(live); + r->reset_alloc_metadata_to_shared(); + _live += live; + } + + size_t get_live() { + return _live; + } +}; + +void ShenandoahMarkCompact::compact_humongous_objects() { + // Compact humongous regions, based on their fwdptr objects. + // + // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases, + // humongous regions are already compacted, and do not require further moves, which alleviates + // sliding costs. We may consider doing this in parallel in future. 
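+  //
+  // For example: a humongous object spanning regions [10..12] whose fwdptr points
+  // into region 7 is copied as one contiguous block into regions [7..9]; the old
+  // regions then become regular empty regions, and the new ones are re-tagged as
+  // humongous-start and humongous-continuation below.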
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  for (size_t c = heap->num_regions(); c > 0; c--) {
+    ShenandoahHeapRegion* r = heap->get_region(c - 1);
+    if (r->is_humongous_start()) {
+      oop old_obj = oop(r->bottom());
+      if (!old_obj->is_forwarded()) {
+        // No need to move the object, it stays at the same slot
+        continue;
+      }
+      size_t words_size = old_obj->size();
+      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
+
+      size_t old_start = r->region_number();
+      size_t old_end   = old_start + num_regions - 1;
+      size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
+      size_t new_end   = new_start + num_regions - 1;
+      assert(old_start != new_start, "must be real move");
+      assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->region_number());
+
+      Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
+                                   heap->get_region(new_start)->bottom(),
+                                   ShenandoahHeapRegion::region_size_words()*num_regions);
+
+      oop new_obj = oop(heap->get_region(new_start)->bottom());
+      new_obj->init_mark_raw();
+
+      {
+        for (size_t c = old_start; c <= old_end; c++) {
+          ShenandoahHeapRegion* r = heap->get_region(c);
+          r->make_regular_bypass();
+          r->set_top(r->bottom());
+        }
+
+        for (size_t c = new_start; c <= new_end; c++) {
+          ShenandoahHeapRegion* r = heap->get_region(c);
+          if (c == new_start) {
+            r->make_humongous_start_bypass();
+          } else {
+            r->make_humongous_cont_bypass();
+          }
+
+          // Trailing region may be non-full, record the remainder there
+          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
+          if ((c == new_end) && (remainder != 0)) {
+            r->set_top(r->bottom() + remainder);
+          } else {
+            r->set_top(r->end());
+          }
+
+          r->reset_alloc_metadata_to_shared();
+        }
+      }
+    }
+  }
+}
+
+// This is slightly different from ShenandoahHeap::reset_next_mark_bitmap:
+// we need to remain able to walk pinned regions.
+// Since pinned regions do not move and don't get compacted, we will get holes with
+// unreachable objects in them (which may have pointers to unloaded Klasses and thus
+// cannot be iterated over using oop->size()). The only way to safely iterate over them
+// is using a valid marking bitmap and a valid TAMS pointer. This class only resets marking
+// bitmaps for unpinned regions, and later we only reset TAMS for unpinned regions.
+class ShenandoahMCResetCompleteBitmapTask : public AbstractGangTask { +private: + ShenandoahRegionIterator _regions; + +public: + ShenandoahMCResetCompleteBitmapTask() : + AbstractGangTask("Parallel Reset Bitmap Task") { + } + + void work(uint worker_id) { + ShenandoahHeapRegion* region = _regions.next(); + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahMarkingContext* const ctx = heap->complete_marking_context(); + while (region != NULL) { + if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) { + ctx->clear_bitmap(region); + } + region = _regions.next(); + } + } +}; + +void ShenandoahMarkCompact::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) { + GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer); + ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects); + + ShenandoahHeap* heap = ShenandoahHeap::heap(); + + // Compact regular objects first + { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular); + ShenandoahCompactObjectsTask compact_task(worker_slices); + heap->workers()->run_task(&compact_task); + } + + // Compact humongous objects after regular object moves + { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong); + compact_humongous_objects(); + } + + // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer + // and must ensure the bitmap is in sync. + { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete); + ShenandoahMCResetCompleteBitmapTask task; + heap->workers()->run_task(&task); + } + + // Bring regions in proper states after the collection, and set heap properties. + { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild); + + ShenandoahPostCompactClosure post_compact; + heap->heap_region_iterate(&post_compact); + heap->set_used(post_compact.get_live()); + + heap->collection_set()->clear(); + heap->free_set()->rebuild(); + } + + heap->clear_cancelled_gc(); +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.hpp 2020-01-17 17:10:17.073130095 +0100 @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2014, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKCOMPACT_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKCOMPACT_HPP + +#include "gc/shared/gcTimer.hpp" +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahHeapRegionSet.hpp" + +/** + * This implements Full GC (e.g. 
when invoking System.gc()) using a mark-compact algorithm.
+ *
+ * The current implementation is a parallel sliding Lisp-2-style algorithm, based on
+ * "Parallel Garbage Collection for Shared Memory Multiprocessors", by Christine Flood et al.
+ * http://people.csail.mit.edu/shanir/publications/dfsz2001.pdf
+ *
+ * It is implemented in four phases:
+ *
+ * 1. Mark all live objects of the heap by traversing objects starting at GC roots.
+ * 2. Calculate the new location of each live object. This is done by sequentially scanning
+ *    the heap, keeping track of a next-location-pointer, which is then written to each
+ *    object's fwdptr field.
+ * 3. Update all references. This is implemented by another scan of the heap, and updates
+ *    all references in live objects by what's stored in the target object's fwdptr.
+ * 4. Compact the heap by copying all live objects to their new location.
+ *
+ * Parallelization is handled by assigning each GC worker a slice of the heap (a set of regions)
+ * where it does sliding compaction, without interfering with other threads.
+ */
+
+class PreservedMarksSet;
+
+class ShenandoahMarkCompact : public CHeapObj<mtGC> {
+  friend class ShenandoahPrepareForCompactionObjectClosure;
+private:
+  GCTimer* _gc_timer;
+
+  PreservedMarksSet* _preserved_marks;
+
+public:
+  ShenandoahMarkCompact();
+  void initialize(GCTimer* gc_timer);
+
+  void do_it(GCCause::Cause gc_cause);
+
+private:
+  void phase1_mark_heap();
+  void phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices);
+  void phase3_update_references();
+  void phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices);
+
+  void calculate_target_humongous_objects();
+  void compact_humongous_objects();
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKCOMPACT_HPP
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.cpp	2020-01-17 17:10:17.690130061 +0100
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +#include "precompiled.hpp" +#include "gc/shared/markBitMap.inline.hpp" +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" +#include "gc/shenandoah/shenandoahMarkingContext.hpp" + +ShenandoahMarkingContext::ShenandoahMarkingContext(MemRegion heap_region, MemRegion bitmap_region, size_t num_regions) : + _top_bitmaps(NEW_C_HEAP_ARRAY(HeapWord*, num_regions, mtGC)), + _top_at_mark_starts_base(NEW_C_HEAP_ARRAY(HeapWord*, num_regions, mtGC)), + _top_at_mark_starts(_top_at_mark_starts_base - + ((uintx) heap_region.start() >> ShenandoahHeapRegion::region_size_bytes_shift())) { + _mark_bit_map.initialize(heap_region, bitmap_region); +} + +bool ShenandoahMarkingContext::is_bitmap_clear() const { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + size_t num_regions = heap->num_regions(); + for (size_t idx = 0; idx < num_regions; idx++) { + ShenandoahHeapRegion* r = heap->get_region(idx); + if (heap->is_bitmap_slice_committed(r) && !is_bitmap_clear_range(r->bottom(), r->end())) { + return false; + } + } + return true; +} + +bool ShenandoahMarkingContext::is_bitmap_clear_range(HeapWord* start, HeapWord* end) const { + return _mark_bit_map.getNextMarkedWordAddress(start, end) == end; +} + +void ShenandoahMarkingContext::initialize_top_at_mark_start(ShenandoahHeapRegion* r) { + size_t idx = r->region_number(); + HeapWord *bottom = r->bottom(); + _top_at_mark_starts_base[idx] = bottom; + _top_bitmaps[idx] = bottom; +} + +void ShenandoahMarkingContext::capture_top_at_mark_start(ShenandoahHeapRegion *r) { + size_t region_number = r->region_number(); + HeapWord* old_tams = _top_at_mark_starts_base[region_number]; + HeapWord* new_tams = r->top(); + + assert(new_tams >= old_tams, + "Region " SIZE_FORMAT", TAMS updates should be monotonic: " PTR_FORMAT " -> " PTR_FORMAT, + region_number, p2i(old_tams), p2i(new_tams)); + assert(is_bitmap_clear_range(old_tams, new_tams), + "Region " SIZE_FORMAT ", bitmap should be clear while adjusting TAMS: " PTR_FORMAT " -> " PTR_FORMAT, + region_number, p2i(old_tams), p2i(new_tams)); + + _top_at_mark_starts_base[region_number] = new_tams; + _top_bitmaps[region_number] = new_tams; +} + +void ShenandoahMarkingContext::reset_top_at_mark_start(ShenandoahHeapRegion* r) { + _top_at_mark_starts_base[r->region_number()] = r->bottom(); +} + +HeapWord* ShenandoahMarkingContext::top_at_mark_start(ShenandoahHeapRegion* r) const { + return _top_at_mark_starts_base[r->region_number()]; +} + +void ShenandoahMarkingContext::reset_top_bitmap(ShenandoahHeapRegion* r) { + assert(is_bitmap_clear_range(r->bottom(), r->end()), + "Region " SIZE_FORMAT " should have no marks in bitmap", r->region_number()); + _top_bitmaps[r->region_number()] = r->bottom(); +} + +void ShenandoahMarkingContext::clear_bitmap(ShenandoahHeapRegion* r) { + HeapWord* bottom = r->bottom(); + HeapWord* top_bitmap = _top_bitmaps[r->region_number()]; + if (top_bitmap > bottom) { + _mark_bit_map.clear_range_large(MemRegion(bottom, top_bitmap)); + _top_bitmaps[r->region_number()] = bottom; + } + assert(is_bitmap_clear_range(bottom, r->end()), + "Region " SIZE_FORMAT " should have no marks in bitmap", r->region_number()); +} + +bool ShenandoahMarkingContext::is_complete() { + return _is_complete.is_set(); +} + +void ShenandoahMarkingContext::mark_complete() { + _is_complete.set(); +} + +void ShenandoahMarkingContext::mark_incomplete() { + _is_complete.unset(); +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ 
new/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.hpp	2020-01-17 17:10:18.298130028 +0100
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_HPP
+
+#include "gc/shared/markBitMap.hpp"
+#include "gc/shenandoah/shenandoahSharedVariables.hpp"
+#include "memory/allocation.hpp"
+#include "memory/memRegion.hpp"
+#include "oops/oopsHierarchy.hpp"
+
+class HeapWord;
+
+/**
+ * Encapsulate a marking bitmap with the top-at-mark-start and top-bitmaps array.
+ */
+class ShenandoahMarkingContext : public CHeapObj<mtGC> {
+private:
+  MarkBitMap _mark_bit_map;
+
+  HeapWord** const _top_bitmaps;
+  HeapWord** const _top_at_mark_starts_base;
+  HeapWord** const _top_at_mark_starts;
+
+  ShenandoahSharedFlag _is_complete;
+
+public:
+  ShenandoahMarkingContext(MemRegion heap_region, MemRegion bitmap_region, size_t num_regions);
+
+  /*
+   * Marks the object. Returns true if the object has not been marked before and has
+   * been marked by this thread. Returns false if the object has already been marked,
+   * or if a competing thread succeeded in marking this object.
+   */
+  inline bool mark(oop obj);
+
+  inline bool is_marked(oop obj) const;
+
+  inline bool allocated_after_mark_start(HeapWord* addr) const;
+
+  inline MarkBitMap* mark_bit_map();
+
+  HeapWord* top_at_mark_start(ShenandoahHeapRegion* r) const;
+  void capture_top_at_mark_start(ShenandoahHeapRegion* r);
+  void reset_top_at_mark_start(ShenandoahHeapRegion* r);
+  void initialize_top_at_mark_start(ShenandoahHeapRegion* r);
+
+  void reset_top_bitmap(ShenandoahHeapRegion *r);
+  void clear_bitmap(ShenandoahHeapRegion *r);
+
+  bool is_bitmap_clear() const;
+  bool is_bitmap_clear_range(HeapWord* start, HeapWord* end) const;
+
+  bool is_complete();
+  void mark_complete();
+  void mark_incomplete();
+
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_HPP
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp	2020-01-17 17:10:18.914129994 +0100
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_INLINE_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_INLINE_HPP + +#include "gc/shenandoah/shenandoahMarkingContext.hpp" + +inline MarkBitMap* ShenandoahMarkingContext::mark_bit_map() { + return &_mark_bit_map; +} + +inline bool ShenandoahMarkingContext::mark(oop obj) { + shenandoah_assert_not_forwarded(NULL, obj); + HeapWord* addr = (HeapWord*) obj; + return (! allocated_after_mark_start(addr)) && _mark_bit_map.parMark(addr); +} + +inline bool ShenandoahMarkingContext::is_marked(oop obj) const { + HeapWord* addr = (HeapWord*) obj; + return allocated_after_mark_start(addr) || _mark_bit_map.isMarked(addr); +} + +inline bool ShenandoahMarkingContext::allocated_after_mark_start(HeapWord* addr) const { + uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_bytes_shift(); + HeapWord* top_at_mark_start = _top_at_mark_starts[index]; + bool alloc_after_mark_start = addr >= top_at_mark_start; + return alloc_after_mark_start; +} + +#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_INLINE_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp 2020-01-17 17:10:19.522129960 +0100 @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc/shenandoah/shenandoahMemoryPool.hpp" + +ShenandoahMemoryPool::ShenandoahMemoryPool(ShenandoahHeap* heap) : + CollectedMemoryPool("Shenandoah", + heap->initial_capacity(), + heap->max_capacity(), + true /* support_usage_threshold */), + _heap(heap) {} + +MemoryUsage ShenandoahMemoryPool::get_memory_usage() { + size_t initial = initial_size(); + size_t max = max_size(); + size_t used = used_in_bytes(); + size_t committed = _heap->committed(); + + // These asserts can never fail: max is stable, and all updates to other values never overflow max. 
+ assert(initial <= max, "initial: " SIZE_FORMAT ", max: " SIZE_FORMAT, initial, max); + assert(used <= max, "used: " SIZE_FORMAT ", max: " SIZE_FORMAT, used, max); + assert(committed <= max, "committed: " SIZE_FORMAT ", max: " SIZE_FORMAT, committed, max); + + // Committed and used are updated concurrently and independently. They can momentarily break + // the assert below, which would also fail in downstream code. To avoid that, adjust values + // to make sense under the race. See JDK-8207200. + committed = MAX2(used, committed); + assert(used <= committed, "used: " SIZE_FORMAT ", committed: " SIZE_FORMAT, used, committed); + + return MemoryUsage(initial, used, committed, max); +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.hpp 2020-01-17 17:10:20.125129927 +0100 @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_SERVICES_SHENANDOAHMEMORYPOOL_HPP +#define SHARE_VM_SERVICES_SHENANDOAHMEMORYPOOL_HPP + +#ifndef SERIALGC +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "services/memoryPool.hpp" +#include "services/memoryUsage.hpp" +#endif + +class ShenandoahMemoryPool : public CollectedMemoryPool { +private: + ShenandoahHeap* _heap; + +public: + ShenandoahMemoryPool(ShenandoahHeap* pool); + MemoryUsage get_memory_usage(); + size_t used_in_bytes() { return _heap->used(); } + size_t max_size() const { return _heap->max_capacity(); } +}; + +#endif //SHARE_VM_SERVICES_SHENANDOAHMEMORYPOOL_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahMetrics.cpp 2020-01-17 17:10:20.724129894 +0100 @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shenandoah/shenandoahMetrics.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahFreeSet.hpp"
+
+/*
+ * Internal fragmentation metric: describes how fragmented the heap regions are.
+ *
+ * It is derived as:
+ *
+ *               sum(used[i]^2, i=0..k)
+ *   IF = 1 - ------------------------------
+ *              C * sum(used[i], i=0..k)
+ *
+ * ...where k is the number of regions in the computation, C is the region capacity, and
+ * used[i] is the used space in the region.
+ *
+ * The non-linearity causes IF to be lower for the cases where the same total heap
+ * used is densely packed. For example:
+ *   a) Heap is completely full                                  => IF = 0
+ *   b) Heap is half full, first 50% regions are completely full => IF = 0
+ *   c) Heap is half full, each region is 50% full               => IF = 1/2
+ *   d) Heap is quarter full, first 50% regions are completely full => IF = 0
+ *   e) Heap is quarter full, each region is 25% full            => IF = 3/4
+ *   f) Heap has one small object in each region                 => IF =~ 1
+ */
+double ShenandoahMetrics::internal_fragmentation() {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  double squared = 0;
+  double linear = 0;
+  int count = 0;
+  for (size_t c = 0; c < heap->num_regions(); c++) {
+    ShenandoahHeapRegion* r = heap->get_region(c);
+    size_t used = r->used();
+    squared += used * used;
+    linear += used;
+    count++;
+  }
+
+  if (count > 0) {
+    double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
+    return 1 - s;
+  } else {
+    return 0;
+  }
+}
+
+/*
+ * External fragmentation metric: describes how fragmented the heap is.
+ *
+ * It is derived as:
+ *
+ *   EF = 1 - largest_contiguous_free / total_free
+ *
+ * For example:
+ *   a) Heap is completely empty                             => EF = 0
+ *   b) Heap is completely full                              => EF = 1
+ *   c) Heap is first-half full                              => EF = 1/2
+ *   d) Heap is half full, full and empty regions interleave => EF =~ 1
+ */
+double ShenandoahMetrics::external_fragmentation() {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  size_t last_idx = 0;
+  size_t max_contig = 0;
+  size_t empty_contig = 0;
+
+  size_t free = 0;
+  for (size_t c = 0; c < heap->num_regions(); c++) {
+    ShenandoahHeapRegion* r = heap->get_region(c);
+
+    if (r->is_empty() && (last_idx + 1 == c)) {
+      empty_contig++;
+    } else {
+      empty_contig = 0;
+    }
+
+    free += r->free();
+    max_contig = MAX2(max_contig, empty_contig);
+    last_idx = c;
+  }
+
+  if (free > 0) {
+    return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
+  } else {
+    return 1;
+  }
+}
+
+ShenandoahMetricsSnapshot::ShenandoahMetricsSnapshot() {
+  _heap = ShenandoahHeap::heap();
+}
+
+void ShenandoahMetricsSnapshot::snap_before() {
+  _used_before = _heap->used();
+  _if_before = ShenandoahMetrics::internal_fragmentation();
+  _ef_before = ShenandoahMetrics::external_fragmentation();
+}
+void ShenandoahMetricsSnapshot::snap_after() {
+  _used_after = _heap->used();
+  _if_after = ShenandoahMetrics::internal_fragmentation();
+  _ef_after = ShenandoahMetrics::external_fragmentation();
+}
+
+bool ShenandoahMetricsSnapshot::is_good_progress() {
+  // Under the critical threshold?
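+  // (The overall rule: free space must stay above the critical threshold, and
+  // additionally either enough space must have been freed up, or internal or
+  // external fragmentation must have improved by a meaningful amount.)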
+ size_t free_actual = _heap->free_set()->available(); + size_t free_expected = _heap->max_capacity() / 100 * ShenandoahCriticalFreeThreshold; + bool prog_free = free_actual >= free_expected; + log_info(gc, ergo)("%s progress for free space: " SIZE_FORMAT "%s, need " SIZE_FORMAT "%s", + prog_free ? "Good" : "Bad", + byte_size_in_proper_unit(free_actual), proper_unit_for_byte_size(free_actual), + byte_size_in_proper_unit(free_expected), proper_unit_for_byte_size(free_expected)); + if (!prog_free) { + return false; + } + + // Freed up enough? + size_t progress_actual = (_used_before > _used_after) ? _used_before - _used_after : 0; + size_t progress_expected = ShenandoahHeapRegion::region_size_bytes(); + bool prog_used = progress_actual >= progress_expected; + log_info(gc, ergo)("%s progress for used space: " SIZE_FORMAT "%s, need " SIZE_FORMAT "%s", + prog_used ? "Good" : "Bad", + byte_size_in_proper_unit(progress_actual), proper_unit_for_byte_size(progress_actual), + byte_size_in_proper_unit(progress_expected), proper_unit_for_byte_size(progress_expected)); + if (prog_used) { + return true; + } + + // Internal fragmentation is down? + double if_actual = _if_before - _if_after; + double if_expected = 0.01; // 1% should be enough + bool prog_if = if_actual >= if_expected; + log_info(gc, ergo)("%s progress for internal fragmentation: %.1f%%, need %.1f%%", + prog_if ? "Good" : "Bad", + if_actual * 100, if_expected * 100); + if (prog_if) { + return true; + } + + // External fragmentation is down? + double ef_actual = _ef_before - _ef_after; + double ef_expected = 0.01; // 1% should be enough + bool prog_ef = ef_actual >= ef_expected; + log_info(gc, ergo)("%s progress for external fragmentation: %.1f%%, need %.1f%%", + prog_ef ? "Good" : "Bad", + ef_actual * 100, ef_expected * 100); + if (prog_ef) { + return true; + } + + // Nothing good had happened. + return false; +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahMetrics.hpp 2020-01-17 17:10:21.330129861 +0100 @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHMETRICS_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHMETRICS_HPP
+
+#include "gc/shenandoah/shenandoahHeap.hpp"
+
+class ShenandoahMetrics {
+private:
+  ShenandoahMetrics() {}
+
+public:
+  static double internal_fragmentation();
+  static double external_fragmentation();
+};
+
+class ShenandoahMetricsSnapshot : public StackObj {
+private:
+  ShenandoahHeap* _heap;
+  size_t _used_before, _used_after;
+  double _if_before, _if_after;
+  double _ef_before, _ef_after;
+
+public:
+  ShenandoahMetricsSnapshot();
+
+  void snap_before();
+  void snap_after();
+
+  bool is_good_progress();
+};
+
+#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHMETRICS_HPP
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahMode.hpp	2020-01-17 17:10:21.936129827 +0100
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHMODE_HPP
+#define SHARE_GC_SHENANDOAH_SHENANDOAHMODE_HPP
+
+#include "memory/allocation.hpp"
+
+class ShenandoahHeuristics;
+
+class ShenandoahMode : public CHeapObj<mtGC> {
+public:
+  virtual void initialize_flags() const = 0;
+  virtual ShenandoahHeuristics* initialize_heuristics() const = 0;
+};
+
+#endif // SHARE_GC_SHENANDOAH_SHENANDOAHMODE_HPP
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahMonitoringSupport.cpp	2020-01-17 17:10:22.544129794 +0100
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +#include "precompiled.hpp" +#include "gc/shared/collectorCounters.hpp" +#include "gc/shared/generationCounters.hpp" +#include "gc/shared/hSpaceCounters.hpp" +#include "gc/shenandoah/shenandoahMonitoringSupport.hpp" +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahHeapRegionCounters.hpp" +#include "memory/metaspaceCounters.hpp" +#include "services/memoryService.hpp" + +class ShenandoahYoungGenerationCounters : public GenerationCounters { +public: + ShenandoahYoungGenerationCounters() : + GenerationCounters("Young", 0, 0, 0, (size_t)0, (size_t)0) {}; + + virtual void update_all() { + // no update + } +}; + +class ShenandoahGenerationCounters : public GenerationCounters { +private: + ShenandoahHeap* _heap; +public: + ShenandoahGenerationCounters(ShenandoahHeap* heap) : + GenerationCounters("Heap", 1, 1, heap->initial_capacity(), heap->max_capacity(), heap->capacity()), + _heap(heap) + {}; + + virtual void update_all() { + _current_size->set_value(_heap->capacity()); + } +}; + +ShenandoahMonitoringSupport::ShenandoahMonitoringSupport(ShenandoahHeap* heap) : + _partial_counters(NULL), + _full_counters(NULL) +{ + // Collection counters do not fit Shenandoah very well. + // We record partial cycles as "young", and full cycles (including full STW GC) as "old". + _partial_counters = new CollectorCounters("Shenandoah partial", 0); + _full_counters = new CollectorCounters("Shenandoah full", 1); + + // We report young gen as unused. + _young_counters = new ShenandoahYoungGenerationCounters(); + _heap_counters = new ShenandoahGenerationCounters(heap); + _space_counters = new HSpaceCounters(_heap_counters->name_space(), "Heap", 0, heap->max_capacity(), heap->initial_capacity()); + + _heap_region_counters = new ShenandoahHeapRegionCounters(); +} + +CollectorCounters* ShenandoahMonitoringSupport::stw_collection_counters() { + return _full_counters; +} + +CollectorCounters* ShenandoahMonitoringSupport::full_stw_collection_counters() { + return _full_counters; +} + +CollectorCounters* ShenandoahMonitoringSupport::concurrent_collection_counters() { + return _full_counters; +} + +CollectorCounters* ShenandoahMonitoringSupport::partial_collection_counters() { + return _partial_counters; +} + +void ShenandoahMonitoringSupport::update_counters() { + MemoryService::track_memory_usage(); + + if (UsePerfData) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + size_t used = heap->used(); + size_t capacity = heap->max_capacity(); + _heap_counters->update_all(); + _space_counters->update_all(capacity, used); + _heap_region_counters->update(); + + MetaspaceCounters::update_performance_counters(); + CompressedClassSpaceCounters::update_performance_counters(); + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahMonitoringSupport.hpp 2020-01-17 17:10:23.145129761 +0100 @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2015, 2017, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHMONITORINGSUPPORT_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHMONITORINGSUPPORT_HPP
+
+#include "memory/allocation.hpp"
+
+class GenerationCounters;
+class HSpaceCounters;
+class ShenandoahHeap;
+class CollectorCounters;
+class ShenandoahHeapRegionCounters;
+
+class ShenandoahMonitoringSupport : public CHeapObj<mtGC> {
+private:
+  CollectorCounters* _partial_counters;
+  CollectorCounters* _full_counters;
+
+  GenerationCounters* _young_counters;
+  GenerationCounters* _heap_counters;
+
+  HSpaceCounters* _space_counters;
+
+  ShenandoahHeapRegionCounters* _heap_region_counters;
+
+public:
+  ShenandoahMonitoringSupport(ShenandoahHeap* heap);
+  CollectorCounters* stw_collection_counters();
+  CollectorCounters* full_stw_collection_counters();
+  CollectorCounters* concurrent_collection_counters();
+  CollectorCounters* partial_collection_counters();
+  void update_counters();
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHMONITORINGSUPPORT_HPP
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahNormalMode.cpp	2020-01-17 17:10:23.746129728 +0100
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +#include "precompiled.hpp" +#include "gc/shenandoah/shenandoahNormalMode.hpp" +#include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp" +#include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp" +#include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp" +#include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp" +#include "logging/log.hpp" +#include "logging/logTag.hpp" + +void ShenandoahNormalMode::initialize_flags() const { + SHENANDOAH_ERGO_ENABLE_FLAG(ExplicitGCInvokesConcurrent); + SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent); + + // Final configuration checks + SHENANDOAH_CHECK_FLAG_SET(ShenandoahLoadRefBarrier); + SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier); + SHENANDOAH_CHECK_FLAG_SET(ShenandoahKeepAliveBarrier); + SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier); + SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier); +} + +ShenandoahHeuristics* ShenandoahNormalMode::initialize_heuristics() const { + if (ShenandoahGCHeuristics != NULL) { + if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) { + return new ShenandoahAggressiveHeuristics(); + } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) { + return new ShenandoahStaticHeuristics(); + } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) { + return new ShenandoahAdaptiveHeuristics(); + } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) { + return new ShenandoahCompactHeuristics(); + } else { + vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option"); + } + } + ShouldNotReachHere(); + return NULL; +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahNormalMode.hpp 2020-01-17 17:10:24.348129694 +0100 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHNORMALMODE_HPP +#define SHARE_GC_SHENANDOAH_SHENANDOAHNORMALMODE_HPP + +#include "gc/shenandoah/shenandoahMode.hpp" + +class ShenandoahHeuristics; + +class ShenandoahNormalMode : public ShenandoahMode { +public: + virtual void initialize_flags() const; + virtual ShenandoahHeuristics* initialize_heuristics() const; +}; + +#endif // SHARE_GC_SHENANDOAH_SHENANDOAHNORMALMODE_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp 2020-01-17 17:10:24.955129661 +0100 @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/shenandoahNumberSeq.hpp" +#include "runtime/atomic.hpp" + +HdrSeq::HdrSeq() { + _hdr = NEW_C_HEAP_ARRAY(int*, MagBuckets, mtInternal); + for (int c = 0; c < MagBuckets; c++) { + _hdr[c] = NULL; + } +} + +HdrSeq::~HdrSeq() { + for (int c = 0; c < MagBuckets; c++) { + int* sub = _hdr[c]; + if (sub != NULL) { + FREE_C_HEAP_ARRAY(int, sub); + } + } + FREE_C_HEAP_ARRAY(int*, _hdr); +} + +void HdrSeq::add(double val) { + if (val < 0) { + assert (false, "value (%8.2f) is not negative", val); + val = 0; + } + + NumberSeq::add(val); + + double v = val; + int mag; + if (v > 0) { + mag = 0; + while (v > 1) { + mag++; + v /= 10; + } + while (v < 0.1) { + mag--; + v *= 10; + } + } else { + mag = MagMinimum; + } + + int bucket = -MagMinimum + mag; + int sub_bucket = (int) (v * ValBuckets); + + // Defensively saturate for product bits: + if (bucket < 0) { + assert (false, "bucket index (%d) underflow for value (%8.2f)", bucket, val); + bucket = 0; + } + + if (bucket >= MagBuckets) { + assert (false, "bucket index (%d) overflow for value (%8.2f)", bucket, val); + bucket = MagBuckets - 1; + } + + if (sub_bucket < 0) { + assert (false, "sub-bucket index (%d) underflow for value (%8.2f)", sub_bucket, val); + sub_bucket = 0; + } + + if (sub_bucket >= ValBuckets) { + assert (false, "sub-bucket index (%d) overflow for value (%8.2f)", sub_bucket, val); + sub_bucket = ValBuckets - 1; + } + + int* b = _hdr[bucket]; + if (b == NULL) { + b = NEW_C_HEAP_ARRAY(int, ValBuckets, mtInternal); + for (int c = 0; c < ValBuckets; c++) { + b[c] = 0; + } + _hdr[bucket] = b; + } + b[sub_bucket]++; +} + +double HdrSeq::percentile(double level) const { + // target should be non-zero to find the first sample + int target = MAX2(1, (int) (level * num() / 100)); + int cnt = 0; + for (int mag = 0; mag < MagBuckets; mag++) { + if (_hdr[mag] != NULL) { + for (int val = 0; val < ValBuckets; val++) { + cnt += _hdr[mag][val]; + if (cnt >= target) { + return pow(10.0, MagMinimum + mag) * val / ValBuckets; + } + } + } + } + return maximum(); +} + +BinaryMagnitudeSeq::BinaryMagnitudeSeq() { + _mags = NEW_C_HEAP_ARRAY(size_t, BitsPerSize_t, mtInternal); + for (int c = 0; c < BitsPerSize_t; c++) { + _mags[c] = 0; + } + _sum = 0; +} + +BinaryMagnitudeSeq::~BinaryMagnitudeSeq() { + FREE_C_HEAP_ARRAY(size_t, _mags); +} + +void BinaryMagnitudeSeq::add(size_t val) { + Atomic::add(val, &_sum); + + int mag = log2_intptr(val) + 1; + + // Defensively saturate for product bits: + if (mag < 0) { + assert (false, "bucket index (%d) underflow for value (" SIZE_FORMAT ")", mag, val); + 
mag = 0; + } + + if (mag >= BitsPerSize_t) { + assert (false, "bucket index (%d) overflow for value (" SIZE_FORMAT ")", mag, val); + mag = BitsPerSize_t - 1; + } + + Atomic::add((size_t)1, &_mags[mag]); +} + +size_t BinaryMagnitudeSeq::level(int level) const { + if (0 <= level && level < BitsPerSize_t) { + return _mags[level]; + } else { + return 0; + } +} + +size_t BinaryMagnitudeSeq::num() const { + size_t r = 0; + for (int c = 0; c < BitsPerSize_t; c++) { + r += _mags[c]; + } + return r; +} + +size_t BinaryMagnitudeSeq::sum() const { + return _sum; +} + +int BinaryMagnitudeSeq::min_level() const { + for (int c = 0; c < BitsPerSize_t; c++) { + if (_mags[c] != 0) { + return c; + } + } + return BitsPerSize_t - 1; +} + +int BinaryMagnitudeSeq::max_level() const { + for (int c = BitsPerSize_t - 1; c > 0; c--) { + if (_mags[c] != 0) { + return c; + } + } + return 0; +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.hpp 2020-01-17 17:10:25.555129628 +0100 @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHNUMBERSEQ_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHNUMBERSEQ_HPP + +#include "utilities/numberSeq.hpp" + +// HDR sequence stores the low-resolution high-dynamic-range values. +// It does so by maintaining the double array, where first array defines +// the magnitude of the value being stored, and the second array maintains +// the low resolution histogram within that magnitude. For example, storing +// 4.352819 * 10^3 increments the bucket _hdr[3][435]. This allows for +// memory efficient storage of huge amount of samples. +// +// Accepts positive numbers only. +class HdrSeq: public NumberSeq { +private: + enum PrivateConstants { + ValBuckets = 512, + MagBuckets = 24, + MagMinimum = -12 + }; + int** _hdr; + +public: + HdrSeq(); + ~HdrSeq(); + + virtual void add(double val); + double percentile(double level) const; +}; + +// Binary magnitude sequence stores the power-of-two histogram. +// It has very low memory requirements, and is thread-safe. When accuracy +// is not needed, it is preferred over HdrSeq. 
+class BinaryMagnitudeSeq {
+private:
+  size_t  _sum;
+  size_t* _mags;
+
+public:
+  BinaryMagnitudeSeq();
+  ~BinaryMagnitudeSeq();
+
+  void add(size_t val);
+  size_t num() const;
+  size_t level(int level) const;
+  size_t sum() const;
+  int min_level() const;
+  int max_level() const;
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHNUMBERSEQ_HPP
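A minimal standalone sketch of the magnitude/sub-bucket decomposition performed by HdrSeq::add() above; the two constants mirror PrivateConstants, everything else is illustrative and not HotSpot code:

    #include <cstdio>
    #include <cmath>

    static const int ValBuckets = 512;
    static const int MagMinimum = -12;

    int main() {
      // Decompose 4.352819 * 10^3 the way HdrSeq::add does:
      // normalize v into (0.1, 1], counting decimal shifts in mag.
      double v = 4352.819;
      int mag = 0;
      while (v > 1)   { mag++; v /= 10; }
      while (v < 0.1) { mag--; v *= 10; }

      int bucket     = -MagMinimum + mag;      // 12 + 4 = 16
      int sub_bucket = (int)(v * ValBuckets);  // (int)(0.4352819 * 512) = 222

      // percentile() reconstructs a sample from the same pair of indices:
      double approx = pow(10.0, MagMinimum + bucket) * sub_bucket / ValBuckets;
      printf("bucket=%d sub=%d approx=%.1f\n", bucket, sub_bucket, approx); // ~4335.9
      return 0;
    }

Note that the file comment's "_hdr[3][435]" uses idealized decimal notation; with the MagMinimum shift and ValBuckets = 512, the actual indices for that sample are [16][222], as computed here.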
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.hpp	2020-01-17 17:10:26.163129594 +0100
@@ -0,0 +1,357 @@
+/*
+ * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_HPP
+
+#include "gc/shared/referenceProcessor.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahStrDedupQueue.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
+#include "gc/shenandoah/shenandoahTraversalGC.hpp"
+#include "memory/iterator.hpp"
+#include "runtime/thread.hpp"
+
+enum UpdateRefsMode {
+  NONE,       // No reference updating
+  RESOLVE,    // Only a resolve (no reference updating)
+  SIMPLE,     // Reference updating using simple store
+  CONCURRENT  // Reference updating using CAS
+};
+
+enum StringDedupMode {
+  NO_DEDUP,      // Do not do anything for String deduplication
+  ENQUEUE_DEDUP  // Enqueue candidate Strings for deduplication
+};
+
+class ShenandoahMarkRefsSuperClosure : public MetadataVisitingOopIterateClosure {
+private:
+  ShenandoahObjToScanQueue* _queue;
+  ShenandoahHeap* _heap;
+  ShenandoahMarkingContext* const _mark_context;
+
+protected:
+  template <class T, UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP>
+  void work(T *p);
+
+public:
+  ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp);
+};
+
+class ShenandoahMarkUpdateRefsClosure : public ShenandoahMarkRefsSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p) { work<T, CONCURRENT, NO_DEDUP>(p); }
+
+public:
+  ShenandoahMarkUpdateRefsClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+          ShenandoahMarkRefsSuperClosure(q, rp) {};
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual bool do_metadata()        { return false; }
+};
+
+class ShenandoahMarkUpdateRefsDedupClosure : public ShenandoahMarkRefsSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p) { work<T, CONCURRENT, ENQUEUE_DEDUP>(p); }
+
+public:
+  ShenandoahMarkUpdateRefsDedupClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+          ShenandoahMarkRefsSuperClosure(q, rp) {};
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual bool do_metadata()        { return false; }
+};
+
+class ShenandoahMarkUpdateRefsMetadataClosure : public ShenandoahMarkRefsSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p) { work<T, CONCURRENT, NO_DEDUP>(p); }
+
+public:
+  ShenandoahMarkUpdateRefsMetadataClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+          ShenandoahMarkRefsSuperClosure(q, rp) {};
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual bool do_metadata()        { return true; }
+};
+
+class ShenandoahMarkUpdateRefsMetadataDedupClosure : public ShenandoahMarkRefsSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p) { work<T, CONCURRENT, ENQUEUE_DEDUP>(p); }
+
+public:
+  ShenandoahMarkUpdateRefsMetadataDedupClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+          ShenandoahMarkRefsSuperClosure(q, rp) {};
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual bool do_metadata()        { return true; }
+};
+
+class ShenandoahMarkRefsClosure : public ShenandoahMarkRefsSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p) { work<T, NONE, NO_DEDUP>(p); }
+
+public:
+  ShenandoahMarkRefsClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+          ShenandoahMarkRefsSuperClosure(q, rp) {};
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual bool do_metadata()        { return false; }
+};
+
+class ShenandoahMarkRefsDedupClosure : public ShenandoahMarkRefsSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p) { work<T, NONE, ENQUEUE_DEDUP>(p); }
+
+public:
+  ShenandoahMarkRefsDedupClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+          ShenandoahMarkRefsSuperClosure(q, rp) {};
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual bool do_metadata()        { return false; }
+};
+
+class ShenandoahMarkResolveRefsClosure : public ShenandoahMarkRefsSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p) { work<T, RESOLVE, NO_DEDUP>(p); }
+
+public:
+  ShenandoahMarkResolveRefsClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+          ShenandoahMarkRefsSuperClosure(q, rp) {};
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual bool do_metadata()        { return false; }
+};
+
+class ShenandoahMarkRefsMetadataClosure : public ShenandoahMarkRefsSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p) { work<T, NONE, NO_DEDUP>(p); }
+
+public:
+  ShenandoahMarkRefsMetadataClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+          ShenandoahMarkRefsSuperClosure(q, rp) {};
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual bool do_metadata()        { return true; }
+};
+
+class ShenandoahMarkRefsMetadataDedupClosure : public ShenandoahMarkRefsSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p) { work<T, NONE, ENQUEUE_DEDUP>(p); }
+
+public:
+  ShenandoahMarkRefsMetadataDedupClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+          ShenandoahMarkRefsSuperClosure(q, rp) {};
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual bool do_metadata()        { return true; }
+};
+
+class ShenandoahUpdateHeapRefsClosure : public BasicOopIterateClosure {
+private:
+  ShenandoahHeap* _heap;
+
+  template <class T>
+  void do_oop_work(T* p);
+
+public:
+  ShenandoahUpdateHeapRefsClosure() :
+          _heap(ShenandoahHeap::heap()) {}
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+};
+
+class ShenandoahTraversalSuperClosure : public MetadataVisitingOopIterateClosure {
+private:
+  ShenandoahTraversalGC* const _traversal_gc;
+  Thread* const _thread;
+  ShenandoahObjToScanQueue* const _queue;
+  ShenandoahMarkingContext* const _mark_context;
+protected:
+  ShenandoahTraversalSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    MetadataVisitingOopIterateClosure(rp),
+    _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
+    _thread(Thread::current()),
+    _queue(q),
+    _mark_context(ShenandoahHeap::heap()->marking_context()) {
+  }
+
+  template <class T, bool STRING_DEDUP, bool DEGEN>
+  void work(T* p);
+
+};
+
+class ShenandoahTraversalRootsClosure : public ShenandoahTraversalSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p) { work<T, false, false>(p); }
+
+public:
+  ShenandoahTraversalRootsClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahTraversalSuperClosure(q, rp) {}
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+
+  virtual bool do_metadata() { return false; }
+};
+
+class ShenandoahTraversalClosure : public ShenandoahTraversalSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p) { work<T, false, false>(p); }
+
+public:
+  ShenandoahTraversalClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahTraversalSuperClosure(q, rp) {}
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+
+  virtual bool do_metadata() { return false; }
+};
+
+class ShenandoahTraversalMetadataClosure : public ShenandoahTraversalSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p) { work<T, false, false>(p); }
+
+public:
+  ShenandoahTraversalMetadataClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahTraversalSuperClosure(q, rp) {}
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+
+  virtual bool do_metadata() { return true; }
+};
+
+class ShenandoahTraversalDedupClosure : public ShenandoahTraversalSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p) { work<T, true, false>(p); }
+
+public:
+  ShenandoahTraversalDedupClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahTraversalSuperClosure(q, rp) {}
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+
+  virtual bool do_metadata() { return false; }
+};
+
+class ShenandoahTraversalMetadataDedupClosure : public ShenandoahTraversalSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p) { work<T, true, false>(p); }
+
+public:
+  ShenandoahTraversalMetadataDedupClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahTraversalSuperClosure(q, rp) {}
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+
+  virtual bool do_metadata() { return true; }
+};
+
+class ShenandoahTraversalDegenClosure : public ShenandoahTraversalSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p) { work<T, false, true>(p); }
+
+public:
+  ShenandoahTraversalDegenClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahTraversalSuperClosure(q, rp) {}
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+
+  virtual bool do_metadata() { return false; }
+};
+
+class ShenandoahTraversalMetadataDegenClosure : public ShenandoahTraversalSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p) { work<T, false, true>(p); }
+
+public:
+  ShenandoahTraversalMetadataDegenClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahTraversalSuperClosure(q, rp) {}
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+
+  virtual bool do_metadata() { return true; }
+};
+
+class ShenandoahTraversalDedupDegenClosure : public ShenandoahTraversalSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p) { work<T, true, true>(p); }
+
+public:
+  ShenandoahTraversalDedupDegenClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahTraversalSuperClosure(q, rp) {}
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+
+  virtual bool do_metadata() { return false; }
+};
+
+class ShenandoahTraversalMetadataDedupDegenClosure : public ShenandoahTraversalSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p) { work<T, true, true>(p); }
+
+public:
+  ShenandoahTraversalMetadataDedupDegenClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahTraversalSuperClosure(q, rp) {}
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+
+  virtual bool do_metadata() { return true; }
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_HPP
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.inline.hpp	2020-01-17 17:10:26.773129561 +0100
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_INLINE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_INLINE_HPP
+
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
+#include "gc/shenandoah/shenandoahTraversalGC.inline.hpp"
+
+template <class T, UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP>
+inline void ShenandoahMarkRefsSuperClosure::work(T *p) {
+  ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, STRING_DEDUP>(p, _heap, _queue, _mark_context);
+}
+
+template <class T>
+inline void ShenandoahUpdateHeapRefsClosure::do_oop_work(T* p) {
+  _heap->maybe_update_with_forwarded(p);
+}
+
+template <class T, bool STRING_DEDUP, bool DEGEN>
+inline void ShenandoahTraversalSuperClosure::work(T* p) {
+  _traversal_gc->process_oop<T, STRING_DEDUP, DEGEN>(p, _thread, _queue, _mark_context);
+}
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_INLINE_HPP
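The closure family above is deliberately a matrix: each leaf closure pins its template parameters, so the per-oop hot path is specialized at compile time and carries no branch or virtual dispatch on the mode flags. A minimal sketch of the same pattern with toy types (not the HotSpot API):

    #include <cstdio>

    enum Mode { PLAIN, UPDATING };

    struct SuperClosure {
    protected:
      // Mode is a compile-time constant; the branch below folds away
      // in each instantiation, like work<T, UPDATE_REFS, STRING_DEDUP>.
      template <class T, Mode M>
      void work(T* p) {
        if (M == UPDATING) { *p = T(); } // stand-in for "update the reference"
        printf("visited %p (mode=%d)\n", (void*)p, (int)M);
      }
    };

    // Leaf closures pin the arguments, as ShenandoahMarkRefsClosure pins
    // <NONE, NO_DEDUP> and ShenandoahMarkUpdateRefsClosure <CONCURRENT, NO_DEDUP>.
    struct PlainClosure : SuperClosure {
      template <class T> void do_oop(T* p) { work<T, PLAIN>(p); }
    };
    struct UpdatingClosure : SuperClosure {
      template <class T> void do_oop(T* p) { work<T, UPDATING>(p); }
    };

    int main() {
      int x = 1;
      PlainClosure pc;    pc.do_oop(&x);
      UpdatingClosure uc; uc.do_oop(&x);
      return 0;
    }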
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp	2020-01-17 17:10:27.383129527 +0100
@@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/shenandoahFreeSet.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahPacer.hpp"
+
+/*
+ * In normal concurrent cycle, we have to pace the application to let GC finish.
+ *
+ * Here, we do not know how large the collection set would be, or what the relative
+ * performance of each stage in the concurrent cycle is, and so we have to
+ * make some assumptions.
+ *
+ * For concurrent mark, there is no clear notion of progress. The moderately accurate
+ * and easy to get metric is the amount of live objects the mark had encountered. But,
+ * that does not directly correlate with the used heap, because the heap might be fully
+ * dead or fully alive. We cannot assume either of the extremes: we would either allow
+ * application to run out of memory if we assume heap is fully dead but it is not, and,
+ * conversely, we would pacify application excessively if we assume heap is fully alive
+ * but it is not. So we need to guesstimate the particular expected value for heap liveness.
+ * The best way to do this is to record the past history.
+ *
+ * For concurrent evac and update-refs, we are walking the heap per-region, and so the
+ * notion of progress is clear: we get reported the "used" size from the processed regions
+ * and use the global heap-used as the baseline.
+ *
+ * The allocatable space when GC is running is "free" at the start of cycle, but the
+ * accounted budget is based on "used". So, we need to adjust the tax knowing that.
+ * Also, since we effectively count the used space three times (mark, evac, update-refs),
+ * we need to multiply the tax by 3. Example: for 10 MB free and 90 MB used, GC would
+ * come back with 3*90 MB budget, and thus for each 1 MB of allocation, we have to pay
+ * 3*90 / 10 MBs. In the end, we would pay back the entire budget.
+ */
+
+void ShenandoahPacer::setup_for_mark() {
+  assert(ShenandoahPacing, "Only be here when pacing is enabled");
+
+  size_t live = update_and_get_progress_history();
+  size_t free = _heap->free_set()->available();
+
+  size_t non_taxable = free * ShenandoahPacingCycleSlack / 100;
+  size_t taxable = free - non_taxable;
+
+  double tax = 1.0 * live / taxable; // base tax for available free space
+  tax *= 3;                          // mark is phase 1 of 3, claim 1/3 of free for it
+  tax *= ShenandoahPacingSurcharge;  // additional surcharge to help unclutter heap
+
+  restart_with(non_taxable, tax);
+
+  log_info(gc, ergo)("Pacer for Mark. Expected Live: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, "
+                     "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx",
+                     byte_size_in_proper_unit(live),        proper_unit_for_byte_size(live),
+                     byte_size_in_proper_unit(free),        proper_unit_for_byte_size(free),
+                     byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable),
+                     tax);
+}
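To make the tax arithmetic in setup_for_mark() concrete, a worked example with illustrative numbers (the actual flag defaults may differ):

    // Assume free = 10 MB, expected live = 90 MB,
    // ShenandoahPacingCycleSlack = 10 (%), ShenandoahPacingSurcharge = 1.0.
    //
    //   non_taxable = 10 MB * 10 / 100  = 1 MB   (allocatable without any tax)
    //   taxable     = 10 MB - 1 MB      = 9 MB
    //   tax         = 90 / 9            = 10.0
    //   tax        *= 3                 = 30.0   (mark is 1 of 3 phases over "used")
    //
    // So while marking, every allocated word costs 30 words of pacing budget;
    // report_mark() (see shenandoahPacer.inline.hpp below) replenishes the
    // budget as marking makes progress.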
Expected Live: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " + "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", + byte_size_in_proper_unit(live), proper_unit_for_byte_size(live), + byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), + byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), + tax); +} + +void ShenandoahPacer::setup_for_evac() { + assert(ShenandoahPacing, "Only be here when pacing is enabled"); + + size_t used = _heap->collection_set()->used(); + size_t free = _heap->free_set()->available(); + + size_t non_taxable = free * ShenandoahPacingCycleSlack / 100; + size_t taxable = free - non_taxable; + + double tax = 1.0 * used / taxable; // base tax for available free space + tax *= 2; // evac is phase 2 of 3, claim 1/2 of remaining free + tax = MAX2(1, tax); // never allocate more than GC processes during the phase + tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap + + restart_with(non_taxable, tax); + + log_info(gc, ergo)("Pacer for Evacuation. Used CSet: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " + "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", + byte_size_in_proper_unit(used), proper_unit_for_byte_size(used), + byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), + byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), + tax); +} + +void ShenandoahPacer::setup_for_updaterefs() { + assert(ShenandoahPacing, "Only be here when pacing is enabled"); + + size_t used = _heap->used(); + size_t free = _heap->free_set()->available(); + + size_t non_taxable = free * ShenandoahPacingCycleSlack / 100; + size_t taxable = free - non_taxable; + + double tax = 1.0 * used / taxable; // base tax for available free space + tax *= 1; // update-refs is phase 3 of 3, claim the remaining free + tax = MAX2(1, tax); // never allocate more than GC processes during the phase + tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap + + restart_with(non_taxable, tax); + + log_info(gc, ergo)("Pacer for Update Refs. Used: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " + "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", + byte_size_in_proper_unit(used), proper_unit_for_byte_size(used), + byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), + byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), + tax); +} + +/* + * Traversal walks the entire heap once, and therefore we have to make assumptions about its + * liveness, like concurrent mark does. + */ + +void ShenandoahPacer::setup_for_traversal() { + assert(ShenandoahPacing, "Only be here when pacing is enabled"); + + size_t live = update_and_get_progress_history(); + size_t free = _heap->free_set()->available(); + + size_t non_taxable = free * ShenandoahPacingCycleSlack / 100; + size_t taxable = free - non_taxable; + + double tax = 1.0 * live / taxable; // base tax for available free space + tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap + + restart_with(non_taxable, tax); + + log_info(gc, ergo)("Pacer for Traversal. 
Expected Live: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " + "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", + byte_size_in_proper_unit(live), proper_unit_for_byte_size(live), + byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), + byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), + tax); +} + +/* + * In idle phase, we have to pace the application to let control thread react with GC start. + * + * Here, we have rendezvous with concurrent thread that adds up the budget as it acknowledges + * it had seen recent allocations. It will naturally pace the allocations if control thread is + * not catching up. To bootstrap this feedback cycle, we need to start with some initial budget + * for applications to allocate at. + */ + +void ShenandoahPacer::setup_for_idle() { + assert(ShenandoahPacing, "Only be here when pacing is enabled"); + + size_t initial = _heap->max_capacity() / 100 * ShenandoahPacingIdleSlack; + double tax = 1; + + restart_with(initial, tax); + + log_info(gc, ergo)("Pacer for Idle. Initial: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", + byte_size_in_proper_unit(initial), proper_unit_for_byte_size(initial), + tax); +} + +size_t ShenandoahPacer::update_and_get_progress_history() { + if (_progress == -1) { + // First initialization, report some prior + Atomic::store((intptr_t)PACING_PROGRESS_ZERO, &_progress); + return (size_t) (_heap->max_capacity() * 0.1); + } else { + // Record history, and reply historical data + _progress_history->add(_progress); + Atomic::store((intptr_t)PACING_PROGRESS_ZERO, &_progress); + return (size_t) (_progress_history->avg() * HeapWordSize); + } +} + +void ShenandoahPacer::restart_with(size_t non_taxable_bytes, double tax_rate) { + size_t initial = (size_t)(non_taxable_bytes * tax_rate) >> LogHeapWordSize; + STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t)); + Atomic::xchg((intptr_t)initial, &_budget); + Atomic::store(tax_rate, &_tax_rate); + Atomic::inc(&_epoch); +} + +bool ShenandoahPacer::claim_for_alloc(size_t words, bool force) { + assert(ShenandoahPacing, "Only be here when pacing is enabled"); + + intptr_t tax = MAX2(1, words * Atomic::load(&_tax_rate)); + + intptr_t cur = 0; + intptr_t new_val = 0; + do { + cur = Atomic::load(&_budget); + if (cur < tax && !force) { + // Progress depleted, alas. + return false; + } + new_val = cur - tax; + } while (Atomic::cmpxchg(new_val, &_budget, cur) != cur); + return true; +} + +void ShenandoahPacer::unpace_for_alloc(intptr_t epoch, size_t words) { + assert(ShenandoahPacing, "Only be here when pacing is enabled"); + + if (_epoch != epoch) { + // Stale ticket, no need to unpace. + return; + } + + intptr_t tax = MAX2(1, words * Atomic::load(&_tax_rate)); + Atomic::add(tax, &_budget); +} + +intptr_t ShenandoahPacer::epoch() { + return Atomic::load(&_epoch); +} + +void ShenandoahPacer::pace_for_alloc(size_t words) { + assert(ShenandoahPacing, "Only be here when pacing is enabled"); + + // Fast path: try to allocate right away + if (claim_for_alloc(words, false)) { + return; + } + + // Threads that are attaching should not block at all: they are not + // fully initialized yet. Calling sleep() on them would be awkward. + // This is probably the path that allocates the thread oop itself. + // Forcefully claim without waiting. 
+
+void ShenandoahPacer::pace_for_alloc(size_t words) {
+  assert(ShenandoahPacing, "Only be here when pacing is enabled");
+
+  // Fast path: try to allocate right away
+  if (claim_for_alloc(words, false)) {
+    return;
+  }
+
+  // Threads that are attaching should not block at all: they are not
+  // fully initialized yet. Calling sleep() on them would be awkward.
+  // This is probably the path that allocates the thread oop itself.
+  // Forcefully claim without waiting.
+  if (JavaThread::current()->is_attaching_via_jni()) {
+    claim_for_alloc(words, true);
+    return;
+  }
+
+  size_t max = ShenandoahPacingMaxDelay;
+  double start = os::elapsedTime();
+
+  size_t total = 0;
+  size_t cur = 0;
+
+  while (true) {
+    // We could instead assist GC, but this would suffice for now.
+    // This code should also participate in safepointing.
+    // Perform the exponential backoff, limited by max.
+
+    cur = cur * 2;
+    if (total + cur > max) {
+      cur = (max > total) ? (max - total) : 0;
+    }
+    cur = MAX2<size_t>(1, cur);
+
+    os::sleep(Thread::current(), cur, true);
+
+    double end = os::elapsedTime();
+    total = (size_t)((end - start) * 1000);
+
+    if (total > max) {
+      // Spent local time budget to wait for enough GC progress.
+      // Breaking out and allocating anyway, which may mean we outpace GC,
+      // and start Degenerated GC cycle.
+      _delays.add(total);
+
+      // Forcefully claim the budget: it may go negative at this point, and
+      // GC should replenish for this and subsequent allocations
+      claim_for_alloc(words, true);
+      break;
+    }
+
+    if (claim_for_alloc(words, false)) {
+      // Acquired enough permit, nice. Can allocate now.
+      _delays.add(total);
+      break;
+    }
+  }
+}
+
+void ShenandoahPacer::print_on(outputStream* out) const {
+  out->print_cr("ALLOCATION PACING:");
+  out->cr();
+
+  out->print_cr("Max pacing delay is set for " UINTX_FORMAT " ms.", ShenandoahPacingMaxDelay);
+  out->cr();
+
+  out->print_cr("Higher delay would prevent application outpacing the GC, but it will hide the GC latencies");
+  out->print_cr("from the STW pause times. Pacing affects the individual threads, and so it would also be");
+  out->print_cr("invisible to the usual profiling tools, but would add up to end-to-end application latency.");
+  out->print_cr("Raise max pacing delay with care.");
+  out->cr();
+
+  out->print_cr("Actual pacing delays histogram:");
+  out->cr();
+
+  out->print_cr("%10s - %10s  %12s%12s", "From", "To", "Count", "Sum");
+
+  size_t total_count = 0;
+  size_t total_sum = 0;
+  for (int c = _delays.min_level(); c <= _delays.max_level(); c++) {
+    int l = (c == 0) ? 0 : 1 << (c - 1);
+    int r = 1 << c;
+    size_t count = _delays.level(c);
+    size_t sum = count * (r - l) / 2;
+    total_count += count;
+    total_sum += sum;
+
+    out->print_cr("%7d ms - %7d ms: " SIZE_FORMAT_W(12) SIZE_FORMAT_W(12) " ms", l, r, count, sum);
+  }
+  out->print_cr("%23s: " SIZE_FORMAT_W(12) SIZE_FORMAT_W(12) " ms", "Total", total_count, total_sum);
+  out->cr();
+  out->print_cr("Pacing delays are measured from entering the pacing code till exiting it. Therefore,");
+  out->print_cr("observed pacing delays may be higher than the threshold when paced thread spent more");
+  out->print_cr("time in the pacing code. It usually happens when thread is de-scheduled while paced,");
+  out->print_cr("OS takes longer to unblock the thread, or JVM experiences an STW pause.");
+  out->cr();
+}
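Reading the delay histogram printed above: level c covers delays in the range [1 << (c - 1), 1 << c) ms, and the per-level "Sum" approximates every delay in the bucket by half the bucket width, (r - l) / 2. A small worked case:

    // 8 delays recorded at level c = 4 fall into the 8..16 ms bucket:
    //   l = 1 << 3 = 8, r = 1 << 4 = 16
    //   sum = 8 * (16 - 8) / 2 = 32 ms
    // printed as:  "      8 ms -      16 ms:            8          32 ms"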
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahPacer.hpp	2020-01-17 17:10:27.979129494 +0100
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHPACER_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHPACER_HPP
+
+#include "gc/shenandoah/shenandoahNumberSeq.hpp"
+#include "memory/allocation.hpp"
+
+class ShenandoahHeap;
+
+#define PACING_PROGRESS_UNINIT (-1)
+#define PACING_PROGRESS_ZERO   ( 0)
+
+/**
+ * ShenandoahPacer provides allocation pacing mechanism.
+ *
+ * Currently it implements simple tax-and-spend pacing policy: GC threads provide
+ * credit, allocating thread spend the credit, or stall when credit is not available.
+ */
+class ShenandoahPacer : public CHeapObj<mtGC> {
+private:
+  ShenandoahHeap* _heap;
+  BinaryMagnitudeSeq _delays;
+  TruncatedSeq* _progress_history;
+
+  // Set once per phase
+  volatile intptr_t _epoch;
+  volatile double _tax_rate;
+
+  // Heavily updated, protect from accidental false sharing
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile intptr_t));
+  volatile intptr_t _budget;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
+
+  // Heavily updated, protect from accidental false sharing
+  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile intptr_t));
+  volatile intptr_t _progress;
+  DEFINE_PAD_MINUS_SIZE(3, DEFAULT_CACHE_LINE_SIZE, 0);
+
+public:
+  ShenandoahPacer(ShenandoahHeap* heap) :
+          _heap(heap),
+          _progress_history(new TruncatedSeq(5)),
+          _epoch(0),
+          _tax_rate(1),
+          _budget(0),
+          _progress(PACING_PROGRESS_UNINIT) {}
+
+  void setup_for_idle();
+  void setup_for_mark();
+  void setup_for_evac();
+  void setup_for_updaterefs();
+  void setup_for_traversal();
+
+  inline void report_mark(size_t words);
+  inline void report_evac(size_t words);
+  inline void report_updaterefs(size_t words);
+
+  inline void report_alloc(size_t words);
+
+  bool claim_for_alloc(size_t words, bool force);
+  void pace_for_alloc(size_t words);
+  void unpace_for_alloc(intptr_t epoch, size_t words);
+
+  intptr_t epoch();
+
+  void print_on(outputStream* out) const;
+
+private:
+  inline void report_internal(size_t words);
+  inline void report_progress_internal(size_t words);
+
+  void restart_with(size_t non_taxable_bytes, double tax_rate);
+
+  size_t update_and_get_progress_history();
+};
+
+#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHPACER_HPP
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahPacer.inline.hpp	2020-01-17 17:10:28.581129461 +0100
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHPACER_INLINE_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHPACER_INLINE_HPP + +#include "gc/shenandoah/shenandoahPacer.hpp" +#include "runtime/atomic.hpp" + +inline void ShenandoahPacer::report_mark(size_t words) { + report_internal(words); + report_progress_internal(words); +} + +inline void ShenandoahPacer::report_evac(size_t words) { + report_internal(words); +} + +inline void ShenandoahPacer::report_updaterefs(size_t words) { + report_internal(words); +} + +inline void ShenandoahPacer::report_alloc(size_t words) { + report_internal(words); +} + +inline void ShenandoahPacer::report_internal(size_t words) { + assert(ShenandoahPacing, "Only be here when pacing is enabled"); + STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t)); + Atomic::add((intptr_t)words, &_budget); +} + +inline void ShenandoahPacer::report_progress_internal(size_t words) { + assert(ShenandoahPacing, "Only be here when pacing is enabled"); + STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t)); + Atomic::add((intptr_t)words, &_progress); +} + +#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHPACER_INLINE_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahPassiveMode.cpp 2020-01-17 17:10:29.172129429 +0100 @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc/shenandoah/shenandoahPassiveMode.hpp" +#include "gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp" +#include "logging/log.hpp" +#include "logging/logTag.hpp" + +void ShenandoahPassiveMode::initialize_flags() const { + // Do not allow concurrent cycles. + FLAG_SET_DEFAULT(ExplicitGCInvokesConcurrent, false); + FLAG_SET_DEFAULT(ShenandoahImplicitGCInvokesConcurrent, false); + + // Passive runs with max speed for allocation, because GC is always STW + SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahPacing); + + // No need for evacuation reserve with Full GC, only for Degenerated GC. + if (!ShenandoahDegeneratedGC) { + SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahEvacReserve, 0); + } + + // Disable known barriers by default. 
+  SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahLoadRefBarrier);
+  SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahSATBBarrier);
+  SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahKeepAliveBarrier);
+  SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStoreValEnqueueBarrier);
+  SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCASBarrier);
+  SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCloneBarrier);
+
+  // Final configuration checks
+  // No barriers are required to run.
+}
+
+ShenandoahHeuristics* ShenandoahPassiveMode::initialize_heuristics() const {
+  if (ShenandoahGCHeuristics != NULL) {
+    return new ShenandoahPassiveHeuristics();
+  }
+  ShouldNotReachHere();
+  return NULL;
+}
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahPassiveMode.hpp	2020-01-17 17:10:29.778129395 +0100
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHPASSIVEMODE_HPP
+#define SHARE_GC_SHENANDOAH_SHENANDOAHPASSIVEMODE_HPP
+
+#include "gc/shenandoah/shenandoahNormalMode.hpp"
+
+class ShenandoahPassiveMode : public ShenandoahNormalMode {
+public:
+  virtual void initialize_flags() const;
+  virtual ShenandoahHeuristics* initialize_heuristics() const;
+};
+
+#endif // SHARE_GC_SHENANDOAH_SHENANDOAHPASSIVEMODE_HPP
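For context, how this mode is meant to be engaged from the command line. The mode-selection flag itself is defined elsewhere in this patch, so treat the exact spelling below as an assumption to verify against shenandoah_globals.hpp:

    // Hypothetical invocation: passive mode never starts concurrent cycles and
    // runs with all Shenandoah barriers ergonomically disabled; collections
    // happen only as STW Degenerated or Full GC.
    //
    //   java -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC \
    //        -XX:ShenandoahGCMode=passive ...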
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp	2020-01-17 17:10:30.378129362 +0100
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shared/workerDataArray.inline.hpp"
+#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+#include "utilities/ostream.hpp"
+
+#define GC_PHASE_DECLARE_NAME(type, title) \
+  title,
+
+const char* ShenandoahPhaseTimings::_phase_names[] = {
+  SHENANDOAH_GC_PHASE_DO(GC_PHASE_DECLARE_NAME)
+};
+
+#undef GC_PHASE_DECLARE_NAME
+
+ShenandoahPhaseTimings::ShenandoahPhaseTimings() : _policy(NULL) {
+  uint max_workers = MAX2(ConcGCThreads, ParallelGCThreads);
+  _worker_times = new ShenandoahWorkerTimings(max_workers);
+  _termination_times = new ShenandoahTerminationTimings(max_workers);
+  _policy = ShenandoahHeap::heap()->shenandoah_policy();
+  assert(_policy != NULL, "Can not be NULL");
+}
+
+void ShenandoahPhaseTimings::record_phase_start(Phase phase) {
+  _timing_data[phase]._start = os::elapsedTime();
+}
+
+void ShenandoahPhaseTimings::record_phase_end(Phase phase) {
+  assert(_policy != NULL, "Not yet initialized");
+  double end = os::elapsedTime();
+  double elapsed = end - _timing_data[phase]._start;
+  if (!_policy->is_at_shutdown()) {
+    _timing_data[phase]._secs.add(elapsed);
+  }
+  ShenandoahHeap::heap()->heuristics()->record_phase_time(phase, elapsed);
+}
+
+void ShenandoahPhaseTimings::record_phase_time(Phase phase, double time) {
+  assert(_policy != NULL, "Not yet initialized");
+  if (!_policy->is_at_shutdown()) {
+    _timing_data[phase]._secs.add(time);
+  }
+}
+
+void ShenandoahPhaseTimings::record_workers_start(Phase phase) {
+  for (uint i = 0; i < GCParPhasesSentinel; i++) {
+    _worker_times->reset(i);
+  }
+}
+
+void ShenandoahPhaseTimings::record_workers_end(Phase phase) {
+  if (_policy->is_at_shutdown()) {
+    // Do not record the past-shutdown events
+    return;
+  }
+
+  guarantee(phase == init_evac ||
+            phase == scan_roots ||
+            phase == update_roots ||
+            phase == init_traversal_gc_work ||
+            phase == final_traversal_gc_work ||
+            phase == final_traversal_update_roots ||
+            phase == final_update_refs_roots ||
+            phase == full_gc_roots ||
+            phase == degen_gc_update_roots ||
+            phase == _num_phases,
+            "only in these phases we can add per-thread phase times");
+  if (phase != _num_phases) {
+    // Merge _phase_time to counters below the given phase.
+    for (uint i = 0; i < GCParPhasesSentinel; i++) {
+      double t = _worker_times->average(i);
+      _timing_data[phase + i + 1]._secs.add(t);
+    }
+  }
+}
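A worked example of the merge above, under the assumption (relied on by the `phase + i + 1` indexing) that per-thread counters immediately follow their parent phase in SHENANDOAH_GC_PHASE_DO, in the same order as SHENANDOAH_GC_PAR_PHASE_DO:

    // For phase == scan_roots:
    //   i = 0 (ThreadRoots)    -> _timing_data[scan_roots + 0 + 1] == scan_thread_roots
    //   i = 1 (CodeCacheRoots) -> _timing_data[scan_roots + 1 + 1] == scan_code_roots
    //   ... and so on through the "S:" block of the phase table below.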
Use -XX:+PrintSafepointStatistics to dissect."); + out->print_cr(" \"(N)\" (net) pauses are the times spent in the actual GC code."); + out->print_cr(" \"a\" is average time for each phase, look at levels to see if average makes sense."); + out->print_cr(" \"lvls\" are quantiles: 0%% (minimum), 25%%, 50%% (median), 75%%, 100%% (maximum)."); + out->cr(); + + for (uint i = 0; i < _num_phases; i++) { + if (_timing_data[i]._secs.maximum() != 0) { + print_summary_sd(out, _phase_names[i], &(_timing_data[i]._secs)); + } + } +} + +void ShenandoahPhaseTimings::print_summary_sd(outputStream* out, const char* str, const HdrSeq* seq) const { + out->print_cr("%-27s = %8.2lf s (a = %8.0lf us) (n = " INT32_FORMAT_W(5) ") (lvls, us = %8.0lf, %8.0lf, %8.0lf, %8.0lf, %8.0lf)", + str, + seq->sum(), + seq->avg() * 1000000.0, + seq->num(), + seq->percentile(0) * 1000000.0, + seq->percentile(25) * 1000000.0, + seq->percentile(50) * 1000000.0, + seq->percentile(75) * 1000000.0, + seq->maximum() * 1000000.0 + ); +} + +ShenandoahWorkerTimings::ShenandoahWorkerTimings(uint max_gc_threads) : + _max_gc_threads(max_gc_threads) +{ + assert(max_gc_threads > 0, "Must have some GC threads"); + +#define GC_PAR_PHASE_DECLARE_WORKER_DATA(type, title) \ + _gc_par_phases[ShenandoahPhaseTimings::type] = new WorkerDataArray(max_gc_threads, title); + // Root scanning phases + SHENANDOAH_GC_PAR_PHASE_DO(GC_PAR_PHASE_DECLARE_WORKER_DATA) +#undef GC_PAR_PHASE_DECLARE_WORKER_DATA +} + +// record the time a phase took in seconds +void ShenandoahWorkerTimings::record_time_secs(ShenandoahPhaseTimings::GCParPhases phase, uint worker_i, double secs) { + _gc_par_phases[phase]->set(worker_i, secs); +} + +double ShenandoahWorkerTimings::average(uint i) const { + return _gc_par_phases[i]->average(); +} + +void ShenandoahWorkerTimings::reset(uint i) { + _gc_par_phases[i]->reset(); +} + +void ShenandoahWorkerTimings::print() const { + for (uint i = 0; i < ShenandoahPhaseTimings::GCParPhasesSentinel; i++) { + _gc_par_phases[i]->print_summary_on(tty); + } +} + + +ShenandoahTerminationTimings::ShenandoahTerminationTimings(uint max_gc_threads) { + _gc_termination_phase = new WorkerDataArray(max_gc_threads, "Task Termination (ms):"); +} + +void ShenandoahTerminationTimings::record_time_secs(uint worker_id, double secs) { + if (_gc_termination_phase->get(worker_id) == WorkerDataArray::uninitialized()) { + _gc_termination_phase->set(worker_id, secs); + } else { + // worker may re-enter termination phase + _gc_termination_phase->add(worker_id, secs); + } +} + +void ShenandoahTerminationTimings::print() const { + _gc_termination_phase->print_summary_on(tty); +} + +double ShenandoahTerminationTimings::average() const { + return _gc_termination_phase->average(); +} + +void ShenandoahTerminationTimings::reset() { + _gc_termination_phase->reset(); +} + --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp 2020-01-17 17:10:30.980129329 +0100 @@ -0,0 +1,409 @@ + +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHPHASETIMEINGS_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHPHASETIMEINGS_HPP + +#include "gc/shenandoah/shenandoahNumberSeq.hpp" +#include "gc/shared/workerDataArray.hpp" +#include "memory/allocation.hpp" + +class ShenandoahCollectorPolicy; +class ShenandoahWorkerTimings; +class ShenandoahTerminationTimings; +class outputStream; + +#define SHENANDOAH_GC_PHASE_DO(f) \ + f(total_pause_gross, "Total Pauses (G)") \ + f(total_pause, "Total Pauses (N)") \ + f(init_mark_gross, "Pause Init Mark (G)") \ + f(init_mark, "Pause Init Mark (N)") \ + f(accumulate_stats, " Accumulate Stats") \ + f(make_parsable, " Make Parsable") \ + f(clear_liveness, " Clear Liveness") \ + \ + /* Per-thread timer block, should have "roots" counters in consistent order */ \ + f(scan_roots, " Scan Roots") \ + f(scan_thread_roots, " S: Thread Roots") \ + f(scan_code_roots, " S: Code Cache Roots") \ + f(scan_string_table_roots, " S: String Table Roots") \ + f(scan_universe_roots, " S: Universe Roots") \ + f(scan_jni_roots, " S: JNI Roots") \ + f(scan_jni_weak_roots, " S: JNI Weak Roots") \ + f(scan_synchronizer_roots, " S: Synchronizer Roots") \ + f(scan_management_roots, " S: Management Roots") \ + f(scan_system_dictionary_roots, " S: System Dict Roots") \ + f(scan_cldg_roots, " S: CLDG Roots") \ + f(scan_jvmti_roots, " S: JVMTI Roots") \ + f(scan_string_dedup_table_roots, " S: Dedup Table Roots") \ + f(scan_string_dedup_queue_roots, " S: Dedup Queue Roots") \ + f(scan_finish_queues, " S: Finish Queues" ) \ + \ + f(resize_tlabs, " Resize TLABs") \ + \ + f(final_mark_gross, "Pause Final Mark (G)") \ + f(final_mark, "Pause Final Mark (N)") \ + \ + /* Per-thread timer block, should have "roots" counters in consistent order */ \ + f(update_roots, " Update Roots") \ + f(update_thread_roots, " U: Thread Roots") \ + f(update_code_roots, " U: Code Cache Roots") \ + f(update_string_table_roots, " U: String Table Roots") \ + f(update_universe_roots, " U: Universe Roots") \ + f(update_jni_roots, " U: JNI Roots") \ + f(update_jni_weak_roots, " U: JNI Weak Roots") \ + f(update_synchronizer_roots, " U: Synchronizer Roots") \ + f(update_management_roots, " U: Management Roots") \ + f(update_system_dictionary_roots, " U: System Dict Roots") \ + f(update_cldg_roots, " U: CLDG Roots") \ + f(update_jvmti_roots, " U: JVMTI Roots") \ + f(update_string_dedup_table_roots, " U: Dedup Table Roots") \ + f(update_string_dedup_queue_roots, " U: Dedup Queue Roots") \ + f(update_finish_queues, " U: Finish Queues") \ + \ + f(finish_queues, " Finish Queues") \ + f(termination, " Termination") \ + f(weakrefs, " Weak References") \ + f(weakrefs_process, " Process") \ + f(weakrefs_termination, " Termination") \ + f(purge, " System Purge") \ + f(purge_class_unload, " Unload Classes") \ + f(purge_par, " Parallel Cleanup") \ + f(purge_cldg, " CLDG") \ + f(purge_string_dedup, " String Dedup") \ + f(complete_liveness, " Complete Liveness") \ + f(retire_tlabs, " Retire TLABs") \ + 
f(sync_pinned, " Sync Pinned") \ + f(trash_cset, " Trash CSet") \ + f(prepare_evac, " Prepare Evacuation") \ + \ + /* Per-thread timer block, should have "roots" counters in consistent order */ \ + f(init_evac, " Initial Evacuation") \ + f(evac_thread_roots, " E: Thread Roots") \ + f(evac_code_roots, " E: Code Cache Roots") \ + f(evac_string_table_roots, " E: String Table Roots") \ + f(evac_universe_roots, " E: Universe Roots") \ + f(evac_jni_roots, " E: JNI Roots") \ + f(evac_jni_weak_roots, " E: JNI Weak Roots") \ + f(evac_synchronizer_roots, " E: Synchronizer Roots") \ + f(evac_management_roots, " E: Management Roots") \ + f(evac_system_dictionary_roots, " E: System Dict Roots") \ + f(evac_cldg_roots, " E: CLDG Roots") \ + f(evac_jvmti_roots, " E: JVMTI Roots") \ + f(evac_string_dedup_table_roots, " E: String Dedup Table Roots") \ + f(evac_string_dedup_queue_roots, " E: String Dedup Queue Roots") \ + f(evac_finish_queues, " E: Finish Queues") \ + \ + f(final_evac_gross, "Pause Final Evac (G)") \ + f(final_evac, "Pause Final Evac (N)") \ + f(final_evac_retire_gclabs, " Retire GCLABs") \ + \ + f(init_update_refs_gross, "Pause Init Update Refs (G)") \ + f(init_update_refs, "Pause Init Update Refs (N)") \ + f(init_update_refs_retire_gclabs, " Retire GCLABs") \ + f(init_update_refs_prepare, " Prepare") \ + \ + f(final_update_refs_gross, "Pause Final Update Refs (G)") \ + f(final_update_refs, "Pause Final Update Refs (N)") \ + f(final_update_refs_finish_work, " Finish Work") \ + \ + /* Per-thread timer block, should have "roots" counters in consistent order */ \ + f(final_update_refs_roots, " Update Roots") \ + f(final_update_refs_thread_roots, " UR: Thread Roots") \ + f(final_update_refs_code_roots, " UR: Code Cache Roots") \ + f(final_update_refs_string_table_roots, " UR: String Table Roots") \ + f(final_update_refs_universe_roots, " UR: Universe Roots") \ + f(final_update_refs_jni_roots, " UR: JNI Roots") \ + f(final_update_refs_jni_weak_roots, " UR: JNI Weak Roots") \ + f(final_update_refs_synchronizer_roots, " UR: Synchronizer Roots") \ + f(final_update_refs_management_roots, " UR: Management Roots") \ + f(final_update_refs_system_dict_roots, " UR: System Dict Roots") \ + f(final_update_refs_cldg_roots, " UR: CLDG Roots") \ + f(final_update_refs_jvmti_roots, " UR: JVMTI Roots") \ + f(final_update_refs_string_dedup_table_roots, " UR: Dedup Table Roots") \ + f(final_update_refs_string_dedup_queue_roots, " UR: Dedup Queue Roots") \ + f(final_update_refs_finish_queues, " UR: Finish Queues") \ + \ + f(final_update_refs_sync_pinned, " Sync Pinned") \ + f(final_update_refs_trash_cset, " Trash CSet") \ + \ + f(degen_gc_gross, "Pause Degenerated GC (G)") \ + f(degen_gc, "Pause Degenerated GC (N)") \ + \ + /* Per-thread timer block, should have "roots" counters in consistent order */ \ + f(degen_gc_update_roots, " Degen Update Roots") \ + f(degen_gc_update_thread_roots, " DU: Thread Roots") \ + f(degen_gc_update_code_roots, " DU: Code Cache Roots") \ + f(degen_gc_update_string_table_roots, " DU: String Table Roots") \ + f(degen_gc_update_universe_roots, " DU: Universe Roots") \ + f(degen_gc_update_jni_roots, " DU: JNI Roots") \ + f(degen_gc_update_jni_weak_roots, " DU: JNI Weak Roots") \ + f(degen_gc_update_synchronizer_roots, " DU: Synchronizer Roots") \ + f(degen_gc_update_management_roots, " DU: Management Roots") \ + f(degen_gc_update_system_dict_roots, " DU: System Dict Roots") \ + f(degen_gc_update_cldg_roots, " DU: CLDG Roots") \ + f(degen_gc_update_jvmti_roots, " DU: JVMTI Roots") \ + 
f(degen_gc_update_string_dedup_table_roots, " DU: Dedup Table Roots") \ + f(degen_gc_update_string_dedup_queue_roots, " DU: Dedup Queue Roots") \ + f(degen_gc_update_finish_queues, " DU: Finish Queues") \ + \ + f(init_traversal_gc_gross, "Pause Init Traversal (G)") \ + f(init_traversal_gc, "Pause Init Traversal (N)") \ + f(traversal_gc_prepare, " Prepare") \ + f(traversal_gc_accumulate_stats, " Accumulate Stats") \ + f(traversal_gc_make_parsable, " Make Parsable") \ + f(traversal_gc_resize_tlabs, " Resize TLABs") \ + f(traversal_gc_prepare_sync_pinned, " Sync Pinned") \ + \ + /* Per-thread timer block, should have "roots" counters in consistent order */ \ + f(init_traversal_gc_work, " Work") \ + f(init_traversal_gc_thread_roots, " TI: Thread Roots") \ + f(init_traversal_gc_code_roots, " TI: Code Cache Roots") \ + f(init_traversal_gc_string_table_roots, " TI: String Table Roots") \ + f(init_traversal_gc_universe_roots, " TI: Universe Roots") \ + f(init_traversal_gc_jni_roots, " TI: JNI Roots") \ + f(init_traversal_gc_jni_weak_roots, " TI: JNI Weak Roots") \ + f(init_traversal_gc_synchronizer_roots, " TI: Synchronizer Roots") \ + f(init_traversal_gc_management_roots, " TI: Management Roots") \ + f(init_traversal_gc_system_dict_roots, " TI: System Dict Roots") \ + f(init_traversal_gc_cldg_roots, " TI: CLDG Roots") \ + f(init_traversal_gc_jvmti_roots, " TI: JVMTI Roots") \ + f(init_traversal_gc_string_dedup_table_roots, " TI: Dedup Table Roots") \ + f(init_traversal_gc_string_dedup_queue_roots, " TI: Dedup Queue Roots") \ + f(init_traversal_gc_finish_queues, " TI: Finish Queues") \ + \ + f(final_traversal_gc_gross, "Pause Final Traversal (G)") \ + f(final_traversal_gc, "Pause Final Traversal (N)") \ + \ + /* Per-thread timer block, should have "roots" counters in consistent order */ \ + f(final_traversal_gc_work, " Work") \ + f(final_traversal_gc_thread_roots, " TF: Thread Roots") \ + f(final_traversal_gc_code_roots, " TF: Code Cache Roots") \ + f(final_traversal_gc_string_table_roots, " TF: String Table Roots") \ + f(final_traversal_gc_universe_roots, " TF: Universe Roots") \ + f(final_traversal_gc_jni_roots, " TF: JNI Roots") \ + f(final_traversal_gc_jni_weak_roots, " TF: JNI Weak Roots") \ + f(final_traversal_gc_synchronizer_roots, " TF: Synchronizer Roots") \ + f(final_traversal_gc_management_roots, " TF: Management Roots") \ + f(final_traversal_gc_system_dict_roots, " TF: System Dict Roots") \ + f(final_traversal_gc_cldg_roots, " TF: CLDG Roots") \ + f(final_traversal_gc_jvmti_roots, " TF: JVMTI Roots") \ + f(final_traversal_gc_string_dedup_table_roots, " TF: Dedup Table Roots") \ + f(final_traversal_gc_string_dedup_queue_roots, " TF: Dedup Queue Roots") \ + f(final_traversal_gc_finish_queues, " TF: Finish Queues") \ + f(final_traversal_gc_termination, " TF: Termination") \ + \ + /* Per-thread timer block, should have "roots" counters in consistent order */ \ + f(final_traversal_update_roots, " Update Roots") \ + f(final_traversal_update_thread_roots, " TU: Thread Roots") \ + f(final_traversal_update_code_roots, " TU: Code Cache Roots") \ + f(final_traversal_update_string_table_roots, " TU: String Table Roots") \ + f(final_traversal_update_universe_roots, " TU: Universe Roots") \ + f(final_traversal_update_jni_roots, " TU: JNI Roots") \ + f(final_traversal_update_jni_weak_roots, " TU: JNI Weak Roots") \ + f(final_traversal_update_synchronizer_roots, " TU: Synchronizer Roots") \ + f(final_traversal_update_management_roots, " TU: Management Roots") \ + 
f(final_traversal_update_system_dict_roots, " TU: System Dict Roots") \ + f(final_traversal_update_cldg_roots, " TU: CLDG Roots") \ + f(final_traversal_update_jvmti_roots, " TU: JVMTI Roots") \ + f(final_traversal_update_string_dedup_table_roots, " TU: Dedup Table Roots") \ + f(final_traversal_update_string_dedup_queue_roots, " TU: Dedup Queue Roots") \ + f(final_traversal_update_finish_queues, " TU: Finish Queues") \ + \ + f(traversal_gc_sync_pinned, " Sync Pinned") \ + f(traversal_gc_cleanup, " Cleanup") \ + \ + f(full_gc_gross, "Pause Full GC (G)") \ + f(full_gc, "Pause Full GC (N)") \ + f(full_gc_heapdumps, " Heap Dumps") \ + f(full_gc_prepare, " Prepare") \ + \ + /* Per-thread timer block, should have "roots" counters in consistent order */ \ + f(full_gc_roots, " Roots") \ + f(full_gc_thread_roots, " F: Thread Roots") \ + f(full_gc_code_roots, " F: Code Cache Roots") \ + f(full_gc_string_table_roots, " F: String Table Roots") \ + f(full_gc_universe_roots, " F: Universe Roots") \ + f(full_gc_jni_roots, " F: JNI Roots") \ + f(full_gc_jni_weak_roots, " F: JNI Weak Roots") \ + f(full_gc_synchronizer_roots, " F: Synchronizer Roots") \ + f(full_gc_management_roots, " F: Management Roots") \ + f(full_gc_system_dictionary_roots, " F: System Dict Roots") \ + f(full_gc_cldg_roots, " F: CLDG Roots") \ + f(full_gc_jvmti_roots, " F: JVMTI Roots") \ + f(full_gc_string_dedup_table_roots, " F: Dedup Table Roots") \ + f(full_gc_string_dedup_queue_roots, " F: Dedup Queue Roots") \ + f(full_gc_finish_queues, " F: Finish Queues") \ + \ + f(full_gc_mark, " Mark") \ + f(full_gc_mark_finish_queues, " Finish Queues") \ + f(full_gc_mark_termination, " Termination") \ + f(full_gc_weakrefs, " Weak References") \ + f(full_gc_weakrefs_process, " Process") \ + f(full_gc_weakrefs_termination, " Termination") \ + f(full_gc_purge, " System Purge") \ + f(full_gc_purge_class_unload, " Unload Classes") \ + f(full_gc_purge_par, " Parallel Cleanup") \ + f(full_gc_purge_cldg, " CLDG") \ + f(full_gc_purge_string_dedup, " String Dedup") \ + f(full_gc_calculate_addresses, " Calculate Addresses") \ + f(full_gc_calculate_addresses_regular, " Regular Objects") \ + f(full_gc_calculate_addresses_humong, " Humongous Objects") \ + f(full_gc_adjust_pointers, " Adjust Pointers") \ + f(full_gc_copy_objects, " Copy Objects") \ + f(full_gc_copy_objects_regular, " Regular Objects") \ + f(full_gc_copy_objects_humong, " Humongous Objects") \ + f(full_gc_copy_objects_reset_complete, " Reset Complete Bitmap") \ + f(full_gc_copy_objects_rebuild, " Rebuild Region Sets") \ + f(full_gc_resize_tlabs, " Resize TLABs") \ + \ + /* Longer concurrent phases at the end */ \ + f(conc_reset, "Concurrent Reset") \ + f(conc_mark, "Concurrent Marking") \ + f(conc_termination, " Termination") \ + f(conc_preclean, "Concurrent Precleaning") \ + f(conc_evac, "Concurrent Evacuation") \ + f(conc_update_refs, "Concurrent Update Refs") \ + f(conc_cleanup, "Concurrent Cleanup") \ + f(conc_traversal, "Concurrent Traversal") \ + f(conc_traversal_termination, " Termination") \ + \ + f(conc_uncommit, "Concurrent Uncommit") \ + \ + /* Unclassified */ \ + f(pause_other, "Pause Other") \ + f(conc_other, "Concurrent Other") \ + // end + +#define SHENANDOAH_GC_PAR_PHASE_DO(f) \ + f(ThreadRoots, "Thread Roots (ms):") \ + f(CodeCacheRoots, "CodeCache Roots (ms):") \ + f(StringTableRoots, "StringTable Roots (ms):") \ + f(UniverseRoots, "Universe Roots (ms):") \ + f(JNIRoots, "JNI Handles Roots (ms):") \ + f(JNIWeakRoots, "JNI Weak Roots (ms):") \ + f(ObjectSynchronizerRoots, 
"ObjectSynchronizer Roots (ms):") \ + f(ManagementRoots, "Management Roots (ms):") \ + f(SystemDictionaryRoots, "SystemDictionary Roots (ms):") \ + f(CLDGRoots, "CLDG Roots (ms):") \ + f(JVMTIRoots, "JVMTI Roots (ms):") \ + f(StringDedupTableRoots, "String Dedup Table Roots (ms):") \ + f(StringDedupQueueRoots, "String Dedup Queue Roots (ms):") \ + f(FinishQueues, "Finish Queues (ms):") \ + // end + +class ShenandoahPhaseTimings : public CHeapObj { +public: +#define GC_PHASE_DECLARE_ENUM(type, title) type, + + enum Phase { + SHENANDOAH_GC_PHASE_DO(GC_PHASE_DECLARE_ENUM) + _num_phases + }; + + // These are the subphases of GC phases (scan_roots, update_roots, + // init_evac, final_update_refs_roots and full_gc_roots). + // Make sure they are following this order. + enum GCParPhases { + SHENANDOAH_GC_PAR_PHASE_DO(GC_PHASE_DECLARE_ENUM) + GCParPhasesSentinel + }; + +#undef GC_PHASE_DECLARE_ENUM + +private: + struct TimingData { + HdrSeq _secs; + double _start; + }; + +private: + TimingData _timing_data[_num_phases]; + static const char* _phase_names[_num_phases]; + + ShenandoahWorkerTimings* _worker_times; + ShenandoahTerminationTimings* _termination_times; + + ShenandoahCollectorPolicy* _policy; + +public: + ShenandoahPhaseTimings(); + + ShenandoahWorkerTimings* const worker_times() const { return _worker_times; } + ShenandoahTerminationTimings* const termination_times() const { return _termination_times; } + + // record phase start + void record_phase_start(Phase phase); + // record phase end and return elapsed time in seconds for the phase + void record_phase_end(Phase phase); + // record an elapsed time for the phase + void record_phase_time(Phase phase, double time); + + void record_workers_start(Phase phase); + void record_workers_end(Phase phase); + + static const char* phase_name(Phase phase) { + assert(phase >= 0 && phase < _num_phases, "Out of bound"); + return _phase_names[phase]; + } + + void print_on(outputStream* out) const; + +private: + void init_phase_names(); + void print_summary_sd(outputStream* out, const char* str, const HdrSeq* seq) const; +}; + +class ShenandoahWorkerTimings : public CHeapObj { +private: + uint _max_gc_threads; + WorkerDataArray* _gc_par_phases[ShenandoahPhaseTimings::GCParPhasesSentinel]; + +public: + ShenandoahWorkerTimings(uint max_gc_threads); + + // record the time a phase took in seconds + void record_time_secs(ShenandoahPhaseTimings::GCParPhases phase, uint worker_i, double secs); + + double average(uint i) const; + void reset(uint i); + void print() const; +}; + +class ShenandoahTerminationTimings : public CHeapObj { +private: + WorkerDataArray* _gc_termination_phase; +public: + ShenandoahTerminationTimings(uint max_gc_threads); + + // record the time a phase took in seconds + void record_time_secs(uint worker_i, double secs); + + double average() const; + void reset(); + + void print() const; +}; + +#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHGCPHASETIMEINGS_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp 2020-01-17 17:10:31.576129296 +0100 @@ -0,0 +1,228 @@ +/* + * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp 2020-01-17 17:10:31.576129296 +0100
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "classfile/classLoaderData.hpp"
+#include "classfile/stringTable.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "code/codeCache.hpp"
+#include "gc/shenandoah/shenandoahClosures.inline.hpp"
+#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
+#include "gc/shenandoah/shenandoahStringDedup.hpp"
+#include "gc/shenandoah/shenandoahTimingTracker.hpp"
+#include "gc/shenandoah/shenandoahVMOperations.hpp"
+#include "gc/shared/weakProcessor.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/iterator.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/thread.hpp"
+#include "services/management.hpp"
+
+ShenandoahSerialRoot::ShenandoahSerialRoot(ShenandoahSerialRoot::OopsDo oops_do, ShenandoahPhaseTimings::GCParPhases phase) :
+  _claimed(false), _oops_do(oops_do), _phase(phase) {
+}
+
+void ShenandoahSerialRoot::oops_do(OopClosure* cl, uint worker_id) {
+  if (!_claimed && Atomic::cmpxchg(true, &_claimed, false) == false) {
+    ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
+    ShenandoahWorkerTimingsTracker timer(worker_times, _phase, worker_id);
+    _oops_do(cl);
+  }
+}
+
+ShenandoahSerialRoots::ShenandoahSerialRoots() :
+  _universe_root(&ShenandoahSerialRoots::universe_oops_do, ShenandoahPhaseTimings::UniverseRoots),
+  _object_synchronizer_root(&ObjectSynchronizer::oops_do, ShenandoahPhaseTimings::ObjectSynchronizerRoots),
+  _management_root(&Management::oops_do, ShenandoahPhaseTimings::ManagementRoots),
+  _system_dictionary_root(&SystemDictionary::oops_do, ShenandoahPhaseTimings::SystemDictionaryRoots),
+  _jvmti_root(&JvmtiExport::oops_do, ShenandoahPhaseTimings::JVMTIRoots) {
+}
+
+void ShenandoahSerialRoots::oops_do(OopClosure* cl, uint worker_id) {
+  _universe_root.oops_do(cl, worker_id);
+  _object_synchronizer_root.oops_do(cl, worker_id);
+  _management_root.oops_do(cl, worker_id);
+  _system_dictionary_root.oops_do(cl, worker_id);
+  _jvmti_root.oops_do(cl, worker_id);
+}
+
+ShenandoahJNIHandleRoots::ShenandoahJNIHandleRoots() :
+  ShenandoahSerialRoot(&JNIHandles::oops_do, ShenandoahPhaseTimings::JNIRoots) {
+}
+
+ShenandoahThreadRoots::ShenandoahThreadRoots(bool is_par) : _is_par(is_par) {
+  Threads::change_thread_claim_parity();
+}
+
+void ShenandoahThreadRoots::oops_do(OopClosure* oops_cl, CodeBlobClosure* code_cl, uint worker_id) {
+  ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
+  ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ThreadRoots, worker_id);
+  ResourceMark rm;
+  Threads::possibly_parallel_oops_do(_is_par, oops_cl, code_cl);
+}
+
+void ShenandoahThreadRoots::threads_do(ThreadClosure* tc, uint worker_id) {
+  ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
+  ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ThreadRoots, worker_id);
+  ResourceMark rm;
+  Threads::possibly_parallel_threads_do(_is_par, tc);
+}
+
+ShenandoahThreadRoots::~ShenandoahThreadRoots() {
+  Threads::assert_all_threads_claimed();
+}
+
+ShenandoahWeakRoots::ShenandoahWeakRoots(uint n_workers) :
+  _par_state_string(StringTable::weak_storage()),
+  _claimed(false) {
+}
+
+ShenandoahWeakRoots::~ShenandoahWeakRoots() {
+}
+
+ShenandoahStringDedupRoots::ShenandoahStringDedupRoots() {
+  if (ShenandoahStringDedup::is_enabled()) {
+    StringDedup::gc_prologue(false);
+  }
+}
+
+ShenandoahStringDedupRoots::~ShenandoahStringDedupRoots() {
+  if (ShenandoahStringDedup::is_enabled()) {
+    StringDedup::gc_epilogue();
+  }
+}
+
+void ShenandoahStringDedupRoots::oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, uint worker_id) {
+  if (ShenandoahStringDedup::is_enabled()) {
+    ShenandoahStringDedup::parallel_oops_do(is_alive, keep_alive, worker_id);
+  }
+}
+
+ShenandoahRootProcessor::ShenandoahRootProcessor(ShenandoahPhaseTimings::Phase phase) :
+  _heap(ShenandoahHeap::heap()),
+  _phase(phase) {
+  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
+  _heap->phase_timings()->record_workers_start(_phase);
+}
+
+ShenandoahRootProcessor::~ShenandoahRootProcessor() {
+  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
+  _heap->phase_timings()->record_workers_end(_phase);
+}
+
+ShenandoahRootEvacuator::ShenandoahRootEvacuator(uint n_workers, ShenandoahPhaseTimings::Phase phase) :
+  ShenandoahRootProcessor(phase),
+  _thread_roots(n_workers > 1),
+  _weak_roots(n_workers) {
+}
+
+void ShenandoahRootEvacuator::roots_do(uint worker_id, OopClosure* oops) {
+  MarkingCodeBlobClosure blobsCl(oops, CodeBlobToOopClosure::FixRelocations);
+  CLDToOopClosure clds(oops);
+  CLDToOopClosure* weak_clds = ShenandoahHeap::heap()->unload_classes() ?
NULL : &clds; + + AlwaysTrueClosure always_true; + + _serial_roots.oops_do(oops, worker_id); + _jni_roots.oops_do(oops, worker_id); + + _thread_roots.oops_do(oops, NULL, worker_id); + _cld_roots.cld_do(&clds, worker_id); + _code_roots.code_blobs_do(&blobsCl, worker_id); + + _weak_roots.oops_do(&always_true, oops, worker_id); + _dedup_roots.oops_do(&always_true, oops, worker_id); +} + +ShenandoahRootUpdater::ShenandoahRootUpdater(uint n_workers, ShenandoahPhaseTimings::Phase phase, bool update_code_cache) : + ShenandoahRootProcessor(phase), + _thread_roots(n_workers > 1), + _weak_roots(n_workers), + _update_code_cache(update_code_cache) { +} + +ShenandoahRootAdjuster::ShenandoahRootAdjuster(uint n_workers, ShenandoahPhaseTimings::Phase phase) : + ShenandoahRootProcessor(phase), + _thread_roots(n_workers > 1), + _weak_roots(n_workers) { + assert(ShenandoahHeap::heap()->is_full_gc_in_progress(), "Full GC only"); +} + +void ShenandoahRootAdjuster::roots_do(uint worker_id, OopClosure* oops) { + CodeBlobToOopClosure adjust_code_closure(oops, CodeBlobToOopClosure::FixRelocations); + CLDToOopClosure adjust_cld_closure(oops); + AlwaysTrueClosure always_true; + + _serial_roots.oops_do(oops, worker_id); + _jni_roots.oops_do(oops, worker_id); + + _thread_roots.oops_do(oops, NULL, worker_id); + _cld_roots.cld_do(&adjust_cld_closure, worker_id); + _code_roots.code_blobs_do(&adjust_code_closure, worker_id); + + _weak_roots.oops_do(&always_true, oops, worker_id); + _dedup_roots.oops_do(&always_true, oops, worker_id); +} + + ShenandoahHeapIterationRootScanner::ShenandoahHeapIterationRootScanner() : + ShenandoahRootProcessor(ShenandoahPhaseTimings::_num_phases), + _thread_roots(false /*is par*/), + _weak_roots(1) { + } + + void ShenandoahHeapIterationRootScanner::roots_do(OopClosure* oops) { + assert(Thread::current()->is_VM_thread(), "Only by VM thread"); + // Must use _claim_none to avoid interfering with concurrent CLDG iteration + CLDToOopClosure clds(oops, false); + MarkingCodeBlobClosure code(oops, !CodeBlobToOopClosure::FixRelocations); + ShenandoahParallelOopsDoThreadClosure tc_cl(oops, &code, NULL); + AlwaysTrueClosure always_true; + ResourceMark rm; + + _serial_roots.oops_do(oops, 0); + _jni_roots.oops_do(oops, 0); + _cld_roots.cld_do(&clds, 0); + _thread_roots.threads_do(&tc_cl, 0); + _code_roots.code_blobs_do(&code, 0); + + _weak_roots.oops_do(&always_true, oops, 0); + _dedup_roots.oops_do(&always_true, oops, 0); + } + + void ShenandoahHeapIterationRootScanner::strong_roots_do(OopClosure* oops) { + assert(Thread::current()->is_VM_thread(), "Only by VM thread"); + // Must use _claim_none to avoid interfering with concurrent CLDG iteration + CLDToOopClosure clds(oops, false); + MarkingCodeBlobClosure code(oops, !CodeBlobToOopClosure::FixRelocations); + ShenandoahParallelOopsDoThreadClosure tc_cl(oops, &code, NULL); + ResourceMark rm; + + _serial_roots.oops_do(oops, 0); + _jni_roots.oops_do(oops, 0); + _cld_roots.always_strong_cld_do(&clds, 0); + _thread_roots.threads_do(&tc_cl, 0); + } --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp 2020-01-17 17:10:32.176129263 +0100 @@ -0,0 +1,231 @@ +/* + * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_HPP
+#define SHARE_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_HPP
+
+#include "code/codeCache.hpp"
+#include "gc/shared/oopStorageParState.hpp"
+#include "gc/shenandoah/shenandoahCodeRoots.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
+#include "gc/shared/strongRootsScope.hpp"
+#include "gc/shared/weakProcessor.hpp"
+#include "gc/shared/workgroup.hpp"
+#include "memory/allocation.hpp"
+#include "memory/iterator.hpp"
+
+class ShenandoahSerialRoot {
+public:
+  typedef void (*OopsDo)(OopClosure*);
+private:
+  volatile bool                             _claimed;
+  const OopsDo                              _oops_do;
+  const ShenandoahPhaseTimings::GCParPhases _phase;
+
+public:
+  ShenandoahSerialRoot(OopsDo oops_do, ShenandoahPhaseTimings::GCParPhases);
+  void oops_do(OopClosure* cl, uint worker_id);
+};
+
+class ShenandoahSerialRoots {
+private:
+  ShenandoahSerialRoot _universe_root;
+  ShenandoahSerialRoot _object_synchronizer_root;
+  ShenandoahSerialRoot _management_root;
+  ShenandoahSerialRoot _system_dictionary_root;
+  ShenandoahSerialRoot _jvmti_root;
+
+  // Proxy to make weird Universe::oops_do() signature match OopsDo
+  static void universe_oops_do(OopClosure* cl) { Universe::oops_do(cl); }
+
+public:
+  ShenandoahSerialRoots();
+  void oops_do(OopClosure* cl, uint worker_id);
+};
+
+class ShenandoahJNIHandleRoots : public ShenandoahSerialRoot {
+public:
+  ShenandoahJNIHandleRoots();
+};
+
+class ShenandoahThreadRoots {
+private:
+  const bool _is_par;
+public:
+  ShenandoahThreadRoots(bool is_par);
+  ~ShenandoahThreadRoots();
+
+  void oops_do(OopClosure* oops_cl, CodeBlobClosure* code_cl, uint worker_id);
+  void threads_do(ThreadClosure* tc, uint worker_id);
+};
+
+class ShenandoahWeakRoots {
+  OopStorage::ParState<false, false> _par_state_string;
+  volatile bool _claimed;
+
+public:
+  ShenandoahWeakRoots(uint n_workers);
+  ~ShenandoahWeakRoots();
+
+  template <typename IsAlive, typename KeepAlive>
+  void oops_do(IsAlive* is_alive, KeepAlive* keep_alive, uint worker_id);
+};
+
+class ShenandoahStringDedupRoots {
+public:
+  ShenandoahStringDedupRoots();
+  ~ShenandoahStringDedupRoots();
+
+  void oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, uint worker_id);
+};
+
+template <typename ITR>
+class ShenandoahCodeCacheRoots {
+private:
+  ITR _coderoots_iterator;
+public:
+  ShenandoahCodeCacheRoots();
+  ~ShenandoahCodeCacheRoots();
+
+  void code_blobs_do(CodeBlobClosure* blob_cl, uint worker_id);
+};
+
+template <bool SINGLE_THREADED>
+class ShenandoahClassLoaderDataRoots {
+public:
+  ShenandoahClassLoaderDataRoots();
+
+  void always_strong_cld_do(CLDClosure* clds, uint worker_id);
+  void cld_do(CLDClosure* clds, uint worker_id);
+};
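Each ShenandoahSerialRoot above is processed exactly once per pause: the first worker to flip _claimed via Atomic::cmpxchg wins and runs the stored oops_do, everyone else skips it (see ShenandoahSerialRoot::oops_do in shenandoahRootProcessor.cpp above). A standalone sketch of that claim-once pattern, using std::atomic in place of HotSpot's Atomic:

    #include <atomic>
    #include <cstdio>

    struct ClaimOnceRoot {
      std::atomic<bool> claimed{false};

      // Returns true for exactly one caller, no matter how many threads race.
      // The plain load up front mirrors the "!_claimed &&" fast path: it skips
      // the more expensive CAS once somebody has already claimed the root.
      bool try_claim() {
        bool expected = false;
        return !claimed.load(std::memory_order_relaxed) &&
               claimed.compare_exchange_strong(expected, true);
      }
    };

    int main() {
      ClaimOnceRoot universe_root;
      bool first  = universe_root.try_claim();
      bool second = universe_root.try_claim();
      printf("%d %d\n", first, second);  // prints "1 0"
      return 0;
    }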
+
+class ShenandoahRootProcessor : public StackObj {
+private:
+  ShenandoahHeap* const _heap;
+  const ShenandoahPhaseTimings::Phase _phase;
+public:
+  ShenandoahRootProcessor(ShenandoahPhaseTimings::Phase phase);
+  ~ShenandoahRootProcessor();
+
+  ShenandoahHeap* heap() const { return _heap; }
+};
+
+template <typename ITR>
+class ShenandoahRootScanner : public ShenandoahRootProcessor {
+private:
+  ShenandoahSerialRoots _serial_roots;
+  ShenandoahThreadRoots _thread_roots;
+  ShenandoahCodeCacheRoots<ITR> _code_roots;
+  ShenandoahJNIHandleRoots _jni_roots;
+  ShenandoahClassLoaderDataRoots<false /*single threaded*/> _cld_roots;
+public:
+  ShenandoahRootScanner(uint n_workers, ShenandoahPhaseTimings::Phase phase);
+
+  // Apply oops, clds and blobs to all strongly reachable roots in the system,
+  // during a class unloading cycle
+  void strong_roots_do(uint worker_id, OopClosure* cl);
+  void strong_roots_do(uint worker_id, OopClosure* oops, CLDClosure* clds, CodeBlobClosure* code, ThreadClosure* tc = NULL);
+
+  // Apply oops, clds and blobs to all strongly reachable roots and weakly reachable
+  // roots when class unloading is disabled during this cycle
+  void roots_do(uint worker_id, OopClosure* cl);
+  void roots_do(uint worker_id, OopClosure* oops, CLDClosure* clds, CodeBlobClosure* code, ThreadClosure* tc = NULL);
+};
+
+typedef ShenandoahRootScanner<ShenandoahAllCodeRootsIterator> ShenandoahAllRootScanner;
+typedef ShenandoahRootScanner<ShenandoahCsetCodeRootsIterator> ShenandoahCSetRootScanner;
+
+// This scanner is only for SH::object_iteration() and only supports single-threaded
+// root scanning
+class ShenandoahHeapIterationRootScanner : public ShenandoahRootProcessor {
+private:
+  ShenandoahSerialRoots _serial_roots;
+  ShenandoahThreadRoots _thread_roots;
+  ShenandoahJNIHandleRoots _jni_roots;
+  ShenandoahClassLoaderDataRoots<true /*single threaded*/> _cld_roots;
+  ShenandoahWeakRoots _weak_roots;
+  ShenandoahStringDedupRoots _dedup_roots;
+  ShenandoahCodeCacheRoots<ShenandoahAllCodeRootsIterator> _code_roots;
+
+public:
+  ShenandoahHeapIterationRootScanner();
+
+  void roots_do(OopClosure* cl);
+  void strong_roots_do(OopClosure* cl);
+};
+
+// Evacuate all roots at a safepoint
+class ShenandoahRootEvacuator : public ShenandoahRootProcessor {
+private:
+  ShenandoahSerialRoots _serial_roots;
+  ShenandoahJNIHandleRoots _jni_roots;
+  ShenandoahClassLoaderDataRoots<false /*single threaded*/> _cld_roots;
+  ShenandoahThreadRoots _thread_roots;
+  ShenandoahWeakRoots _weak_roots;
+  ShenandoahStringDedupRoots _dedup_roots;
+  ShenandoahCodeCacheRoots<ShenandoahCsetCodeRootsIterator> _code_roots;
+
+public:
+  ShenandoahRootEvacuator(uint n_workers, ShenandoahPhaseTimings::Phase phase);
+
+  void roots_do(uint worker_id, OopClosure* oops);
+};
+
+// Update all roots at a safepoint
+class ShenandoahRootUpdater : public ShenandoahRootProcessor {
+private:
+  ShenandoahSerialRoots _serial_roots;
+  ShenandoahJNIHandleRoots _jni_roots;
+  ShenandoahClassLoaderDataRoots<false /*single threaded*/> _cld_roots;
+  ShenandoahThreadRoots _thread_roots;
+  ShenandoahWeakRoots _weak_roots;
+  ShenandoahStringDedupRoots _dedup_roots;
+  ShenandoahCodeCacheRoots<ShenandoahCsetCodeRootsIterator> _code_roots;
+  const bool _update_code_cache;
+
+public:
+  ShenandoahRootUpdater(uint n_workers, ShenandoahPhaseTimings::Phase phase, bool update_code_cache);
+
+  template <typename IsAlive, typename KeepAlive>
+  void roots_do(uint worker_id, IsAlive* is_alive, KeepAlive* keep_alive);
+};
+
+// Adjust all roots at a safepoint during full gc
+class ShenandoahRootAdjuster : public ShenandoahRootProcessor {
+private:
+  ShenandoahSerialRoots _serial_roots;
+  ShenandoahJNIHandleRoots _jni_roots;
+  ShenandoahClassLoaderDataRoots<false /*single threaded*/> _cld_roots;
+  ShenandoahThreadRoots _thread_roots;
+  ShenandoahWeakRoots _weak_roots;
+  ShenandoahStringDedupRoots _dedup_roots;
+  ShenandoahCodeCacheRoots<ShenandoahAllCodeRootsIterator> _code_roots;
+
+public:
+  ShenandoahRootAdjuster(uint n_workers, ShenandoahPhaseTimings::Phase phase);
+
+  void roots_do(uint worker_id, OopClosure* oops);
+};
+
+#endif // SHARE_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_HPP
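ShenandoahRootUpdater::roots_do above is templated on an IsAlive/KeepAlive closure pair: the predicate decides whether a weak referent survived the cycle, and the update closure rewrites surviving slots to their new locations. A standalone analogue of that protocol (plain ints stand in for oops; none of these names are from the patch):

    #include <cstdio>
    #include <vector>

    struct IsAliveFn   { bool operator()(int v) const { return v >= 0; } };
    struct KeepAliveFn { void operator()(int& v) const { v += 1000; } };  // "forward" the slot

    // Analogue of ShenandoahRootUpdater::roots_do: weak slots are only updated
    // when their referent is still alive; dead ones are left for clearing.
    template <typename IsAlive, typename KeepAlive>
    void update_weak_roots(std::vector<int>& slots, IsAlive* is_alive, KeepAlive* keep_alive) {
      for (int& slot : slots) {
        if ((*is_alive)(slot)) {
          (*keep_alive)(slot);
        }
      }
    }

    int main() {
      std::vector<int> slots = { 1, -1, 2 };   // -1 plays the role of a dead referent
      IsAliveFn is_alive;
      KeepAliveFn keep_alive;
      update_weak_roots(slots, &is_alive, &keep_alive);
      printf("%d %d %d\n", slots[0], slots[1], slots[2]);  // 1001 -1 1002
      return 0;
    }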
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.inline.hpp 2020-01-17 17:10:32.784129230 +0100
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_INLINE_HPP
+#define SHARE_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_INLINE_HPP
+
+#include "classfile/stringTable.hpp"
+#include "gc/shenandoah/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/shenandoahRootProcessor.hpp"
+#include "gc/shenandoah/shenandoahTimingTracker.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/safepoint.hpp"
+
+template <typename IsAlive, typename KeepAlive>
+void ShenandoahWeakRoots::oops_do(IsAlive* is_alive, KeepAlive* keep_alive, uint worker_id) {
+  if (!_claimed && Atomic::cmpxchg(true, &_claimed, false) == false) {
+    WeakProcessor::weak_oops_do(is_alive, keep_alive);
+  }
+  StringTable::possibly_parallel_oops_do(&_par_state_string, keep_alive);
+}
+
+template <bool SINGLE_THREADED>
+ShenandoahClassLoaderDataRoots<SINGLE_THREADED>::ShenandoahClassLoaderDataRoots() {
+  if (!SINGLE_THREADED) {
+    ClassLoaderDataGraph::clear_claimed_marks();
+  }
+}
+
+template <bool SINGLE_THREADED>
+void ShenandoahClassLoaderDataRoots<SINGLE_THREADED>::always_strong_cld_do(CLDClosure* clds, uint worker_id) {
+  if (SINGLE_THREADED) {
+    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
+    assert(Thread::current()->is_VM_thread(), "Single threaded CLDG iteration can only be done by VM thread");
+    ClassLoaderDataGraph::always_strong_cld_do(clds);
+  } else {
+    ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
+    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CLDGRoots, worker_id);
+    ClassLoaderDataGraph::always_strong_cld_do(clds);
+  }
+}
+
+template <bool SINGLE_THREADED>
+void ShenandoahClassLoaderDataRoots<SINGLE_THREADED>::cld_do(CLDClosure* clds, uint worker_id) {
+  if (SINGLE_THREADED) {
+    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
+    assert(Thread::current()->is_VM_thread(), "Single threaded CLDG iteration can only be done by VM thread");
+    ClassLoaderDataGraph::cld_do(clds);
+  } else {
+    ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
+    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CLDGRoots, worker_id);
+    ClassLoaderDataGraph::cld_do(clds);
+  }
+}
+
+template <typename ITR>
+ShenandoahCodeCacheRoots<ITR>::ShenandoahCodeCacheRoots() {
+  nmethod::oops_do_marking_prologue();
+}
+
+template <typename ITR>
+void ShenandoahCodeCacheRoots<ITR>::code_blobs_do(CodeBlobClosure* blob_cl, uint worker_id) {
+  ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
+  ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
+  _coderoots_iterator.possibly_parallel_blobs_do(blob_cl);
+}
+
+template <typename ITR>
+ShenandoahCodeCacheRoots<ITR>::~ShenandoahCodeCacheRoots() {
+  nmethod::oops_do_marking_epilogue();
+}
+
+class ShenandoahParallelOopsDoThreadClosure : public ThreadClosure {
+private:
+  OopClosure* _f;
+  CodeBlobClosure* _cf;
+  ThreadClosure* _thread_cl;
+public:
+  ShenandoahParallelOopsDoThreadClosure(OopClosure* f, CodeBlobClosure* cf, ThreadClosure* thread_cl) :
+    _f(f), _cf(cf), _thread_cl(thread_cl) {}
+
+  void do_thread(Thread* t) {
+    if (_thread_cl != NULL) {
+      _thread_cl->do_thread(t);
+    }
+    t->oops_do(_f, _cf);
+  }
+};
+
+template <typename ITR>
+ShenandoahRootScanner<ITR>::ShenandoahRootScanner(uint n_workers, ShenandoahPhaseTimings::Phase phase) :
+  ShenandoahRootProcessor(phase),
+  _thread_roots(n_workers > 1) {
+}
+
+template <typename ITR>
+void ShenandoahRootScanner<ITR>::roots_do(uint worker_id, OopClosure* oops) {
+  CLDToOopClosure clds_cl(oops);
+  MarkingCodeBlobClosure blobs_cl(oops, !CodeBlobToOopClosure::FixRelocations);
+  roots_do(worker_id, oops, &clds_cl, &blobs_cl);
+}
+
+template <typename ITR>
+void ShenandoahRootScanner<ITR>::strong_roots_do(uint worker_id, OopClosure* oops) {
+  CLDToOopClosure clds_cl(oops);
+  MarkingCodeBlobClosure blobs_cl(oops, !CodeBlobToOopClosure::FixRelocations);
+  strong_roots_do(worker_id, oops, &clds_cl, &blobs_cl);
+}
+
+template <typename ITR>
+void ShenandoahRootScanner<ITR>::roots_do(uint worker_id, OopClosure* oops, CLDClosure* clds, CodeBlobClosure* code, ThreadClosure *tc) {
+  assert(!ShenandoahHeap::heap()->unload_classes() ||
+          ShenandoahHeap::heap()->is_traversal_mode(),
+          "No class unloading or traversal GC");
+  ShenandoahParallelOopsDoThreadClosure tc_cl(oops, code, tc);
+  ResourceMark rm;
+
+  _serial_roots.oops_do(oops, worker_id);
+  _jni_roots.oops_do(oops, worker_id);
+
+  if (clds != NULL) {
+    _cld_roots.cld_do(clds, worker_id);
+  } else {
+    assert(ShenandoahHeap::heap()->is_concurrent_traversal_in_progress(), "Only possible with traversal GC");
+  }
+
+  _thread_roots.threads_do(&tc_cl, worker_id);
+
+  // With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
+  // and instead do that in concurrent phase under the relevant lock. This saves init mark
+  // pause time.
+  if (code != NULL && !ShenandoahConcurrentScanCodeRoots) {
+    _code_roots.code_blobs_do(code, worker_id);
+  }
+}
+
+template <typename ITR>
+void ShenandoahRootScanner<ITR>::strong_roots_do(uint worker_id, OopClosure* oops, CLDClosure* clds, CodeBlobClosure* code, ThreadClosure* tc) {
+  assert(ShenandoahHeap::heap()->unload_classes(), "Should be used during class unloading");
+  ShenandoahParallelOopsDoThreadClosure tc_cl(oops, code, tc);
+  ResourceMark rm;
+
+  _serial_roots.oops_do(oops, worker_id);
+  _jni_roots.oops_do(oops, worker_id);
+  _cld_roots.always_strong_cld_do(clds, worker_id);
+  _thread_roots.threads_do(&tc_cl, worker_id);
+}
+
+template <typename IsAlive, typename KeepAlive>
+void ShenandoahRootUpdater::roots_do(uint worker_id, IsAlive* is_alive, KeepAlive* keep_alive) {
+  CodeBlobToOopClosure update_blobs(keep_alive, CodeBlobToOopClosure::FixRelocations);
+  CLDToOopClosure clds(keep_alive);
+
+  _serial_roots.oops_do(keep_alive, worker_id);
+  _jni_roots.oops_do(keep_alive, worker_id);
+
+  _thread_roots.oops_do(keep_alive, NULL, worker_id);
+  _cld_roots.cld_do(&clds, worker_id);
+
+  if (_update_code_cache) {
+    _code_roots.code_blobs_do(&update_blobs, worker_id);
+  }
+
+  _weak_roots.oops_do(is_alive, keep_alive, worker_id);
+  _dedup_roots.oops_do(is_alive, keep_alive, worker_id);
+}
+
+#endif // SHARE_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_INLINE_HPP
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp 2020-01-17 17:10:33.389129196 +0100
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "classfile/classLoaderData.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "code/codeCache.hpp"
+#include "gc/shenandoah/shenandoahAsserts.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
+#include "gc/shenandoah/shenandoahRootVerifier.hpp"
+#include "gc/shenandoah/shenandoahStringDedup.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+#include "gc/shared/weakProcessor.hpp"
+#include "memory/universe.hpp"
+#include "runtime/thread.hpp"
+#include "services/management.hpp"
+#include "utilities/debug.hpp"
+
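ShenandoahRootVerifier (declared in shenandoahRootVerifier.hpp below) keeps the set of root groups to check as a bitmask: excludes() clears bits, verify() tests them, and the STATIC_ASSERT just below guards that AllRoots has not outgrown what the underlying integer can represent. The mask arithmetic in isolation:

    #include <cstdio>

    enum RootTypes {                 // mirrors the RootTypes enum in the patch
      SerialRoots = 1 << 0,
      ThreadRoots = 1 << 1,
      CodeRoots   = 1 << 2,
      AllRoots    = (SerialRoots | ThreadRoots | CodeRoots)
    };

    int main() {
      unsigned types = AllRoots;
      types &= ~static_cast<unsigned>(CodeRoots);   // excludes(CodeRoots)
      printf("thread=%d code=%d\n",
             (types & ThreadRoots) != 0,            // 1: still verified
             (types & CodeRoots) != 0);             // 0: excluded
      return 0;
    }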
+// Check for overflow of number of root types.
+STATIC_ASSERT((static_cast<uint>(ShenandoahRootVerifier::AllRoots) + 1) > static_cast<uint>(ShenandoahRootVerifier::AllRoots));
+
+ShenandoahRootVerifier::ShenandoahRootVerifier() : _types(AllRoots) {
+}
+
+void ShenandoahRootVerifier::excludes(RootTypes types) {
+  _types = static_cast<RootTypes>(static_cast<uint>(_types) & (~static_cast<uint>(types)));
+}
+
+bool ShenandoahRootVerifier::verify(RootTypes type) const {
+  return (_types & type) != 0;
+}
+
+void ShenandoahRootVerifier::oops_do(OopClosure* oops) {
+  CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations);
+  if (verify(CodeRoots)) {
+    shenandoah_assert_locked_or_safepoint(CodeCache_lock);
+    CodeCache::blobs_do(&blobs);
+  }
+
+  if (verify(CLDGRoots)) {
+    shenandoah_assert_safepoint();
+    CLDToOopClosure clds(oops, false);
+    ClassLoaderDataGraph::cld_do(&clds);
+  }
+
+  if (verify(SerialRoots)) {
+    shenandoah_assert_safepoint();
+    Universe::oops_do(oops);
+    Management::oops_do(oops);
+    JvmtiExport::oops_do(oops);
+    ObjectSynchronizer::oops_do(oops);
+    SystemDictionary::oops_do(oops);
+  }
+
+  if (verify(JNIHandleRoots)) {
+    shenandoah_assert_safepoint();
+    JNIHandles::oops_do(oops);
+  }
+
+  if (verify(WeakRoots)) {
+    shenandoah_assert_safepoint();
+    AlwaysTrueClosure always_true;
+    WeakProcessor::weak_oops_do(&always_true, oops);
+  }
+
+  if (ShenandoahStringDedup::is_enabled() && verify(StringDedupRoots)) {
+    shenandoah_assert_safepoint();
+    ShenandoahStringDedup::oops_do_slow(oops);
+  }
+
+  if (verify(ThreadRoots)) {
+    shenandoah_assert_safepoint();
+    // Do thread roots last. This allows verification code to find
+    // any broken objects from those special roots first, not the accidental
+    // dangling reference from the thread root.
+    Threads::possibly_parallel_oops_do(false, oops, &blobs);
+  }
+}
+
+void ShenandoahRootVerifier::roots_do(OopClosure* oops) {
+  shenandoah_assert_safepoint();
+
+  CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations);
+  CodeCache::blobs_do(&blobs);
+
+  CLDToOopClosure clds(oops, false);
+  ClassLoaderDataGraph::cld_do(&clds);
+
+  Universe::oops_do(oops);
+  Management::oops_do(oops);
+  JvmtiExport::oops_do(oops);
+  JNIHandles::oops_do(oops);
+  ObjectSynchronizer::oops_do(oops);
+  SystemDictionary::oops_do(oops);
+
+  AlwaysTrueClosure always_true;
+  WeakProcessor::weak_oops_do(&always_true, oops);
+
+  if (ShenandoahStringDedup::is_enabled()) {
+    ShenandoahStringDedup::oops_do_slow(oops);
+  }
+
+  // Do thread roots last. This allows verification code to find
+  // any broken objects from those special roots first, not the accidental
+  // dangling reference from the thread root.
+  Threads::possibly_parallel_oops_do(false, oops, &blobs);
+}
+
+void ShenandoahRootVerifier::strong_roots_do(OopClosure* oops) {
+  shenandoah_assert_safepoint();
+
+  CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations);
+
+  CLDToOopClosure clds(oops, false);
+  ClassLoaderDataGraph::roots_cld_do(&clds, NULL);
+
+  Universe::oops_do(oops);
+  Management::oops_do(oops);
+  JvmtiExport::oops_do(oops);
+  JNIHandles::oops_do(oops);
+  ObjectSynchronizer::oops_do(oops);
+  SystemDictionary::oops_do(oops);
+
+  // Do thread roots last. This allows verification code to find
+  // any broken objects from those special roots first, not the accidental
+  // dangling reference from the thread root.
+ Threads::possibly_parallel_oops_do(false, oops, &blobs); +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.hpp 2020-01-17 17:10:33.992129163 +0100 @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHROOTVERIFIER_HPP +#define SHARE_GC_SHENANDOAH_SHENANDOAHROOTVERIFIER_HPP + +#include "memory/allocation.hpp" +#include "memory/iterator.hpp" + +class ShenandoahRootVerifier : public StackObj { +public: + enum RootTypes { + SerialRoots = 1 << 0, + ThreadRoots = 1 << 1, + CodeRoots = 1 << 2, + CLDGRoots = 1 << 3, + WeakRoots = 1 << 4, + StringDedupRoots = 1 << 5, + JNIHandleRoots = 1 << 6, + AllRoots = (SerialRoots | ThreadRoots | CodeRoots | CLDGRoots | WeakRoots | StringDedupRoots | JNIHandleRoots) + }; + +private: + RootTypes _types; + +public: + ShenandoahRootVerifier(); + + void excludes(RootTypes types); + void oops_do(OopClosure* cl); + + // Used to seed ShenandoahVerifier, do not honor root type filter + void roots_do(OopClosure* cl); + void strong_roots_do(OopClosure* cl); +private: + bool verify(RootTypes type) const; +}; + +#endif // SHARE_GC_SHENANDOAH_SHENANDOAHROOTVERIFIER_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahRuntime.cpp 2020-01-17 17:10:34.598129130 +0100 @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp" +#include "gc/shenandoah/shenandoahBarrierSetClone.inline.hpp" +#include "gc/shenandoah/shenandoahRuntime.hpp" +#include "gc/shenandoah/shenandoahThreadLocalData.hpp" +#include "runtime/interfaceSupport.inline.hpp" +#include "oops/oop.inline.hpp" +#include "utilities/copy.hpp" + +void ShenandoahRuntime::write_ref_array_pre_oop_entry(oop* src, oop* dst, size_t length) { + ShenandoahBarrierSet *bs = ShenandoahBarrierSet::barrier_set(); + bs->arraycopy_pre(src, dst, length); +} + +void ShenandoahRuntime::write_ref_array_pre_narrow_oop_entry(narrowOop* src, narrowOop* dst, size_t length) { + ShenandoahBarrierSet *bs = ShenandoahBarrierSet::barrier_set(); + bs->arraycopy_pre(src, dst, length); +} + +void ShenandoahRuntime::write_ref_array_pre_duinit_oop_entry(oop* src, oop* dst, size_t length) { + ShenandoahBarrierSet *bs = ShenandoahBarrierSet::barrier_set(); + bs->arraycopy_update(src, length); +} + +void ShenandoahRuntime::write_ref_array_pre_duinit_narrow_oop_entry(narrowOop* src, narrowOop* dst, size_t length) { + ShenandoahBarrierSet *bs = ShenandoahBarrierSet::barrier_set(); + bs->arraycopy_update(src, length); +} + +// Shenandoah pre write barrier slowpath +JRT_LEAF(void, ShenandoahRuntime::write_ref_field_pre_entry(oopDesc* orig, JavaThread *thread)) + if (orig == NULL) { + assert(false, "should be optimized out"); + return; + } + shenandoah_assert_correct(NULL, orig); + // store the original value that was in the field reference + assert(ShenandoahThreadLocalData::satb_mark_queue(thread).is_active(), "Shouldn't be here otherwise"); + ShenandoahThreadLocalData::satb_mark_queue(thread).enqueue_known_active(orig); +JRT_END + +JRT_LEAF(oopDesc*, ShenandoahRuntime::load_reference_barrier(oopDesc* src, oop* load_addr)) + return ShenandoahBarrierSet::barrier_set()->load_reference_barrier_mutator(src, load_addr); +JRT_END + +JRT_LEAF(oopDesc*, ShenandoahRuntime::load_reference_barrier_narrow(oopDesc* src, narrowOop* load_addr)) + return ShenandoahBarrierSet::barrier_set()->load_reference_barrier_mutator(src, load_addr); +JRT_END + +// Shenandoah clone barrier: makes sure that references point to to-space +// in cloned objects. +JRT_LEAF(void, ShenandoahRuntime::shenandoah_clone_barrier(oopDesc* src)) + oop s = oop(src); + shenandoah_assert_correct(NULL, s); + ShenandoahBarrierSet::barrier_set()->clone_barrier(s); +JRT_END --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahRuntime.hpp 2020-01-17 17:10:35.208129096 +0100 @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahRuntime.hpp 2020-01-17 17:10:35.208129096 +0100
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHRUNTIME_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHRUNTIME_HPP
+
+#include "memory/allocation.hpp"
+#include "oops/oopsHierarchy.hpp"
+
+class HeapWord;
+class JavaThread;
+class oopDesc;
+
+class ShenandoahRuntime : public AllStatic {
+public:
+  static void write_ref_array_pre_oop_entry(oop* src, oop* dst, size_t length);
+  static void write_ref_array_pre_narrow_oop_entry(narrowOop* src, narrowOop* dst, size_t length);
+  static void write_ref_array_pre_duinit_oop_entry(oop* src, oop* dst, size_t length);
+  static void write_ref_array_pre_duinit_narrow_oop_entry(narrowOop* src, narrowOop* dst, size_t length);
+  static void write_ref_field_pre_entry(oopDesc* orig, JavaThread* thread);
+
+  static oopDesc* load_reference_barrier(oopDesc* src, oop* load_addr);
+  static oopDesc* load_reference_barrier_narrow(oopDesc* src, narrowOop* load_addr);
+
+  static void shenandoah_clone_barrier(oopDesc* src);
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHRUNTIME_HPP
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahSATBMarkQueue.cpp 2020-01-17 17:10:35.801129063 +0100
@@ -0,0 +1,347 @@
+/*
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jvm.h"
+#include "gc/shared/collectedHeap.hpp"
+#include "memory/allocation.inline.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/thread.hpp"
+#include "runtime/threadSMR.hpp"
+#include "runtime/vmThread.hpp"
+#include "utilities/macros.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahSATBMarkQueue.hpp"
+#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
+
+ShenandoahSATBMarkQueue::ShenandoahSATBMarkQueue(ShenandoahSATBMarkQueueSet* qset, bool permanent) :
+  // SATB queues are only active during marking cycles. We create
+  // them with their active field set to false. If a thread is
+  // created during a cycle and its SATB queue needs to be activated
+  // before the thread starts running, we'll need to set its active
+  // field to true. This is done in ShenandoahBarrierSet::on_thread_attach().
+ PtrQueue(qset, permanent, false /* active */) +{ } + +void ShenandoahSATBMarkQueue::flush() { + // Filter now to possibly save work later. If filtering empties the + // buffer then flush_impl can deallocate the buffer. + filter(); + flush_impl(); +} + +// Return true if a SATB buffer entry refers to an object that +// requires marking. +// +// The entry must point into the Shenandoah heap. In particular, it must not +// be a NULL pointer. NULL pointers are pre-filtered and never +// inserted into a SATB buffer. +// +// An entry that is below the NTAMS pointer for the containing heap +// region requires marking. Such an entry must point to a valid object. +// +// An entry that is at least the NTAMS pointer for the containing heap +// region might be any of the following, none of which should be marked. +// +// * A reference to an object allocated since marking started. +// According to SATB, such objects are implicitly kept live and do +// not need to be dealt with via SATB buffer processing. +// +// * A reference to a young generation object. Young objects are +// handled separately and are not marked by concurrent marking. +// +// * A stale reference to a young generation object. If a young +// generation object reference is recorded and not filtered out +// before being moved by a young collection, the reference becomes +// stale. +// +// * A stale reference to an eagerly reclaimed humongous object. If a +// humongous object is recorded and then reclaimed, the reference +// becomes stale. +// +// The stale reference cases are implicitly handled by the NTAMS +// comparison. Because of the possibility of stale references, buffer +// processing must be somewhat circumspect and not assume entries +// in an unfiltered buffer refer to valid objects. + +inline bool retain_entry(const void* entry, ShenandoahHeap* heap) { + return heap->requires_marking(entry); +} + +// This method removes entries from a SATB buffer that will not be +// useful to the concurrent marking threads. Entries are retained if +// they require marking and are not already marked. Retained entries +// are compacted toward the top of the buffer. + +void ShenandoahSATBMarkQueue::filter() { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + void** buf = _buf; + + if (buf == NULL) { + // nothing to do + return; + } + + // Two-fingered compaction toward the end. + void** src = &buf[index()]; + void** dst = &buf[capacity()]; + assert(src <= dst, "invariant"); + for ( ; src < dst; ++src) { + // Search low to high for an entry to keep. + void* entry = *src; + if (retain_entry(entry, heap)) { + // Found keeper. Search high to low for an entry to discard. + while (src < --dst) { + if (!retain_entry(*dst, heap)) { + *dst = entry; // Replace discard with keeper. + break; + } + } + // If discard search failed (src == dst), the outer loop will also end. + } + } + // dst points to the lowest retained entry, or the end of the buffer + // if all the entries were filtered out. + set_index(dst - buf); +} + +// This method will first apply the above filtering to the buffer. If +// post-filtering a large enough chunk of the buffer has been cleared +// we can re-use the buffer (instead of enqueueing it) and we can just +// allow the mutator to carry on executing using the same buffer +// instead of replacing it. 
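A worked example of the heuristic implemented just below, with illustrative numbers (G1SATBBufferEnqueueingThresholdPercent defaults to 60): after filter() compacts retained entries toward the top, index() marks the start of the live entries, so a mostly-full buffer is handed to the collector while a mostly-filtered one is given back to the mutator:

    #include <cstdio>
    #include <initializer_list>

    int main() {
      const unsigned long cap = 1024;              // buffer capacity in slots
      for (unsigned long index : {40UL, 900UL}) {  // index() after filtering
        unsigned long percent_used = ((cap - index) * 100) / cap;
        printf("index=%lu -> %lu%% used -> %s\n", index, percent_used,
               percent_used > 60 ? "enqueue for marking" : "reuse in mutator");
      }
      return 0;
    }

This prints 96% (enqueue) for the first case and 12% (reuse) for the second.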
+ +bool ShenandoahSATBMarkQueue::should_enqueue_buffer() { + assert(_lock == NULL || _lock->owned_by_self(), + "we should have taken the lock before calling this"); + + // If G1SATBBufferEnqueueingThresholdPercent == 0 we could skip filtering. + + // This method should only be called if there is a non-NULL buffer + // that is full. + assert(index() == 0, "pre-condition"); + assert(_buf != NULL, "pre-condition"); + + filter(); + + size_t cap = capacity(); + size_t percent_used = ((cap - index()) * 100) / cap; + bool should_enqueue = percent_used > G1SATBBufferEnqueueingThresholdPercent; + + Thread* t = Thread::current(); + if (ShenandoahThreadLocalData::is_force_satb_flush(t)) { + if (!should_enqueue && cap != index()) { + // Non-empty buffer is compacted, and we decided not to enqueue it. + // We still want to know about leftover work in that buffer eventually. + // This avoid dealing with these leftovers during the final-mark, after + // the buffers are drained completely. See JDK-8205353 for more discussion. + should_enqueue = true; + } + ShenandoahThreadLocalData::set_force_satb_flush(t, false); + } + return should_enqueue; +} + +void ShenandoahSATBMarkQueue::apply_closure_and_empty(ShenandoahSATBBufferClosure* cl) { + assert(SafepointSynchronize::is_at_safepoint(), + "SATB queues must only be processed at safepoints"); + if (_buf != NULL) { + cl->do_buffer(&_buf[index()], size()); + reset(); + } +} + +#ifndef PRODUCT +// Helpful for debugging + +static void print_satb_buffer(const char* name, + void** buf, + size_t index, + size_t capacity) { + tty->print_cr(" SATB BUFFER [%s] buf: " PTR_FORMAT " index: " SIZE_FORMAT + " capacity: " SIZE_FORMAT, + name, p2i(buf), index, capacity); +} + +void ShenandoahSATBMarkQueue::print(const char* name) { + print_satb_buffer(name, _buf, index(), capacity()); +} + +#endif // PRODUCT + +ShenandoahSATBMarkQueueSet::ShenandoahSATBMarkQueueSet() : + PtrQueueSet(), + _shared_satb_queue(this, true /* permanent */) { } + +void ShenandoahSATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock, + int process_completed_threshold, + Mutex* lock) { + PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold, -1); + _shared_satb_queue.set_lock(lock); +} + +#ifdef ASSERT +void ShenandoahSATBMarkQueueSet::dump_active_states(bool expected_active) { + log_error(gc, verify)("Expected SATB active state: %s", expected_active ? "ACTIVE" : "INACTIVE"); + log_error(gc, verify)("Actual SATB active states:"); + log_error(gc, verify)(" Queue set: %s", is_active() ? "ACTIVE" : "INACTIVE"); + for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) { + log_error(gc, verify)(" Thread \"%s\" queue: %s", t->name(), satb_queue_for_thread(t).is_active() ? "ACTIVE" : "INACTIVE"); + } + log_error(gc, verify)(" Shared queue: %s", shared_satb_queue()->is_active() ? 
"ACTIVE" : "INACTIVE"); +} + +void ShenandoahSATBMarkQueueSet::verify_active_states(bool expected_active) { + // Verify queue set state + if (is_active() != expected_active) { + dump_active_states(expected_active); + guarantee(false, "SATB queue set has an unexpected active state"); + } + + // Verify thread queue states + for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) { + if (satb_queue_for_thread(t).is_active() != expected_active) { + dump_active_states(expected_active); + guarantee(false, "Thread SATB queue has an unexpected active state"); + } + } + + // Verify shared queue state + if (shared_satb_queue()->is_active() != expected_active) { + dump_active_states(expected_active); + guarantee(false, "Shared SATB queue has an unexpected active state"); + } +} +#endif // ASSERT + +void ShenandoahSATBMarkQueueSet::set_active_all_threads(bool active, bool expected_active) { + assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); +#ifdef ASSERT + verify_active_states(expected_active); +#endif // ASSERT + _all_active = active; + for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) { + satb_queue_for_thread(t).set_active(active); + } + shared_satb_queue()->set_active(active); +} + +void ShenandoahSATBMarkQueueSet::filter_thread_buffers() { + for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) { + satb_queue_for_thread(t).filter(); + } + shared_satb_queue()->filter(); +} + +bool ShenandoahSATBMarkQueueSet::apply_closure_to_completed_buffer(ShenandoahSATBBufferClosure* cl) { + BufferNode* nd = NULL; + { + MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag); + if (_completed_buffers_head != NULL) { + nd = _completed_buffers_head; + _completed_buffers_head = nd->next(); + if (_completed_buffers_head == NULL) _completed_buffers_tail = NULL; + _n_completed_buffers--; + if (_n_completed_buffers == 0) _process_completed = false; + } + } + if (nd != NULL) { + void **buf = BufferNode::make_buffer_from_node(nd); + size_t index = nd->index(); + size_t size = buffer_size(); + assert(index <= size, "invariant"); + cl->do_buffer(buf + index, size - index); + deallocate_buffer(nd); + return true; + } else { + return false; + } +} + +#ifndef PRODUCT +// Helpful for debugging + +#define SATB_PRINTER_BUFFER_SIZE 256 + +void ShenandoahSATBMarkQueueSet::print_all(const char* msg) { + char buffer[SATB_PRINTER_BUFFER_SIZE]; + assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); + + tty->cr(); + tty->print_cr("SATB BUFFERS [%s]", msg); + + BufferNode* nd = _completed_buffers_head; + int i = 0; + while (nd != NULL) { + void** buf = BufferNode::make_buffer_from_node(nd); + jio_snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Enqueued: %d", i); + print_satb_buffer(buffer, buf, nd->index(), buffer_size()); + nd = nd->next(); + i += 1; + } + + for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) { + jio_snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Thread: %s", t->name()); + satb_queue_for_thread(t).print(buffer); + } + + shared_satb_queue()->print("Shared"); + + tty->cr(); +} +#endif // PRODUCT + +void ShenandoahSATBMarkQueueSet::abandon_partial_marking() { + BufferNode* buffers_to_delete = NULL; + { + MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag); + while (_completed_buffers_head != NULL) { + BufferNode* nd = _completed_buffers_head; + _completed_buffers_head = nd->next(); + nd->set_next(buffers_to_delete); + buffers_to_delete = nd; + } + _completed_buffers_tail = NULL; + 
+    _n_completed_buffers = 0;
+    DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
+  }
+  while (buffers_to_delete != NULL) {
+    BufferNode* nd = buffers_to_delete;
+    buffers_to_delete = nd->next();
+    deallocate_buffer(nd);
+  }
+  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
+  // So we can safely manipulate these queues.
+  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
+    satb_queue_for_thread(t).reset();
+  }
+  shared_satb_queue()->reset();
+}
+
+ShenandoahSATBMarkQueue& ShenandoahSATBMarkQueueSet::satb_queue_for_thread(Thread* t) {
+  return ShenandoahThreadLocalData::satb_mark_queue(t);
+}
+
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahSATBMarkQueue.hpp	2020-01-17 17:10:36.420129029 +0100
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SATBMARKQUEUE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SATBMARKQUEUE_HPP
+
+#include "gc/g1/ptrQueue.hpp"
+#include "memory/allocation.hpp"
+
+class JavaThread;
+class ShenandoahSATBMarkQueueSet;
+
+// Base class for processing the contents of a SATB buffer.
+class ShenandoahSATBBufferClosure : public StackObj {
+protected:
+  ~ShenandoahSATBBufferClosure() { }
+
+public:
+  // Process the SATB entries in the designated buffer range.
+  virtual void do_buffer(void** buffer, size_t size) = 0;
+};
+
+// A PtrQueue whose elements are (possibly stale) pointers to object heads.
+class ShenandoahSATBMarkQueue: public PtrQueue {
+  friend class ShenandoahSATBMarkQueueSet;
+
+private:
+  // Filter out unwanted entries from the buffer.
+  void filter();
+
+  template <bool filter_out_null>
+  void filter_impl();
+
+public:
+  ShenandoahSATBMarkQueue(ShenandoahSATBMarkQueueSet* qset, bool permanent = false);
+
+  // Process queue entries and free resources.
+  void flush();
+
+  // Apply cl to the active part of the buffer.
+  // Prerequisite: Must be at a safepoint.
+  void apply_closure_and_empty(ShenandoahSATBBufferClosure* cl);
+
+  // Overrides PtrQueue::should_enqueue_buffer(). See the method's
+  // definition for more information.
+  virtual bool should_enqueue_buffer();
+
+#ifndef PRODUCT
+  // Helpful for debugging
+  void print(const char* name);
+#endif // PRODUCT
+
+  // Compiler support.
+  static ByteSize byte_offset_of_index() {
+    return PtrQueue::byte_offset_of_index<ShenandoahSATBMarkQueue>();
+  }
+  using PtrQueue::byte_width_of_index;
+
+  static ByteSize byte_offset_of_buf() {
+    return PtrQueue::byte_offset_of_buf<ShenandoahSATBMarkQueue>();
+  }
+  using PtrQueue::byte_width_of_buf;
+
+  static ByteSize byte_offset_of_active() {
+    return PtrQueue::byte_offset_of_active<ShenandoahSATBMarkQueue>();
+  }
+  using PtrQueue::byte_width_of_active;
+
+};
+
+class ShenandoahSATBMarkQueueSet: public PtrQueueSet {
+  ShenandoahSATBMarkQueue _shared_satb_queue;
+
+#ifdef ASSERT
+  void dump_active_states(bool expected_active);
+  void verify_active_states(bool expected_active);
+#endif // ASSERT
+
+public:
+  ShenandoahSATBMarkQueueSet();
+
+  void initialize(Monitor* cbl_mon, Mutex* fl_lock,
+                  int process_completed_threshold,
+                  Mutex* lock);
+
+  ShenandoahSATBMarkQueue& satb_queue_for_thread(Thread* t);
+
+  // Apply "set_active(active)" to all SATB queues in the set. It should be
+  // called only with the world stopped. The method will assert that the
+  // SATB queues of all threads it visits, as well as the SATB queue
+  // set itself, have an active value equal to expected_active.
+  void set_active_all_threads(bool active, bool expected_active);
+
+  // Filter all the currently-active SATB buffers.
+  void filter_thread_buffers();
+
+  // If there exists some completed buffer, pop and process it, and
+  // return true. Otherwise return false. Processing a buffer
+  // consists of applying the closure to the active range of the
+  // buffer; the leading entries may be excluded due to filtering.
+  bool apply_closure_to_completed_buffer(ShenandoahSATBBufferClosure* cl);
+
+#ifndef PRODUCT
+  // Helpful for debugging
+  void print_all(const char* msg);
+#endif // PRODUCT
+
+  ShenandoahSATBMarkQueue* shared_satb_queue() { return &_shared_satb_queue; }
+
+  // If a marking is being abandoned, reset any unprocessed log buffers.
+  void abandon_partial_marking();
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SATBMARKQUEUE_HPP
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp	2020-01-17 17:10:37.016128996 +0100
@@ -0,0 +1,246 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHSHAREDFLAG_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHSHAREDFLAG_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/orderAccess.hpp"
+
+typedef jbyte ShenandoahSharedValue;
+
+// Needed for cooperation with generated code.
+STATIC_ASSERT(sizeof(ShenandoahSharedValue) == 1);
+
+typedef struct ShenandoahSharedFlag {
+  enum {
+    UNSET = 0,
+    SET = 1
+  };
+
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile ShenandoahSharedValue));
+  volatile ShenandoahSharedValue value;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
+
+  ShenandoahSharedFlag() {
+    unset();
+  }
+
+  void set() {
+    OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)SET);
+  }
+
+  void unset() {
+    OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)UNSET);
+  }
+
+  bool is_set() const {
+    return OrderAccess::load_acquire(&value) == SET;
+  }
+
+  bool is_unset() const {
+    return OrderAccess::load_acquire(&value) == UNSET;
+  }
+
+  void set_cond(bool val) {
+    if (val) {
+      set();
+    } else {
+      unset();
+    }
+  }
+
+  bool try_set() {
+    if (is_set()) {
+      return false;
+    }
+    ShenandoahSharedValue old = Atomic::cmpxchg((ShenandoahSharedValue)SET, &value, (ShenandoahSharedValue)UNSET);
+    return old == UNSET; // success
+  }
+
+  bool try_unset() {
+    if (!is_set()) {
+      return false;
+    }
+    ShenandoahSharedValue old = Atomic::cmpxchg((ShenandoahSharedValue)UNSET, &value, (ShenandoahSharedValue)SET);
+    return old == SET; // success
+  }
+
+  volatile ShenandoahSharedValue* addr_of() {
+    return &value;
+  }
+
+private:
+  volatile ShenandoahSharedValue* operator&() {
+    fatal("Use addr_of() instead");
+    return NULL;
+  }
+
+  bool operator==(ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; }
+  bool operator!=(ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; }
+  bool operator> (ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; }
+  bool operator>=(ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; }
+  bool operator< (ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; }
+  bool operator<=(ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; }
+
+} ShenandoahSharedFlag;
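
As a usage sketch (the call site below is hypothetical, not part of this patch): try_set()/try_unset() pick exactly one winner among racing threads, which is the intended idiom for claiming one-shot work:

    static ShenandoahSharedFlag _cancelled;  // hypothetical flag

    void request_cancellation() {
      // Only the thread that observes the UNSET -> SET transition wins;
      // everyone else sees try_set() return false and skips the work.
      if (_cancelled.try_set()) {
        // one-shot cancellation handling
      }
    }
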
+
+typedef struct ShenandoahSharedBitmap {
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile ShenandoahSharedValue));
+  volatile ShenandoahSharedValue value;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
+
+  ShenandoahSharedBitmap() {
+    clear();
+  }
+
+  void set(uint mask) {
+    assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
+    ShenandoahSharedValue mask_val = (ShenandoahSharedValue) mask;
+    while (true) {
+      ShenandoahSharedValue ov = OrderAccess::load_acquire(&value);
+      if ((ov & mask_val) != 0) {
+        // already set
+        return;
+      }
+
+      ShenandoahSharedValue nv = ov | mask_val;
+      if (Atomic::cmpxchg(nv, &value, ov) == ov) {
+        // successfully set
+        return;
+      }
+    }
+  }
+
+  void unset(uint mask) {
+    assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
+    ShenandoahSharedValue mask_val = (ShenandoahSharedValue) mask;
+    while (true) {
+      ShenandoahSharedValue ov = OrderAccess::load_acquire(&value);
+      if ((ov & mask_val) == 0) {
+        // already unset
+        return;
+      }
+
+      ShenandoahSharedValue nv = ov & ~mask_val;
+      if (Atomic::cmpxchg(nv, &value, ov) == ov) {
+        // successfully unset
+        return;
+      }
+    }
+  }
+
+  void clear() {
+    OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)0);
+  }
+
+  bool is_set(uint mask) const {
+    return !is_unset(mask);
+  }
+
+  bool is_unset(uint mask) const {
+    assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
+    return (OrderAccess::load_acquire(&value) & (ShenandoahSharedValue) mask) == 0;
+  }
+
+  bool is_clear() const {
+    return (OrderAccess::load_acquire(&value)) == 0;
+  }
+
+  void set_cond(uint mask, bool val) {
+    if (val) {
+      set(mask);
+    } else {
+      unset(mask);
+    }
+  }
+
+  volatile ShenandoahSharedValue* addr_of() {
+    return &value;
+  }
+
+  ShenandoahSharedValue raw_value() const {
+    return value;
+  }
+
+private:
+  volatile ShenandoahSharedValue* operator&() {
+    fatal("Use addr_of() instead");
+    return NULL;
+  }
+
+  bool operator==(ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; }
+  bool operator!=(ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; }
+  bool operator> (ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; }
+  bool operator>=(ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; }
+  bool operator< (ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; }
+  bool operator<=(ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; }
+
+} ShenandoahSharedBitmap;
+
+template <class T>
+struct ShenandoahSharedEnumFlag {
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile ShenandoahSharedValue));
+  volatile ShenandoahSharedValue value;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
+
+  ShenandoahSharedEnumFlag() {
+    value = 0;
+  }
+
+  void set(T v) {
+    assert (v >= 0, "sanity");
+    assert (v < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
+    OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)v);
+  }
+
+  T get() const {
+    return (T)OrderAccess::load_acquire(&value);
+  }
+
+  T cmpxchg(T new_value, T expected) {
+    assert (new_value >= 0, "sanity");
+    assert (new_value < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
+    return (T)Atomic::cmpxchg((ShenandoahSharedValue)new_value, &value, (ShenandoahSharedValue)expected);
+  }
+
+  volatile ShenandoahSharedValue* addr_of() {
+    return &value;
+  }
+
+private:
+  volatile T* operator&() {
+    fatal("Use addr_of() instead");
+    return NULL;
+  }
+
+  bool operator==(ShenandoahSharedEnumFlag& other) { fatal("Use get() instead"); return false; }
+  bool operator!=(ShenandoahSharedEnumFlag& other) { fatal("Use get() instead"); return false; }
+  bool operator> (ShenandoahSharedEnumFlag& other) { fatal("Use get() instead"); return false; }
+  bool operator>=(ShenandoahSharedEnumFlag& other) { fatal("Use get() instead"); return false; }
+  bool operator< (ShenandoahSharedEnumFlag& other) { fatal("Use get() instead"); return false; }
+  bool operator<=(ShenandoahSharedEnumFlag& other) { fatal("Use get() instead"); return false; }
+
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHSHAREDFLAG_HPP
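
A sketch of how ShenandoahSharedEnumFlag supports race-free state transitions (the phase enum and helper below are hypothetical, for illustration only):

    enum HypotheticalPhase { IDLE = 0, MARKING = 1, EVACUATING = 2 };
    static ShenandoahSharedEnumFlag<HypotheticalPhase> _phase;

    bool try_start_marking() {
      // cmpxchg returns the previous value; the transition succeeds only
      // for the single thread that actually saw IDLE.
      return _phase.cmpxchg(MARKING, IDLE) == IDLE;
    }
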
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahStrDedupQueue.cpp	2020-01-17 17:10:37.625128963 +0100
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shared/stringdedup/stringDedup.hpp"
+#include "gc/shared/stringdedup/stringDedupThread.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahStrDedupQueue.inline.hpp"
+#include "gc/shenandoah/shenandoahStringDedup.inline.hpp"
+#include "logging/log.hpp"
+#include "runtime/mutex.hpp"
+#include "runtime/mutexLocker.hpp"
+
+ShenandoahStrDedupQueue::ShenandoahStrDedupQueue() :
+  _consumer_queue(NULL),
+  _num_producer_queue(ShenandoahHeap::heap()->max_workers()),
+  _published_queues(NULL),
+  _free_list(NULL),
+  _num_free_buffer(0),
+  _max_free_buffer(ShenandoahHeap::heap()->max_workers() * 2),
+  _cancel(false),
+  _total_buffers(0) {
+  _producer_queues = NEW_C_HEAP_ARRAY(ShenandoahQueueBuffer*, _num_producer_queue, mtGC);
+  for (size_t index = 0; index < _num_producer_queue; index ++) {
+    _producer_queues[index] = NULL;
+  }
+}
+
+ShenandoahStrDedupQueue::~ShenandoahStrDedupQueue() {
+  MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
+  for (size_t index = 0; index < num_queues(); index ++) {
+    release_buffers(queue_at(index));
+  }
+
+  release_buffers(_free_list);
+  FREE_C_HEAP_ARRAY(ShenandoahQueueBuffer*, _producer_queues);
+}
+
+void ShenandoahStrDedupQueue::wait_impl() {
+  MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
+  while (_consumer_queue == NULL && !_cancel) {
+    ml.wait(Mutex::_no_safepoint_check_flag);
+    assert(_consumer_queue == NULL, "Why wait?");
+    _consumer_queue = _published_queues;
+    _published_queues = NULL;
+  }
+}
+
+void ShenandoahStrDedupQueue::cancel_wait_impl() {
+  MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
+  _cancel = true;
+  ml.notify();
+}
+
+void ShenandoahStrDedupQueue::unlink_or_oops_do_impl(StringDedupUnlinkOrOopsDoClosure* cl, size_t queue) {
+  ShenandoahQueueBuffer* q = queue_at(queue);
+  while (q != NULL) {
+    q->unlink_or_oops_do(cl);
+    q = q->next();
+  }
+}
+
+ShenandoahQueueBuffer* ShenandoahStrDedupQueue::queue_at(size_t queue_id) const {
+  assert(queue_id <= num_queues(), "Invalid queue id");
+  if (queue_id < _num_producer_queue) {
+    return _producer_queues[queue_id];
+  } else if (queue_id == _num_producer_queue) {
+    return _consumer_queue;
+  } else {
+    assert(queue_id == _num_producer_queue + 1, "Must be");
+    return _published_queues;
+  }
+}
+
+void ShenandoahStrDedupQueue::set_producer_buffer(ShenandoahQueueBuffer* buf, size_t queue_id) {
+  assert(queue_id < _num_producer_queue, "Not a producer queue id");
+  _producer_queues[queue_id] = buf;
+}
+
+void ShenandoahStrDedupQueue::push_impl(uint worker_id, oop string_oop) {
+  assert(worker_id < _num_producer_queue, "Invalid queue id. Can only push to producer queue");
+  assert(ShenandoahStringDedup::is_candidate(string_oop), "Not a candidate");
+
+  ShenandoahQueueBuffer* buf = queue_at((size_t)worker_id);
+
+  if (buf == NULL) {
+    MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
+    buf = new_buffer();
+    set_producer_buffer(buf, worker_id);
+  } else if (buf->is_full()) {
+    MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
+    buf->set_next(_published_queues);
+    _published_queues = buf;
+    buf = new_buffer();
+    set_producer_buffer(buf, worker_id);
+    ml.notify();
+  }
+
+  assert(!buf->is_full(), "Sanity");
+  buf->push(string_oop);
+}
+
+oop ShenandoahStrDedupQueue::pop_impl() {
+  assert(Thread::current() == StringDedupThread::thread(), "Must be dedup thread");
+  while (true) {
+    if (_consumer_queue == NULL) {
+      MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
+      _consumer_queue = _published_queues;
+      _published_queues = NULL;
+    }
+
+    // there is nothing
+    if (_consumer_queue == NULL) {
+      return NULL;
+    }
+
+    oop obj = NULL;
+    if (pop_candidate(obj)) {
+      assert(ShenandoahStringDedup::is_candidate(obj), "Must be a candidate");
+      return obj;
+    }
+    assert(obj == NULL, "No more candidate");
+  }
+}
+
+bool ShenandoahStrDedupQueue::pop_candidate(oop& obj) {
+  ShenandoahQueueBuffer* to_release = NULL;
+  bool suc = true;
+  do {
+    if (_consumer_queue->is_empty()) {
+      ShenandoahQueueBuffer* buf = _consumer_queue;
+      _consumer_queue = _consumer_queue->next();
+      buf->set_next(to_release);
+      to_release = buf;
+
+      if (_consumer_queue == NULL) {
+        suc = false;
+        break;
+      }
+    }
+    obj = _consumer_queue->pop();
+  } while (obj == NULL);
+
+  if (to_release != NULL) {
+    MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
+    release_buffers(to_release);
+  }
+
+  return suc;
+}
+
+ShenandoahQueueBuffer* ShenandoahStrDedupQueue::new_buffer() {
+  assert_lock_strong(StringDedupQueue_lock);
+  if (_free_list != NULL) {
+    assert(_num_free_buffer > 0, "Sanity");
+    ShenandoahQueueBuffer* buf = _free_list;
+    _free_list = _free_list->next();
+    _num_free_buffer --;
+    buf->reset();
+    return buf;
+  } else {
+    assert(_num_free_buffer == 0, "Sanity");
+    _total_buffers ++;
+    return new ShenandoahQueueBuffer;
+  }
+}
+
+void ShenandoahStrDedupQueue::release_buffers(ShenandoahQueueBuffer* list) {
+  assert_lock_strong(StringDedupQueue_lock);
+  while (list != NULL) {
+    ShenandoahQueueBuffer* tmp = list;
+    list = list->next();
+    if (_num_free_buffer < _max_free_buffer) {
+      tmp->set_next(_free_list);
+      _free_list = tmp;
+      _num_free_buffer ++;
+    } else {
+      _total_buffers --;
+      delete tmp;
+    }
+  }
+}
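
Restating the queue-id layout that queue_at() implements, with a worker count picked purely for illustration:

    // With max_workers == 4:
    //   ids 0..3 -> per-worker producer buffers (push_impl publishes a full
    //               buffer onto _published_queues and notifies the consumer)
    //   id  4    -> _consumer_queue, drained by the dedup thread
    //   id  5    -> _published_queues, full buffers awaiting the consumer
    // so num_queues() == _num_producer_queue + 2 == 6.
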
" SIZE_FORMAT " buffers are on free list", + _total_buffers, + byte_size_in_proper_unit(_total_buffers * sizeof(ShenandoahQueueBuffer)), + proper_unit_for_byte_size(_total_buffers * sizeof(ShenandoahQueueBuffer)), + _num_free_buffer); +} + +class VerifyQueueClosure : public OopClosure { +private: + ShenandoahHeap* _heap; +public: + VerifyQueueClosure(); + + void do_oop(oop* o); + void do_oop(narrowOop* o) { + ShouldNotCallThis(); + } +}; + +VerifyQueueClosure::VerifyQueueClosure() : + _heap(ShenandoahHeap::heap()) { +} + +void VerifyQueueClosure::do_oop(oop* o) { + if (*o != NULL) { + oop obj = *o; + shenandoah_assert_correct(o, obj); + assert(java_lang_String::is_instance(obj), "Object must be a String"); + } +} + +void ShenandoahStrDedupQueue::verify_impl() { + VerifyQueueClosure vcl; + for (size_t index = 0; index < num_queues(); index ++) { + ShenandoahQueueBuffer* buf = queue_at(index); + while (buf != NULL) { + buf->oops_do(&vcl); + buf = buf->next(); + } + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahStrDedupQueue.hpp 2020-01-17 17:10:38.242128929 +0100 @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUPQUEUE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUPQUEUE_HPP
+
+#include "gc/shared/stringdedup/stringDedup.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "oops/oop.hpp"
+
+template <uint buffer_size>
+class ShenandoahOopBuffer : public CHeapObj<mtGC> {
+private:
+  oop   _buf[buffer_size];
+  uint  _index;
+  ShenandoahOopBuffer* _next;
+
+public:
+  ShenandoahOopBuffer();
+
+  bool is_full()  const;
+  bool is_empty() const;
+  uint size()     const;
+
+  void push(oop obj);
+  oop pop();
+
+  void reset();
+
+  void set_next(ShenandoahOopBuffer* next);
+  ShenandoahOopBuffer* next() const;
+
+  void unlink_or_oops_do(StringDedupUnlinkOrOopsDoClosure* cl);
+  void oops_do(OopClosure* cl);
+};
+
+typedef ShenandoahOopBuffer<64> ShenandoahQueueBuffer;
+
+// Multi-producer and single consumer queue set
+class ShenandoahStrDedupQueue : public StringDedupQueue {
+private:
+  ShenandoahQueueBuffer** _producer_queues;
+  ShenandoahQueueBuffer*  _consumer_queue;
+  size_t                  _num_producer_queue;
+
+  // The queue is used for producers to publish completed buffers
+  ShenandoahQueueBuffer* _published_queues;
+
+  // Cached free buffers
+  ShenandoahQueueBuffer* _free_list;
+  size_t                 _num_free_buffer;
+  const size_t           _max_free_buffer;
+
+  bool                   _cancel;
+
+  // statistics
+  size_t                 _total_buffers;
+
+private:
+  ~ShenandoahStrDedupQueue();
+
+public:
+  ShenandoahStrDedupQueue();
+
+  void wait_impl();
+  void cancel_wait_impl();
+
+  void push_impl(uint worker_id, oop string_oop);
+  oop  pop_impl();
+
+  void unlink_or_oops_do_impl(StringDedupUnlinkOrOopsDoClosure* cl, size_t queue);
+
+  void print_statistics_impl();
+  void verify_impl();
+
+protected:
+  size_t num_queues() const { return (_num_producer_queue + 2); }
+
+private:
+  ShenandoahQueueBuffer* new_buffer();
+
+  void release_buffers(ShenandoahQueueBuffer* list);
+
+  ShenandoahQueueBuffer* queue_at(size_t queue_id) const;
+
+  bool pop_candidate(oop& obj);
+
+  void set_producer_buffer(ShenandoahQueueBuffer* buf, size_t queue_id);
+
+  void verify(ShenandoahQueueBuffer* head);
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUPQUEUE_HPP
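
ShenandoahQueueBuffer is a fixed 64-slot LIFO; a minimal sketch of its contract (standalone; `some_string_oop` is a placeholder, and the real definitions follow in the .inline.hpp below):

    ShenandoahQueueBuffer buf;
    assert(buf.is_empty(), "fresh buffer");
    buf.push(some_string_oop);   // push asserts !is_full() first
    oop o = buf.pop();           // LIFO: returns the most recent push
    // Full buffers are linked together via set_next() and published.
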
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahStrDedupQueue.inline.hpp	2020-01-17 17:10:38.861128895 +0100
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUPQUEUE_INLINE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUPQUEUE_INLINE_HPP
+
+#include "gc/shenandoah/shenandoahStrDedupQueue.hpp"
+
+template <uint buffer_size>
+ShenandoahOopBuffer<buffer_size>::ShenandoahOopBuffer() :
+  _index(0), _next(NULL) {
+}
+
+template <uint buffer_size>
+bool ShenandoahOopBuffer<buffer_size>::is_full() const {
+  return _index >= buffer_size;
+}
+
+template <uint buffer_size>
+bool ShenandoahOopBuffer<buffer_size>::is_empty() const {
+  return _index == 0;
+}
+
+template <uint buffer_size>
+uint ShenandoahOopBuffer<buffer_size>::size() const {
+  return _index;
+}
+
+template <uint buffer_size>
+void ShenandoahOopBuffer<buffer_size>::push(oop obj) {
+  assert(!is_full(), "Buffer is full");
+  _buf[_index ++] = obj;
+}
+
+template <uint buffer_size>
+oop ShenandoahOopBuffer<buffer_size>::pop() {
+  assert(!is_empty(), "Buffer is empty");
+  return _buf[--_index];
+}
+
+template <uint buffer_size>
+void ShenandoahOopBuffer<buffer_size>::set_next(ShenandoahOopBuffer<buffer_size>* next) {
+  _next = next;
+}
+
+template <uint buffer_size>
+ShenandoahOopBuffer<buffer_size>* ShenandoahOopBuffer<buffer_size>::next() const {
+  return _next;
+}
+
+template <uint buffer_size>
+void ShenandoahOopBuffer<buffer_size>::reset() {
+  _index = 0;
+  _next = NULL;
+}
+
+template <uint buffer_size>
+void ShenandoahOopBuffer<buffer_size>::unlink_or_oops_do(StringDedupUnlinkOrOopsDoClosure* cl) {
+  for (uint index = 0; index < size(); index ++) {
+    oop* obj_addr = &_buf[index];
+    if (*obj_addr != NULL) {
+      if (cl->is_alive(*obj_addr)) {
+        cl->keep_alive(obj_addr);
+      } else {
+        *obj_addr = NULL;
+      }
+    }
+  }
+}
+
+template <uint buffer_size>
+void ShenandoahOopBuffer<buffer_size>::oops_do(OopClosure* cl) {
+  for (uint index = 0; index < size(); index ++) {
+    cl->do_oop(&_buf[index]);
+  }
+}
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUPQUEUE_INLINE_HPP
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahStringDedup.cpp	2020-01-17 17:10:39.468128861 +0100
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shared/stringdedup/stringDedup.inline.hpp"
+#include "gc/shared/workgroup.hpp"
+#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
+#include "gc/shenandoah/shenandoahStringDedup.hpp"
+#include "gc/shenandoah/shenandoahStrDedupQueue.hpp"
+#include "gc/shenandoah/shenandoahTimingTracker.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+#include "runtime/thread.hpp"
+
+void ShenandoahStringDedup::initialize() {
+  assert(UseShenandoahGC, "String deduplication available with Shenandoah GC");
+  StringDedup::initialize_impl<ShenandoahStrDedupQueue, StringDedupStat>();
+}
+
+/* Enqueue candidates for deduplication.
+ * The method should only be called by GC worker threads during marking phases.
+ */
+void ShenandoahStringDedup::enqueue_candidate(oop java_string) {
+  assert(Thread::current()->is_Worker_thread(),
+         "Only from a GC worker thread");
+
+  if (java_string->age() <= StringDeduplicationAgeThreshold) {
+    const markOop mark = java_string->mark();
+
+    // Having/had displaced header, too risky to deal with, skip
+    if (mark == markOopDesc::INFLATING() || mark->has_displaced_mark_helper()) {
+      return;
+    }
+
+    // Increase string age and enqueue it when it reaches the age threshold
+    markOop new_mark = mark->incr_age();
+    if (mark == java_string->cas_set_mark(new_mark, mark)) {
+      if (mark->age() == StringDeduplicationAgeThreshold) {
+        StringDedupQueue::push(ShenandoahWorkerSession::worker_id(), java_string);
+      }
+    }
+  }
+}
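
A worked trace of the aging scheme above, assuming the StringDeduplicationAgeThreshold default of 3 (the GC numbering is purely illustrative):

    // GC #1: CAS bumps age 0 -> 1; old age 0 != 3, not enqueued
    // GC #2: age 1 -> 2, not enqueued
    // GC #3: age 2 -> 3, not enqueued
    // GC #4: age 3 -> 4; old age == 3, pushed to the dedup queue once
    // GC #5+: age() == 4 > threshold, the initial check skips the string,
    //         so a string is enqueued at most once.
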
+
+// Deduplicate a string.
+void ShenandoahStringDedup::deduplicate(oop java_string) {
+  assert(is_enabled(), "String deduplication not enabled");
+  StringDedupStat dummy; // Statistics from this path are never used
+  StringDedupTable::deduplicate(java_string, &dummy);
+}
+
+void ShenandoahStringDedup::parallel_oops_do(BoolObjectClosure* is_alive, OopClosure* cl, uint worker_id) {
+  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
+  assert(is_enabled(), "String deduplication not enabled");
+
+  ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
+
+  StringDedupUnlinkOrOopsDoClosure sd_cl(is_alive, cl);
+
+  {
+    ShenandoahWorkerTimingsTracker x(worker_times, ShenandoahPhaseTimings::StringDedupQueueRoots, worker_id);
+    StringDedupQueue::unlink_or_oops_do(&sd_cl);
+  }
+  {
+    ShenandoahWorkerTimingsTracker x(worker_times, ShenandoahPhaseTimings::StringDedupTableRoots, worker_id);
+    StringDedupTable::unlink_or_oops_do(&sd_cl, worker_id);
+  }
+}
+
+void ShenandoahStringDedup::oops_do_slow(OopClosure* cl) {
+  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
+  assert(is_enabled(), "String deduplication not enabled");
+  AlwaysTrueClosure always_true;
+  StringDedupUnlinkOrOopsDoClosure sd_cl(&always_true, cl);
+  StringDedupQueue::unlink_or_oops_do(&sd_cl);
+  StringDedupTable::unlink_or_oops_do(&sd_cl, 0);
+}
+
+class ShenandoahIsMarkedNextClosure : public BoolObjectClosure {
+private:
+  ShenandoahMarkingContext* const _mark_context;
+
+public:
+  ShenandoahIsMarkedNextClosure() : _mark_context(ShenandoahHeap::heap()->marking_context()) { }
+
+  bool do_object_b(oop obj) {
+    return _mark_context->is_marked(obj);
+  }
+};
+
+void ShenandoahStringDedup::parallel_cleanup() {
+  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
+  log_debug(gc, stringdedup)("String dedup cleanup");
+  ShenandoahIsMarkedNextClosure cl;
+
+  unlink_or_oops_do(&cl, NULL, true);
+}
+
+//
+// Task for parallel unlink_or_oops_do() operation on the deduplication queue
+// and table.
+//
+class ShenandoahStringDedupUnlinkOrOopsDoTask : public AbstractGangTask {
+private:
+  StringDedupUnlinkOrOopsDoClosure _cl;
+
+public:
+  ShenandoahStringDedupUnlinkOrOopsDoTask(BoolObjectClosure* is_alive,
+                                          OopClosure* keep_alive,
+                                          bool allow_resize_and_rehash) :
+    AbstractGangTask("StringDedupUnlinkOrOopsDoTask"),
+    _cl(is_alive, keep_alive) {
+    StringDedup::gc_prologue(allow_resize_and_rehash);
+  }
+
+  ~ShenandoahStringDedupUnlinkOrOopsDoTask() {
+    StringDedup::gc_epilogue();
+  }
+
+  virtual void work(uint worker_id) {
+    StringDedupQueue::unlink_or_oops_do(&_cl);
+    StringDedupTable::unlink_or_oops_do(&_cl, worker_id);
+  }
+};
+
+void ShenandoahStringDedup::unlink_or_oops_do(BoolObjectClosure* is_alive,
+                                              OopClosure* keep_alive,
+                                              bool allow_resize_and_rehash) {
+  assert(is_enabled(), "String deduplication not enabled");
+
+  ShenandoahStringDedupUnlinkOrOopsDoTask task(is_alive, keep_alive, allow_resize_and_rehash);
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  heap->workers()->run_task(&task);
+}
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahStringDedup.hpp	2020-01-17 17:10:40.068128828 +0100
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUP_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUP_HPP
+
+#include "gc/shared/stringdedup/stringDedup.hpp"
+#include "memory/iterator.hpp"
+
+class ShenandoahStringDedup : public StringDedup {
+public:
+  // Initialize string deduplication.
+  static void initialize();
+
+  // Enqueue a string to worker's local string dedup queue
+  static void enqueue_candidate(oop java_string);
+
+  // Deduplicate a string, the call is lock-free
+  static void deduplicate(oop java_string);
+
+  static void parallel_oops_do(BoolObjectClosure* is_alive, OopClosure* cl, uint worker_id);
+  static void oops_do_slow(OopClosure* cl);
+
+  // Parallel cleanup string dedup queues/table
+  static void parallel_cleanup();
+
+  static inline bool is_candidate(oop obj);
+
+  static void unlink_or_oops_do(BoolObjectClosure* is_alive,
+                                OopClosure* keep_alive,
+                                bool allow_resize_and_rehash);
+
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUP_HPP
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahStringDedup.inline.hpp	2020-01-17 17:10:40.666128795 +0100
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUP_INLINE_HPP
+#define SHARE_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUP_INLINE_HPP
+
+#include "classfile/javaClasses.inline.hpp"
+#include "gc/shenandoah/shenandoahStringDedup.hpp"
+
+bool ShenandoahStringDedup::is_candidate(oop obj) {
+  return java_lang_String::is_instance_inlined(obj) &&
+         java_lang_String::value(obj) != NULL;
+}
+
+#endif // SHARE_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUP_INLINE_HPP
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.cpp	2020-01-17 17:10:41.275128762 +0100
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
+#include "logging/log.hpp"
+#include "logging/logStream.hpp"
+
+void ShenandoahObjToScanQueueSet::clear() {
+  uint size = GenericTaskQueueSet<ShenandoahObjToScanQueue, mtGC>::size();
+  for (uint index = 0; index < size; index ++) {
+    ShenandoahObjToScanQueue* q = queue(index);
+    assert(q != NULL, "Sanity");
+    q->clear();
+  }
+}
+
+bool ShenandoahObjToScanQueueSet::is_empty() {
+  uint size = GenericTaskQueueSet<ShenandoahObjToScanQueue, mtGC>::size();
+  for (uint index = 0; index < size; index ++) {
+    ShenandoahObjToScanQueue* q = queue(index);
+    assert(q != NULL, "Sanity");
+    if (!q->is_empty()) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool ShenandoahTaskTerminator::offer_termination(ShenandoahTerminatorTerminator* terminator) {
+  assert(_n_threads > 0, "Initialization is incorrect");
+  assert(_offered_termination < _n_threads, "Invariant");
+  assert(_blocker != NULL, "Invariant");
+
+  // single worker, done
+  if (_n_threads == 1) {
+    return true;
+  }
+
+  _blocker->lock_without_safepoint_check();
+  // all arrived, done
+  if (++ _offered_termination == _n_threads) {
+    _blocker->notify_all();
+    _blocker->unlock();
+    return true;
+  }
+
+  Thread* the_thread = Thread::current();
+  while (true) {
+    if (_spin_master == NULL) {
+      _spin_master = the_thread;
+
+      _blocker->unlock();
+
+      if (do_spin_master_work(terminator)) {
+        assert(_offered_termination == _n_threads, "termination condition");
+        return true;
+      } else {
+        _blocker->lock_without_safepoint_check();
+      }
+    } else {
+      _blocker->wait(true, WorkStealingSleepMillis);
+
+      if (_offered_termination == _n_threads) {
+        _blocker->unlock();
+        return true;
+      }
+    }
+
+    if (peek_in_queue_set() ||
+        (terminator != NULL && terminator->should_exit_termination())) {
+      _offered_termination --;
+      _blocker->unlock();
+      return false;
+    }
+  }
+}
+
+#if TASKQUEUE_STATS
+void ShenandoahObjToScanQueueSet::print_taskqueue_stats_hdr(outputStream* const st) {
+  st->print_raw_cr("GC Task Stats");
+  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
+  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
+}
+
+void ShenandoahObjToScanQueueSet::print_taskqueue_stats() const {
+  if (!log_develop_is_enabled(Trace, gc, task, stats)) {
+    return;
+  }
+  Log(gc, task, stats) log;
+  ResourceMark rm;
+  LogStream ls(log.trace());
+  outputStream* st = &ls;
+  print_taskqueue_stats_hdr(st);
+
+  ShenandoahObjToScanQueueSet* queues = const_cast<ShenandoahObjToScanQueueSet*>(this);
+  TaskQueueStats totals;
+  const uint n = size();
+  for (uint i = 0; i < n; ++i) {
+    st->print(UINT32_FORMAT_W(3), i);
+    queues->queue(i)->stats.print(st);
+    st->cr();
+    totals += queues->queue(i)->stats;
+  }
+  st->print("tot "); totals.print(st); st->cr();
+  DEBUG_ONLY(totals.verify());
+
+}
+
+void ShenandoahObjToScanQueueSet::reset_taskqueue_stats() {
+  const uint n = size();
+  for (uint i = 0; i < n; ++i) {
+    queue(i)->stats.reset();
+  }
+}
+#endif // TASKQUEUE_STATS
+
+bool ShenandoahTaskTerminator::do_spin_master_work(ShenandoahTerminatorTerminator* terminator) {
+  uint yield_count = 0;
+  // Number of hard spin loops done since last yield
+  uint hard_spin_count = 0;
+  // Number of iterations in the hard spin loop.
+  uint hard_spin_limit = WorkStealingHardSpins;
+
+  // If WorkStealingSpinToYieldRatio is 0, no hard spinning is done.
+  // If it is greater than 0, then start with a small number
+  // of spins and increase number with each turn at spinning until
+  // the count of hard spins exceeds WorkStealingSpinToYieldRatio.
+  // Then do a yield() call and start spinning afresh.
+  if (WorkStealingSpinToYieldRatio > 0) {
+    hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio;
+    hard_spin_limit = MAX2(hard_spin_limit, 1U);
+  }
+  // Remember the initial spin limit.
+  uint hard_spin_start = hard_spin_limit;
+
+  // Loop waiting for all threads to offer termination or
+  // more work.
+  while (true) {
+    // Look for more work.
+    // Periodically sleep() instead of yield() to give threads
+    // waiting on the cores the chance to grab this code
+    if (yield_count <= WorkStealingYieldsBeforeSleep) {
+      // Do a yield or hardspin. For purposes of deciding whether
+      // to sleep, count this as a yield.
+      yield_count++;
+
+      // Periodically call yield() instead of spinning
+      // After WorkStealingSpinToYieldRatio spins, do a yield() call
+      // and reset the counts and starting limit.
+      if (hard_spin_count > WorkStealingSpinToYieldRatio) {
+        yield();
+        hard_spin_count = 0;
+        hard_spin_limit = hard_spin_start;
+#ifdef TRACESPINNING
+        _total_yields++;
+#endif
+      } else {
+        // Hard spin this time
+        // Increase the hard spinning period but only up to a limit.
+        hard_spin_limit = MIN2(2*hard_spin_limit,
+                               (uint) WorkStealingHardSpins);
+        for (uint j = 0; j < hard_spin_limit; j++) {
+          SpinPause();
+        }
+        hard_spin_count++;
+#ifdef TRACESPINNING
+        _total_spins++;
+#endif
+      }
+    } else {
+      log_develop_trace(gc, task)("ShenandoahTaskTerminator::do_spin_master_work() thread " PTR_FORMAT " sleeps after %u yields",
+                                  p2i(Thread::current()), yield_count);
+      yield_count = 0;
+
+      MonitorLockerEx locker(_blocker, Mutex::_no_safepoint_check_flag); // no safepoint check
+      _spin_master = NULL;
+      locker.wait(Mutex::_no_safepoint_check_flag, WorkStealingSleepMillis);
+      if (_spin_master == NULL) {
+        _spin_master = Thread::current();
+      } else {
+        return false;
+      }
+    }
+
+#ifdef TRACESPINNING
+    _total_peeks++;
+#endif
+    size_t tasks = tasks_in_queue_set();
+    if (tasks > 0 || (terminator != NULL && terminator->should_exit_termination())) {
+      MonitorLockerEx locker(_blocker, Mutex::_no_safepoint_check_flag); // no safepoint check
+
+      if (tasks >= _offered_termination - 1) {
+        locker.notify_all();
+      } else {
+        for (; tasks > 1; tasks --) {
+          locker.notify();
+        }
+      }
+      _spin_master = NULL;
+      return false;
+    } else if (_offered_termination == _n_threads) {
+      return true;
+    }
+  }
+}
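
With the usual defaults of this JDK vintage (WorkStealingHardSpins = 4096, WorkStealingSpinToYieldRatio = 10, WorkStealingYieldsBeforeSleep = 5000; stated here as assumptions, not taken from this patch), the backoff ladder in do_spin_master_work() proceeds as follows:

    // hard_spin_start = 4096 >> 10 = 4 SpinPause() iterations
    // each hard-spin turn doubles the limit, capped at 4096: 4, 8, 16, ... 4096
    // after more than 10 hard-spin turns: yield() and reset the limit to 4
    // after 5000 yields: sleep on _blocker and give up the spin-master role,
    // letting the next arriving thread claim it
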
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp	2020-01-17 17:10:41.873128729 +0100
@@ -0,0 +1,387 @@
+/*
+ * Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_HPP
+
+#include "gc/shared/taskqueue.hpp"
+#include "gc/shared/taskqueue.inline.hpp"
+#include "runtime/mutex.hpp"
+#include "runtime/thread.hpp"
+
+template <class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
+class BufferedOverflowTaskQueue: public OverflowTaskQueue<E, F, N>
+{
+public:
+  typedef OverflowTaskQueue<E, F, N> taskqueue_t;
+
+  BufferedOverflowTaskQueue() : _buf_empty(true) {};
+
+  TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)
+
+  // Push task t into the queue. Returns true on success.
+  inline bool push(E t);
+
+  // Attempt to pop from the queue. Returns true on success.
+  inline bool pop(E &t);
+
+  inline void clear();
+
+  inline bool is_empty() const {
+    return _buf_empty && taskqueue_t::is_empty();
+  }
+
+private:
+  bool _buf_empty;
+  E _elem;
+};
+
+// ObjArrayChunkedTask
+//
+// Encodes both regular oops, and the array oops plus chunking data for parallel array processing.
+// The design goal is to make the regular oop ops very fast, because that would be the prevailing
+// case. On the other hand, it should not block parallel array processing from efficiently dividing
+// the array work.
+//
+// The idea is to steal the bits from the 64-bit oop to encode array data, if needed. For the
+// proper divide-and-conquer strategies, we want to encode the "blocking" data. It turns out, the
+// most efficient way to do this is to encode the array block as (chunk * 2^pow), where it is assumed
+// that the block has the size of 2^pow. This requires pow to have only 5 bits (2^32) to encode
+// all possible arrays.
+//
+//    |---------oop---------|-pow-|--chunk---|
+//    0                    49     54        64
+//
+// By definition, chunk == 0 means "no chunk", i.e. chunking starts from 1.
+//
+// This encoding gives a few interesting benefits:
+//
+// a) Encoding/decoding regular oops is very simple, because the upper bits are zero in that task:
+//
+//    |---------oop---------|00000|0000000000| // no chunk data
+//
+// This helps the most ubiquitous path. The initialization amounts to putting the oop into the word
+// with zero padding. Testing for "chunkedness" is testing for zero with chunk mask.
+//
+// b) Splitting tasks for divide-and-conquer is possible. Suppose we have chunk <C, P> that covers
+// interval [ (C-1)*2^P; C*2^P ). We can then split it into two chunks:
+//      <2*C - 1, P-1>, that covers interval [ (2*C - 2)*2^(P-1); (2*C - 1)*2^(P-1) )
+//      <2*C, P-1>, that covers interval [ (2*C - 1)*2^(P-1); 2*C*2^(P-1) )
+//
+// Observe that the union of these two intervals is:
+//      [ (2*C - 2)*2^(P-1); 2*C*2^(P-1) )
+//
+// ...which is the original interval:
+//      [ (C-1)*2^P; C*2^P )
+//
+// c) The divide-and-conquer strategy could even start with chunk <1, round-log2-len(arr)>, and split
+// down in the parallel threads, which alleviates the upfront (serial) splitting costs.
+//
+// Encoding limitations caused by current bitscales mean:
+//    10 bits for chunk: max 1024 blocks per array
+//     5 bits for power: max 2^32 array
+//    49 bits for oop: max 512 TB of addressable space
+//
+// Stealing bits from oop trims down the addressable space. Stealing too few bits for chunk ID limits
+// potential parallelism. Stealing too few bits for pow limits the maximum array size that can be handled.
+// In future, these might be rebalanced to favor one degree of freedom against another. For example,
For example, +// if/when Arrays 2.0 bring 2^64-sized arrays, we might need to steal another bit for power. We could regain +// some bits back if chunks are counted in ObjArrayMarkingStride units. +// +// There is also a fallback version that uses plain fields, when we don't have enough space to steal the +// bits from the native pointer. It is useful to debug the optimized version. +// + +#ifdef _MSC_VER +#pragma warning(push) +// warning C4522: multiple assignment operators specified +#pragma warning( disable:4522 ) +#endif + +#ifdef _LP64 +#define SHENANDOAH_OPTIMIZED_OBJTASK 1 +#else +#define SHENANDOAH_OPTIMIZED_OBJTASK 0 +#endif + +#if SHENANDOAH_OPTIMIZED_OBJTASK +class ObjArrayChunkedTask +{ +public: + enum { + chunk_bits = 10, + pow_bits = 5, + oop_bits = sizeof(uintptr_t)*8 - chunk_bits - pow_bits + }; + enum { + oop_shift = 0, + pow_shift = oop_shift + oop_bits, + chunk_shift = pow_shift + pow_bits + }; + +public: + ObjArrayChunkedTask(oop o = NULL) { + assert(decode_oop(encode_oop(o)) == o, "oop can be encoded: " PTR_FORMAT, p2i(o)); + _obj = encode_oop(o); + } + ObjArrayChunkedTask(oop o, int chunk, int pow) { + assert(decode_oop(encode_oop(o)) == o, "oop can be encoded: " PTR_FORMAT, p2i(o)); + assert(decode_chunk(encode_chunk(chunk)) == chunk, "chunk can be encoded: %d", chunk); + assert(decode_pow(encode_pow(pow)) == pow, "pow can be encoded: %d", pow); + _obj = encode_oop(o) | encode_chunk(chunk) | encode_pow(pow); + } + ObjArrayChunkedTask(const ObjArrayChunkedTask& t): _obj(t._obj) { } + + ObjArrayChunkedTask& operator =(const ObjArrayChunkedTask& t) { + _obj = t._obj; + return *this; + } + volatile ObjArrayChunkedTask& + operator =(const volatile ObjArrayChunkedTask& t) volatile { + (void)const_cast(_obj = t._obj); + return *this; + } + + inline oop decode_oop(uintptr_t val) const { + return (oop) reinterpret_cast((val >> oop_shift) & right_n_bits(oop_bits)); + } + + inline int decode_chunk(uintptr_t val) const { + return (int) ((val >> chunk_shift) & right_n_bits(chunk_bits)); + } + + inline int decode_pow(uintptr_t val) const { + return (int) ((val >> pow_shift) & right_n_bits(pow_bits)); + } + + inline uintptr_t encode_oop(oop obj) const { + return ((uintptr_t)(void*) obj) << oop_shift; + } + + inline uintptr_t encode_chunk(int chunk) const { + return ((uintptr_t) chunk) << chunk_shift; + } + + inline uintptr_t encode_pow(int pow) const { + return ((uintptr_t) pow) << pow_shift; + } + + inline oop obj() const { return decode_oop(_obj); } + inline int chunk() const { return decode_chunk(_obj); } + inline int pow() const { return decode_pow(_obj); } + inline bool is_not_chunked() const { return (_obj & ~right_n_bits(oop_bits + pow_bits)) == 0; } + + DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid. 
+
+#ifdef _MSC_VER
+#pragma warning(push)
+// warning C4522: multiple assignment operators specified
+#pragma warning( disable:4522 )
+#endif
+
+#ifdef _LP64
+#define SHENANDOAH_OPTIMIZED_OBJTASK 1
+#else
+#define SHENANDOAH_OPTIMIZED_OBJTASK 0
+#endif
+
+#if SHENANDOAH_OPTIMIZED_OBJTASK
+class ObjArrayChunkedTask
+{
+public:
+  enum {
+    chunk_bits  = 10,
+    pow_bits    = 5,
+    oop_bits    = sizeof(uintptr_t)*8 - chunk_bits - pow_bits
+  };
+  enum {
+    oop_shift   = 0,
+    pow_shift   = oop_shift + oop_bits,
+    chunk_shift = pow_shift + pow_bits
+  };
+
+public:
+  ObjArrayChunkedTask(oop o = NULL) {
+    assert(decode_oop(encode_oop(o)) == o, "oop can be encoded: " PTR_FORMAT, p2i(o));
+    _obj = encode_oop(o);
+  }
+  ObjArrayChunkedTask(oop o, int chunk, int pow) {
+    assert(decode_oop(encode_oop(o)) == o, "oop can be encoded: " PTR_FORMAT, p2i(o));
+    assert(decode_chunk(encode_chunk(chunk)) == chunk, "chunk can be encoded: %d", chunk);
+    assert(decode_pow(encode_pow(pow)) == pow, "pow can be encoded: %d", pow);
+    _obj = encode_oop(o) | encode_chunk(chunk) | encode_pow(pow);
+  }
+  ObjArrayChunkedTask(const ObjArrayChunkedTask& t): _obj(t._obj) { }
+
+  ObjArrayChunkedTask& operator =(const ObjArrayChunkedTask& t) {
+    _obj = t._obj;
+    return *this;
+  }
+  volatile ObjArrayChunkedTask&
+  operator =(const volatile ObjArrayChunkedTask& t) volatile {
+    (void)const_cast<uintptr_t&>(_obj = t._obj);
+    return *this;
+  }
+
+  inline oop decode_oop(uintptr_t val) const {
+    return (oop) reinterpret_cast<void*>((val >> oop_shift) & right_n_bits(oop_bits));
+  }
+
+  inline int decode_chunk(uintptr_t val) const {
+    return (int) ((val >> chunk_shift) & right_n_bits(chunk_bits));
+  }
+
+  inline int decode_pow(uintptr_t val) const {
+    return (int) ((val >> pow_shift) & right_n_bits(pow_bits));
+  }
+
+  inline uintptr_t encode_oop(oop obj) const {
+    return ((uintptr_t)(void*) obj) << oop_shift;
+  }
+
+  inline uintptr_t encode_chunk(int chunk) const {
+    return ((uintptr_t) chunk) << chunk_shift;
+  }
+
+  inline uintptr_t encode_pow(int pow) const {
+    return ((uintptr_t) pow) << pow_shift;
+  }
+
+  inline oop obj()   const { return decode_oop(_obj); }
+  inline int chunk() const { return decode_chunk(_obj); }
+  inline int pow()   const { return decode_pow(_obj); }
+  inline bool is_not_chunked() const { return (_obj & ~right_n_bits(oop_bits + pow_bits)) == 0; }
+
+  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.
+
+  static uintptr_t max_addressable() {
+    return nth_bit(oop_bits);
+  }
+
+  static int chunk_size() {
+    return nth_bit(chunk_bits);
+  }
+
+private:
+  uintptr_t _obj;
+};
+#else
+class ObjArrayChunkedTask
+{
+public:
+  enum {
+    chunk_bits = 10,
+    pow_bits = 5
+  };
+public:
+  ObjArrayChunkedTask(oop o = NULL, int chunk = 0, int pow = 0): _obj(o) {
+    assert(0 <= chunk && chunk < nth_bit(chunk_bits), "chunk is sane: %d", chunk);
+    assert(0 <= pow && pow < nth_bit(pow_bits), "pow is sane: %d", pow);
+    _chunk = chunk;
+    _pow = pow;
+  }
+  ObjArrayChunkedTask(const ObjArrayChunkedTask& t): _obj(t._obj), _chunk(t._chunk), _pow(t._pow) { }
+
+  ObjArrayChunkedTask& operator =(const ObjArrayChunkedTask& t) {
+    _obj = t._obj;
+    _chunk = t._chunk;
+    _pow = t._pow;
+    return *this;
+  }
+  volatile ObjArrayChunkedTask&
+  operator =(const volatile ObjArrayChunkedTask& t) volatile {
+    (void)const_cast<oop&>(_obj = t._obj);
+    _chunk = t._chunk;
+    _pow = t._pow;
+    return *this;
+  }
+
+  inline oop obj()   const { return _obj; }
+  inline int chunk() const { return _chunk; }
+  inline int pow()   const { return _pow; }
+
+  inline bool is_not_chunked() const { return _chunk == 0; }
+
+  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.
+
+  static size_t max_addressable() {
+    return sizeof(oop);
+  }
+
+  static int chunk_size() {
+    return nth_bit(chunk_bits);
+  }
+
+private:
+  oop _obj;
+  int _chunk;
+  int _pow;
+};
+#endif // SHENANDOAH_OPTIMIZED_OBJTASK
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+typedef ObjArrayChunkedTask ShenandoahMarkTask;
+typedef BufferedOverflowTaskQueue<ShenandoahMarkTask, mtGC> ShenandoahBufferedOverflowTaskQueue;
+typedef Padded<ShenandoahBufferedOverflowTaskQueue> ShenandoahObjToScanQueue;
+
+template <class T, MEMFLAGS F>
+class ParallelClaimableQueueSet: public GenericTaskQueueSet<T, F> {
+private:
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile jint));
+  volatile jint     _claimed_index;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
+
+  debug_only(uint _reserved; )
+
+public:
+  using GenericTaskQueueSet<T, F>::size;
+
+public:
+  ParallelClaimableQueueSet(int n) : GenericTaskQueueSet<T, F>(n), _claimed_index(0) {
+    debug_only(_reserved = 0; )
+  }
+
+  void clear_claimed() { _claimed_index = 0; }
+  T* claim_next();
+
+  // reserve queues that are not for parallel claiming
+  void reserve(uint n) {
+    assert(n <= size(), "Sanity");
+    _claimed_index = (jint)n;
+    debug_only(_reserved = n;)
+  }
+
+  debug_only(uint get_reserved() const { return (uint)_reserved; })
+};
+
+template <class T, MEMFLAGS F>
+T* ParallelClaimableQueueSet<T, F>::claim_next() {
+  jint size = (jint)GenericTaskQueueSet<T, F>::size();
+
+  if (_claimed_index >= size) {
+    return NULL;
+  }
+
+  jint index = Atomic::add(1, &_claimed_index);
+
+  if (index <= size) {
+    return GenericTaskQueueSet<T, F>::queue((uint)index - 1);
+  } else {
+    return NULL;
+  }
+}
+
+class ShenandoahObjToScanQueueSet: public ParallelClaimableQueueSet<ShenandoahObjToScanQueue, mtGC> {
+public:
+  ShenandoahObjToScanQueueSet(int n) : ParallelClaimableQueueSet<ShenandoahObjToScanQueue, mtGC>(n) {}
+
+  bool is_empty();
+  void clear();
+
+#if TASKQUEUE_STATS
+  static void print_taskqueue_stats_hdr(outputStream* const st);
+  void print_taskqueue_stats() const;
+  void reset_taskqueue_stats();
+#endif // TASKQUEUE_STATS
+};
+
+class ShenandoahTerminatorTerminator : public TerminatorTerminator {
+private:
+  ShenandoahHeap* const _heap;
+public:
+  ShenandoahTerminatorTerminator(ShenandoahHeap* const heap) : _heap(heap) { }
+  // return true, terminates immediately, even if there's remaining work left
+  virtual bool should_exit_termination() { return _heap->cancelled_gc(); }
+};
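
A sketch of the parallel claiming protocol (the worker loop below is hypothetical): each thread claims whole queues via an atomic ticket counter rather than racing on individual entries:

    void hypothetical_worker(ShenandoahObjToScanQueueSet* queues) {
      // Atomic::add hands out 1-based tickets; ticket i claims queue i-1,
      // and NULL means all queues are already claimed.
      for (ShenandoahObjToScanQueue* q = queues->claim_next();
           q != NULL;
           q = queues->claim_next()) {
        // drain q
      }
    }
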
+
+/*
+ * This is an enhanced implementation of Google's work-stealing
+ * protocol, which is described in the paper:
+ * Understanding and improving JVM GC work stealing at the data center scale
+ * (http://dl.acm.org/citation.cfm?id=2926706)
+ *
+ * Instead of a dedicated spin-master, our implementation lets the spin-master
+ * relinquish the role before it goes to sleep/wait, allowing newly arrived
+ * threads to compete for the role.
+ * The intention of the above enhancement is to reduce the spin-master's
+ * latency in detecting new tasks for stealing and termination conditions.
+ */
+
+class ShenandoahTaskTerminator: public ParallelTaskTerminator {
+private:
+  Monitor* _blocker;
+  Thread*  _spin_master;
+
+public:
+  ShenandoahTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
+    ParallelTaskTerminator(n_threads, queue_set), _spin_master(NULL) {
+    _blocker = new Monitor(Mutex::leaf, "ShenandoahTaskTerminator", false, Monitor::_safepoint_check_never);
+  }
+
+  ~ShenandoahTaskTerminator() {
+    assert(_blocker != NULL, "Can not be NULL");
+    delete _blocker;
+  }
+
+  bool offer_termination(ShenandoahTerminatorTerminator* terminator);
+  bool offer_termination() { return offer_termination((ShenandoahTerminatorTerminator*)NULL); }
+
+private:
+  bool offer_termination(TerminatorTerminator* terminator) {
+    ShouldNotReachHere();
+    return false;
+  }
+
+private:
+  size_t tasks_in_queue_set() { return _queue_set->tasks(); }
+
+  /*
+   * Perform spin-master task.
+   * return true if termination condition is detected
+   * otherwise, return false
+   */
+  bool do_spin_master_work(ShenandoahTerminatorTerminator* terminator);
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_HPP
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.inline.hpp	2020-01-17 17:10:42.483128695 +0100
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_INLINE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_INLINE_HPP
+
+#include "gc/shared/taskqueue.inline.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
+#include "utilities/stack.inline.hpp"
+
+template <class E, MEMFLAGS F, unsigned int N>
+bool BufferedOverflowTaskQueue<E, F, N>::pop(E &t) {
+ if (!_buf_empty) {
+ t = _elem;
+ _buf_empty = true;
+ return true;
+ }
+
+ if (taskqueue_t::pop_local(t)) {
+ return true;
+ }
+
+ return taskqueue_t::pop_overflow(t);
+}
+
+template <class E, MEMFLAGS F, unsigned int N>
+inline bool BufferedOverflowTaskQueue<E, F, N>::push(E t) {
+ if (_buf_empty) {
+ _elem = t;
+ _buf_empty = false;
+ } else {
+ bool pushed = taskqueue_t::push(_elem);
+ assert(pushed, "overflow queue should always succeed pushing");
+ _elem = t;
+ }
+ return true;
+}
+
+template <class E, MEMFLAGS F, unsigned int N>
+void BufferedOverflowTaskQueue<E, F, N>::clear() {
+ _buf_empty = true;
+ taskqueue_t::set_empty();
+ taskqueue_t::overflow_stack()->clear();
+}
+
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_INLINE_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.hpp 2020-01-17 17:10:43.092128662 +0100 @@ -0,0 +1,175 @@ +/*
+ * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHTHREADLOCALDATA_HPP +#define SHARE_GC_SHENANDOAH_SHENANDOAHTHREADLOCALDATA_HPP + +#include "gc/shared/plab.hpp" +#include "gc/shenandoah/shenandoahBarrierSet.hpp" +#include "gc/shenandoah/shenandoahSATBMarkQueue.hpp" +#include "runtime/thread.hpp" +#include "utilities/debug.hpp" +#include "utilities/sizes.hpp" + +class ShenandoahThreadLocalData { +public: + static const uint INVALID_WORKER_ID = uint(-1); + +private: + char _gc_state; + char _oom_during_evac; + ShenandoahSATBMarkQueue _satb_mark_queue; + PLAB* _gclab; + size_t _gclab_size; + uint _worker_id; + bool _force_satb_flush; + + ShenandoahThreadLocalData() : + _gc_state(0), + _oom_during_evac(0), + _satb_mark_queue(&ShenandoahBarrierSet::satb_mark_queue_set()), + _gclab(NULL), + _gclab_size(0), + _worker_id(INVALID_WORKER_ID), + _force_satb_flush(false) { + } + + ~ShenandoahThreadLocalData() { + if (_gclab != NULL) { + delete _gclab; + } + } + + static ShenandoahThreadLocalData* data(Thread* thread) { + assert(UseShenandoahGC, "Sanity"); + return thread->gc_data(); + } + + static ByteSize satb_mark_queue_offset() { + return Thread::gc_data_offset() + byte_offset_of(ShenandoahThreadLocalData, _satb_mark_queue); + } + +public: + static void create(Thread* thread) { + new (data(thread)) ShenandoahThreadLocalData(); + } + + static void destroy(Thread* thread) { + data(thread)->~ShenandoahThreadLocalData(); + } + + static ShenandoahSATBMarkQueue& satb_mark_queue(Thread* thread) { + return data(thread)->_satb_mark_queue; + } + + static bool is_oom_during_evac(Thread* thread) { + return (data(thread)->_oom_during_evac & 1) == 1; + } + + static void set_oom_during_evac(Thread* thread, bool oom) { + if (oom) { + data(thread)->_oom_during_evac |= 1; + } else { + data(thread)->_oom_during_evac &= ~1; + } + } + + static void set_gc_state(Thread* thread, char gc_state) { + data(thread)->_gc_state = gc_state; + } + + static char gc_state(Thread* thread) { + return data(thread)->_gc_state; + } + + static void set_worker_id(Thread* thread, uint id) { + assert(thread->is_Worker_thread(), "Must be a worker thread"); + data(thread)->_worker_id = id; + } + + static uint worker_id(Thread* thread) { + assert(thread->is_Worker_thread(), "Must be a worker thread"); + return data(thread)->_worker_id; + } + + static void set_force_satb_flush(Thread* thread, bool v) { + data(thread)->_force_satb_flush = v; + } + + static bool is_force_satb_flush(Thread* thread) { + return data(thread)->_force_satb_flush; + } + + static void initialize_gclab(Thread* thread) { + assert (thread->is_Java_thread() || thread->is_Worker_thread(), "Only Java and GC worker threads are allowed to get GCLABs"); + assert(data(thread)->_gclab == NULL, "Only initialize once"); + data(thread)->_gclab = new PLAB(PLAB::min_size()); + data(thread)->_gclab_size = 0; + } + + static PLAB* gclab(Thread* thread) { + return data(thread)->_gclab; + } + + static size_t gclab_size(Thread* thread) { + return data(thread)->_gclab_size; + } + + static void set_gclab_size(Thread* thread, size_t v) { + data(thread)->_gclab_size = v; + } + +#ifdef ASSERT + static void set_evac_allowed(Thread* thread, bool evac_allowed) { + if (evac_allowed) { + data(thread)->_oom_during_evac |= 2; + } else { + data(thread)->_oom_during_evac &= ~2; + } + } + + static bool is_evac_allowed(Thread* thread) { + return (data(thread)->_oom_during_evac & 2) == 2; + } +#endif + + // Offsets + static ByteSize satb_mark_queue_active_offset() { + return satb_mark_queue_offset() + 
ShenandoahSATBMarkQueue::byte_offset_of_active(); + } + + static ByteSize satb_mark_queue_index_offset() { + return satb_mark_queue_offset() + ShenandoahSATBMarkQueue::byte_offset_of_index(); + } + + static ByteSize satb_mark_queue_buffer_offset() { + return satb_mark_queue_offset() + ShenandoahSATBMarkQueue::byte_offset_of_buf(); + } + + static ByteSize gc_state_offset() { + return Thread::gc_data_offset() + byte_offset_of(ShenandoahThreadLocalData, _gc_state); + } + +}; + +#endif // SHARE_GC_SHENANDOAH_SHENANDOAHTHREADLOCALDATA_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahTimingTracker.cpp 2020-01-17 17:10:43.697128628 +0100 @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahPhaseTimings.hpp" +#include "gc/shenandoah/shenandoahTimingTracker.hpp" +#include "gc/shenandoah/shenandoahUtils.hpp" +#include "runtime/os.hpp" + + +ShenandoahPhaseTimings::Phase ShenandoahTerminationTracker::_current_termination_phase = ShenandoahPhaseTimings::_num_phases; + +ShenandoahWorkerTimingsTracker::ShenandoahWorkerTimingsTracker(ShenandoahWorkerTimings* worker_times, + ShenandoahPhaseTimings::GCParPhases phase, uint worker_id) : + _phase(phase), _worker_times(worker_times), _worker_id(worker_id) { + if (_worker_times != NULL) { + _start_time = os::elapsedTime(); + } +} + +ShenandoahWorkerTimingsTracker::~ShenandoahWorkerTimingsTracker() { + if (_worker_times != NULL) { + _worker_times->record_time_secs(_phase, _worker_id, os::elapsedTime() - _start_time); + } + + // Do nothing. Per-worker events are not supported in this JDK. 
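+ // For illustration only (not part of this change): the intended scoped-usage
+ // pattern of this tracker, mirroring how the traversal tasks use it below:
+ //
+ //   void work(uint worker_id) {
+ //     ShenandoahWorkerTimings* wt = ShenandoahHeap::heap()->phase_timings()->worker_times();
+ //     ShenandoahWorkerTimingsTracker timer(wt, ShenandoahPhaseTimings::FinishQueues, worker_id);
+ //     // ... timed work; the destructor records the elapsed seconds for this worker
+ //   }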
+} + +ShenandoahTerminationTimingsTracker::ShenandoahTerminationTimingsTracker(uint worker_id) : + _worker_id(worker_id) { + if (ShenandoahTerminationTrace) { + _start_time = os::elapsedTime(); + } +} + +ShenandoahTerminationTimingsTracker::~ShenandoahTerminationTimingsTracker() { + if (ShenandoahTerminationTrace) { + ShenandoahHeap::heap()->phase_timings()->termination_times()->record_time_secs(_worker_id, os::elapsedTime() - _start_time); + } +} + +ShenandoahTerminationTracker::ShenandoahTerminationTracker(ShenandoahPhaseTimings::Phase phase) : _phase(phase) { + assert(_current_termination_phase == ShenandoahPhaseTimings::_num_phases, "Should be invalid"); + assert(phase == ShenandoahPhaseTimings::termination || + phase == ShenandoahPhaseTimings::final_traversal_gc_termination || + phase == ShenandoahPhaseTimings::full_gc_mark_termination || + phase == ShenandoahPhaseTimings::conc_termination || + phase == ShenandoahPhaseTimings::conc_traversal_termination || + phase == ShenandoahPhaseTimings::weakrefs_termination || + phase == ShenandoahPhaseTimings::full_gc_weakrefs_termination, + "Only these phases"); + + assert(!Thread::current()->is_Worker_thread() && + (Thread::current()->is_VM_thread() || + Thread::current()->is_ConcurrentGC_thread()), + "Called from wrong thread"); + + _current_termination_phase = phase; + ShenandoahHeap::heap()->phase_timings()->termination_times()->reset(); +} + +ShenandoahTerminationTracker::~ShenandoahTerminationTracker() { + assert(_phase == _current_termination_phase, "Can not change phase"); + ShenandoahPhaseTimings* phase_times = ShenandoahHeap::heap()->phase_timings(); + + double t = phase_times->termination_times()->average(); + phase_times->record_phase_time(_phase, t); + debug_only(_current_termination_phase = ShenandoahPhaseTimings::_num_phases;) +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahTimingTracker.hpp 2020-01-17 17:10:44.304128595 +0100 @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHTIMINGTRACKER_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHTIMINGTRACKER_HPP + +#include "jfr/jfrEvents.hpp" +#include "gc/shenandoah/shenandoahPhaseTimings.hpp" +#include "memory/allocation.hpp" + +class ShenandoahWorkerTimingsTracker : public StackObj { +private: + double _start_time; + ShenandoahPhaseTimings::GCParPhases _phase; + ShenandoahWorkerTimings* _worker_times; + uint _worker_id; + +public: + ShenandoahWorkerTimingsTracker(ShenandoahWorkerTimings* worker_times, ShenandoahPhaseTimings::GCParPhases phase, uint worker_id); + ~ShenandoahWorkerTimingsTracker(); +}; + + +class ShenandoahTerminationTimingsTracker : public StackObj { +private: + double _start_time; + uint _worker_id; + +public: + ShenandoahTerminationTimingsTracker(uint worker_id); + ~ShenandoahTerminationTimingsTracker(); +}; + +// Tracking termination time in specific GC phase +class ShenandoahTerminationTracker : public StackObj { +private: + ShenandoahPhaseTimings::Phase _phase; + + static ShenandoahPhaseTimings::Phase _current_termination_phase; +public: + ShenandoahTerminationTracker(ShenandoahPhaseTimings::Phase phase); + ~ShenandoahTerminationTracker(); + + static ShenandoahPhaseTimings::Phase current_termination_phase() { return _current_termination_phase; } +}; + +#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHTIMINGTRACKER_HPP + --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahTracer.hpp 2020-01-17 17:10:44.901128562 +0100 @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHTRACER_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHTRACER_HPP + +#include "gc/shared/gcTrace.hpp" + +class ShenandoahTracer : public GCTracer { +public: + ShenandoahTracer() : GCTracer(Shenandoah) {} +}; + +#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHTRACER_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp 2020-01-17 17:10:45.510128528 +0100 @@ -0,0 +1,1101 @@ +/* + * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shared/referenceProcessor.hpp"
+#include "gc/shared/referenceProcessorPhaseTimes.hpp"
+#include "gc/shared/workgroup.hpp"
+#include "gc/shared/weakProcessor.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/shenandoahClosures.inline.hpp"
+#include "gc/shenandoah/shenandoahCodeRoots.hpp"
+#include "gc/shenandoah/shenandoahCollectionSet.hpp"
+#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahFreeSet.hpp"
+#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
+#include "gc/shenandoah/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
+#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
+#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
+#include "gc/shenandoah/shenandoahStringDedup.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
+#include "gc/shenandoah/shenandoahTimingTracker.hpp"
+#include "gc/shenandoah/shenandoahTraversalGC.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+#include "gc/shenandoah/shenandoahVerifier.hpp"
+
+#include "memory/iterator.hpp"
+#include "memory/metaspace.hpp"
+#include "memory/resourceArea.hpp"
+
+/**
+ * NOTE: We use the SATB buffer from thread.hpp and satbMarkQueue.hpp; however, this is not an SATB algorithm.
+ * We use the buffer as a generic oop buffer to enqueue new values from concurrent oop stores; in other
+ * words, the algorithm is incremental-update-based.
+ *
+ * NOTE on interaction with TAMS: we want to avoid traversing new objects for
+ * several reasons:
+ * - We will not reclaim them in this cycle anyway, because they are not in the
+ *   cset
+ * - Traversing them makes up the bulk of the work during final-pause
+ * - Skipping them also shortens the concurrent cycle, because we don't
+ *   pointlessly traverse through newly allocated objects.
+ * - As a nice side-effect, it solves the I-U termination problem (mutators
+ *   cannot outrun the GC by allocating like crazy)
+ * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
+ *   target object of stores if it's new. Treating new objects live implicitly
+ *   achieves the same, but without extra barriers. I think the effect of
+ *   shortened final-pause (mentioned above) is the main advantage of MWF. In
+ *   particular, we will not see the head of a completely new long linked list
+ *   in final-pause and end up traversing huge chunks of the heap there.
+ * - We don't need to see/update the fields of new objects either, because they
+ *   are either still null, or anything that's been stored into them has been
+ *   evacuated+enqueued before (and will thus be treated later).
+ *
+ * We achieve this by setting TAMS for each region, and everything allocated
+ * beyond TAMS will be 'implicitly marked'.
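+ *
+ * As a sketch of the resulting invariant (names illustrative, not actual code):
+ * for an object at address addr in region r,
+ *
+ *   implicitly_marked(addr) == (addr >= TAMS(r))
+ *
+ * so a newly allocated object, which always sits above TAMS, is never pushed
+ * to the mark queues, while objects below TAMS are marked and traversed
+ * explicitly.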
+ *
+ * Gotchas:
+ * - While we want new objects to be implicitly marked, we don't want to count
+ *   them alive. Otherwise the next cycle wouldn't pick them up and consider
+ *   them for cset. This means that we need to protect such regions from
+ *   getting accidentally trashed at the end of the traversal cycle. This is why I
+ *   keep track of alloc-regions and check is_alloc_region() in the trashing
+ *   code.
+ * - We *need* to traverse through evacuated objects. Those objects are
+ *   pre-existing, and any references in them point to interesting objects that
+ *   we need to see. We also want to count them as live, because we just
+ *   determined that they are alive :-) I achieve this by upping TAMS
+ *   concurrently for every gclab/gc-shared alloc before publishing the
+ *   evacuated object. This way, the GC threads will not consider such objects
+ *   implicitly marked, and traverse through them as normal.
+ */
+class ShenandoahTraversalSATBBufferClosure : public ShenandoahSATBBufferClosure {
+private:
+ ShenandoahObjToScanQueue* _queue;
+ ShenandoahTraversalGC* _traversal_gc;
+ ShenandoahHeap* const _heap;
+
+public:
+ ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) :
+ _queue(q),
+ _heap(ShenandoahHeap::heap())
+ { }
+
+ void do_buffer(void** buffer, size_t size) {
+ for (size_t i = 0; i < size; ++i) {
+ oop* p = (oop*) &buffer[i];
+ oop obj = RawAccess<>::oop_load(p);
+ shenandoah_assert_not_forwarded(p, obj);
+ if (_heap->marking_context()->mark(obj)) {
+ _queue->push(ShenandoahMarkTask(obj));
+ }
+ }
+ }
+};
+
+class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure {
+private:
+ ShenandoahTraversalSATBBufferClosure* _satb_cl;
+
+public:
+ ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) :
+ _satb_cl(satb_cl) {}
+
+ void do_thread(Thread* thread) {
+ if (thread->is_Java_thread()) {
+ JavaThread* jt = (JavaThread*)thread;
+ ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
+ } else if (thread->is_VM_thread()) {
+ ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
+ }
+ }
+};
+
+// Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal
+// and remark them later during final-traversal.
+class ShenandoahMarkCLDClosure : public CLDClosure { +private: + OopClosure* _cl; +public: + ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {} + void do_cld(ClassLoaderData* cld) { + cld->oops_do(_cl, true, true); + } +}; + +// Like CLDToOopClosure, but only process modified CLDs +class ShenandoahRemarkCLDClosure : public CLDClosure { +private: + OopClosure* _cl; +public: + ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {} + void do_cld(ClassLoaderData* cld) { + if (cld->has_modified_oops()) { + cld->oops_do(_cl, true, true); + } + } +}; + +class ShenandoahInitTraversalCollectionTask : public AbstractGangTask { +private: + ShenandoahCSetRootScanner* _rp; + ShenandoahHeap* _heap; + ShenandoahCsetCodeRootsIterator* _cset_coderoots; + ShenandoahStringDedupRoots _dedup_roots; + +public: + ShenandoahInitTraversalCollectionTask(ShenandoahCSetRootScanner* rp) : + AbstractGangTask("Shenandoah Init Traversal Collection"), + _rp(rp), + _heap(ShenandoahHeap::heap()) {} + + void work(uint worker_id) { + ShenandoahParallelWorkerSession worker_session(worker_id); + + ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues(); + ShenandoahObjToScanQueue* q = queues->queue(worker_id); + + bool process_refs = _heap->process_references(); + bool unload_classes = _heap->unload_classes(); + ReferenceProcessor* rp = NULL; + if (process_refs) { + rp = _heap->ref_processor(); + } + + // Step 1: Process ordinary GC roots. + { + ShenandoahTraversalRootsClosure roots_cl(q, rp); + ShenandoahMarkCLDClosure cld_cl(&roots_cl); + MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations); + if (unload_classes) { + _rp->roots_do(worker_id, &roots_cl, NULL, &code_cl); + } else { + _rp->roots_do(worker_id, &roots_cl, &cld_cl, &code_cl); + } + + AlwaysTrueClosure is_alive; + _dedup_roots.oops_do(&is_alive, &roots_cl, worker_id); + } + } +}; + +class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask { +private: + ShenandoahTaskTerminator* _terminator; + ShenandoahHeap* _heap; +public: + ShenandoahConcurrentTraversalCollectionTask(ShenandoahTaskTerminator* terminator) : + AbstractGangTask("Shenandoah Concurrent Traversal Collection"), + _terminator(terminator), + _heap(ShenandoahHeap::heap()) {} + + void work(uint worker_id) { + ShenandoahConcurrentWorkerSession worker_session(worker_id); + ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers); + ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc(); + + // Drain all outstanding work in queues. 
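+ // main_loop() dispatches over three flags -- degenerated? unload classes?
+ // string dedup? -- to pick one of eight closure variants, then runs
+ // main_loop_work() with the chosen one (see further down in this file).
+ // Passing sts_yield = true here lets the concurrent phase yield to
+ // safepoints through the suspendible thread set joined above.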
+ traversal_gc->main_loop(worker_id, _terminator, true); + } +}; + +class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask { +private: + ShenandoahAllRootScanner* _rp; + ShenandoahTaskTerminator* _terminator; + ShenandoahHeap* _heap; +public: + ShenandoahFinalTraversalCollectionTask(ShenandoahAllRootScanner* rp, ShenandoahTaskTerminator* terminator) : + AbstractGangTask("Shenandoah Final Traversal Collection"), + _rp(rp), + _terminator(terminator), + _heap(ShenandoahHeap::heap()) {} + + void work(uint worker_id) { + ShenandoahParallelWorkerSession worker_session(worker_id); + + ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc(); + + ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues(); + ShenandoahObjToScanQueue* q = queues->queue(worker_id); + + bool process_refs = _heap->process_references(); + bool unload_classes = _heap->unload_classes(); + ReferenceProcessor* rp = NULL; + if (process_refs) { + rp = _heap->ref_processor(); + } + + // Step 0: Drain outstanding SATB queues. + // NOTE: we piggy-back draining of remaining thread SATB buffers on the final root scan below. + ShenandoahTraversalSATBBufferClosure satb_cl(q); + { + // Process remaining finished SATB buffers. + ShenandoahSATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set(); + while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)); + // Process remaining threads SATB buffers below. + } + + // Step 1: Process GC roots. + // For oops in code roots, they are marked, evacuated, enqueued for further traversal, + // and the references to the oops are updated during init pause. New nmethods are handled + // in similar way during nmethod-register process. Therefore, we don't need to rescan code + // roots here. + if (!_heap->is_degenerated_gc_in_progress()) { + ShenandoahTraversalRootsClosure roots_cl(q, rp); + ShenandoahTraversalSATBThreadsClosure tc(&satb_cl); + if (unload_classes) { + ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl); + _rp->strong_roots_do(worker_id, &roots_cl, &remark_cld_cl, NULL, &tc); + } else { + CLDToOopClosure cld_cl(&roots_cl); + _rp->roots_do(worker_id, &roots_cl, &cld_cl, NULL, &tc); + } + } else { + ShenandoahTraversalDegenClosure roots_cl(q, rp); + ShenandoahTraversalSATBThreadsClosure tc(&satb_cl); + if (unload_classes) { + ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl); + _rp->strong_roots_do(worker_id, &roots_cl, &remark_cld_cl, NULL, &tc); + } else { + CLDToOopClosure cld_cl(&roots_cl); + _rp->roots_do(worker_id, &roots_cl, &cld_cl, NULL, &tc); + } + } + + { + ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times(); + ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id); + + // Step 3: Finally drain all outstanding work in queues. 
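+ // When the drain runs dry, workers rendezvous through the terminator; a
+ // sketch of the exit path inside main_loop_work() (see below):
+ //
+ //   ShenandoahTerminatorTerminator tt(_heap);
+ //   if (terminator->offer_termination(&tt)) return;  // no work left, or GC cancelled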
+ traversal_gc->main_loop(worker_id, _terminator, false); + } + + } +}; + +ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) : + _heap(heap), + _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())), + _traversal_set(ShenandoahHeapRegionSet()) { + + // Traversal does not support concurrent code root scanning + FLAG_SET_DEFAULT(ShenandoahConcurrentScanCodeRoots, false); + + uint num_queues = heap->max_workers(); + for (uint i = 0; i < num_queues; ++i) { + ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue(); + task_queue->initialize(); + _task_queues->register_queue(i, task_queue); + } +} + +ShenandoahTraversalGC::~ShenandoahTraversalGC() { +} + +void ShenandoahTraversalGC::prepare_regions() { + size_t num_regions = _heap->num_regions(); + ShenandoahMarkingContext* const ctx = _heap->marking_context(); + for (size_t i = 0; i < num_regions; i++) { + ShenandoahHeapRegion* region = _heap->get_region(i); + if (_heap->is_bitmap_slice_committed(region)) { + if (_traversal_set.is_in(i)) { + ctx->capture_top_at_mark_start(region); + region->clear_live_data(); + assert(ctx->is_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared"); + } else { + // Everything outside the traversal set is always considered live. + ctx->reset_top_at_mark_start(region); + } + } else { + // FreeSet may contain uncommitted empty regions, once they are recommitted, + // their TAMS may have old values, so reset them here. + ctx->reset_top_at_mark_start(region); + } + } +} + +void ShenandoahTraversalGC::prepare() { + if (UseTLAB) { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_accumulate_stats); + _heap->accumulate_statistics_tlabs(); + } + + { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_make_parsable); + _heap->make_parsable(true); + } + + if (UseTLAB) { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_resize_tlabs); + _heap->resize_tlabs(); + } + + assert(_heap->marking_context()->is_bitmap_clear(), "need clean mark bitmap"); + assert(!_heap->marking_context()->is_complete(), "should not be complete"); + + // About to choose the collection set, make sure we know which regions are pinned. 
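+ // A pinned region (for example, one holding an object exposed through a JNI
+ // critical section) must not be evacuated; refreshing the pin status here
+ // keeps the heuristics below from selecting a region whose pin count has
+ // changed since the last sync.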
+ { + ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_prepare_sync_pinned); + _heap->sync_pinned_region_status(); + } + + ShenandoahCollectionSet* collection_set = _heap->collection_set(); + { + ShenandoahHeapLocker lock(_heap->lock()); + + collection_set->clear(); + assert(collection_set->count() == 0, "collection set not clear"); + + // Find collection set + _heap->heuristics()->choose_collection_set(collection_set); + prepare_regions(); + + // Rebuild free set + _heap->free_set()->rebuild(); + } + + log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "%s, " SIZE_FORMAT "%s CSet, " SIZE_FORMAT " CSet regions", + byte_size_in_proper_unit(collection_set->garbage()), proper_unit_for_byte_size(collection_set->garbage()), + byte_size_in_proper_unit(collection_set->live_data()), proper_unit_for_byte_size(collection_set->live_data()), + collection_set->count()); +} + +void ShenandoahTraversalGC::init_traversal_collection() { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC"); + + if (ShenandoahVerify) { + _heap->verifier()->verify_before_traversal(); + } + + if (VerifyBeforeGC) { + Universe::verify(); + } + + { + ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare); + prepare(); + } + + _heap->set_concurrent_traversal_in_progress(true); + + bool process_refs = _heap->process_references(); + if (process_refs) { + ReferenceProcessor* rp = _heap->ref_processor(); + rp->enable_discovery(true /*verify_no_refs*/); + rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs()); + } + + { + ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work); + assert(_task_queues->is_empty(), "queues must be empty before traversal GC"); + TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats()); + +#if COMPILER2_OR_JVMCI + DerivedPointerTable::clear(); +#endif + + { + uint nworkers = _heap->workers()->active_workers(); + task_queues()->reserve(nworkers); + ShenandoahCSetRootScanner rp(nworkers, ShenandoahPhaseTimings::init_traversal_gc_work); + ShenandoahInitTraversalCollectionTask traversal_task(&rp); + _heap->workers()->run_task(&traversal_task); + } + +#if COMPILER2_OR_JVMCI + DerivedPointerTable::update_pointers(); +#endif + } + + if (ShenandoahPacing) { + _heap->pacer()->setup_for_traversal(); + } +} + +void ShenandoahTraversalGC::main_loop(uint w, ShenandoahTaskTerminator* t, bool sts_yield) { + ShenandoahObjToScanQueue* q = task_queues()->queue(w); + + // Initialize live data. 
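+ // The liveness cache is a per-worker array of jushort counters, one slot per
+ // region: do_task() accumulates live words into it, and flush_liveness_cache()
+ // at the end of this method folds the counters back into the regions.
+ // Roughly (a sketch, not the actual code):
+ //
+ //   ld[r->region_number()] += (jushort) live_words;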
+ jushort* ld = _heap->get_liveness_cache(w); + + ReferenceProcessor* rp = NULL; + if (_heap->process_references()) { + rp = _heap->ref_processor(); + } + { + if (!_heap->is_degenerated_gc_in_progress()) { + if (_heap->unload_classes()) { + if (ShenandoahStringDedup::is_enabled()) { + ShenandoahTraversalMetadataDedupClosure cl(q, rp); + main_loop_work(&cl, ld, w, t, sts_yield); + } else { + ShenandoahTraversalMetadataClosure cl(q, rp); + main_loop_work(&cl, ld, w, t, sts_yield); + } + } else { + if (ShenandoahStringDedup::is_enabled()) { + ShenandoahTraversalDedupClosure cl(q, rp); + main_loop_work(&cl, ld, w, t, sts_yield); + } else { + ShenandoahTraversalClosure cl(q, rp); + main_loop_work(&cl, ld, w, t, sts_yield); + } + } + } else { + if (_heap->unload_classes()) { + if (ShenandoahStringDedup::is_enabled()) { + ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp); + main_loop_work(&cl, ld, w, t, sts_yield); + } else { + ShenandoahTraversalMetadataDegenClosure cl(q, rp); + main_loop_work(&cl, ld, w, t, sts_yield); + } + } else { + if (ShenandoahStringDedup::is_enabled()) { + ShenandoahTraversalDedupDegenClosure cl(q, rp); + main_loop_work(&cl, ld, w, t, sts_yield); + } else { + ShenandoahTraversalDegenClosure cl(q, rp); + main_loop_work(&cl, ld, w, t, sts_yield); + } + } + } + } + + _heap->flush_liveness_cache(w); +} + +template +void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield) { + ShenandoahObjToScanQueueSet* queues = task_queues(); + ShenandoahObjToScanQueue* q = queues->queue(worker_id); + ShenandoahConcurrentMark* conc_mark = _heap->concurrent_mark(); + + uintx stride = ShenandoahMarkLoopStride; + + ShenandoahMarkTask task; + + // Process outstanding queues, if any. + q = queues->claim_next(); + while (q != NULL) { + if (_heap->check_cancelled_gc_and_yield(sts_yield)) { + return; + } + + for (uint i = 0; i < stride; i++) { + if (q->pop(task)) { + conc_mark->do_task(q, cl, live_data, &task); + } else { + assert(q->is_empty(), "Must be empty"); + q = queues->claim_next(); + break; + } + } + } + + if (check_and_handle_cancelled_gc(terminator, sts_yield)) return; + + // Normal loop. 
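+ // Each iteration below drains completed SATB buffers first, then pops or
+ // steals up to `stride` tasks; only a full stride with no work sends the
+ // worker into the termination protocol. The steal seed (17) is just an
+ // arbitrary starting value for the per-worker pseudo-random victim selection.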
+ q = queues->queue(worker_id); + + ShenandoahTraversalSATBBufferClosure drain_satb(q); + ShenandoahSATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set(); + + int seed = 17; + + while (true) { + if (check_and_handle_cancelled_gc(terminator, sts_yield)) return; + + while (satb_mq_set.completed_buffers_num() > 0) { + satb_mq_set.apply_closure_to_completed_buffer(&drain_satb); + } + + uint work = 0; + for (uint i = 0; i < stride; i++) { + if (q->pop(task) || + queues->steal(worker_id, &seed, task)) { + conc_mark->do_task(q, cl, live_data, &task); + work++; + } else { + break; + } + } + + if (work == 0) { + // No more work, try to terminate + ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers); + ShenandoahTerminationTimingsTracker term_tracker(worker_id); + ShenandoahTerminatorTerminator tt(_heap); + + if (terminator->offer_termination(&tt)) return; + } + } +} + +bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ShenandoahTaskTerminator* terminator, bool sts_yield) { + if (_heap->cancelled_gc()) { + return true; + } + return false; +} + +void ShenandoahTraversalGC::concurrent_traversal_collection() { + ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal); + if (!_heap->cancelled_gc()) { + uint nworkers = _heap->workers()->active_workers(); + task_queues()->reserve(nworkers); + ShenandoahTerminationTracker tracker(ShenandoahPhaseTimings::conc_traversal_termination); + + ShenandoahTaskTerminator terminator(nworkers, task_queues()); + ShenandoahConcurrentTraversalCollectionTask task(&terminator); + _heap->workers()->run_task(&task); + } + + if (!_heap->cancelled_gc() && ShenandoahPreclean && _heap->process_references()) { + preclean_weak_refs(); + } +} + +void ShenandoahTraversalGC::final_traversal_collection() { + _heap->make_parsable(true); + + if (!_heap->cancelled_gc()) { +#if COMPILER2_OR_JVMCI + DerivedPointerTable::clear(); +#endif + ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work); + uint nworkers = _heap->workers()->active_workers(); + task_queues()->reserve(nworkers); + + // Finish traversal + ShenandoahAllRootScanner rp(nworkers, ShenandoahPhaseTimings::final_traversal_gc_work); + ShenandoahTerminationTracker term(ShenandoahPhaseTimings::final_traversal_gc_termination); + + ShenandoahTaskTerminator terminator(nworkers, task_queues()); + ShenandoahFinalTraversalCollectionTask task(&rp, &terminator); + _heap->workers()->run_task(&task); +#if COMPILER2_OR_JVMCI + DerivedPointerTable::update_pointers(); +#endif + } + + if (!_heap->cancelled_gc() && _heap->process_references()) { + weak_refs_work(); + } + + if (!_heap->cancelled_gc()) { + assert(_task_queues->is_empty(), "queues must be empty after traversal GC"); + TASKQUEUE_STATS_ONLY(_task_queues->print_taskqueue_stats()); + TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats()); + + // No more marking expected + _heap->mark_complete_marking_context(); + + fixup_roots(); + if (_heap->unload_classes()) { + _heap->unload_classes_and_cleanup_tables(false); + } else { + ShenandoahIsAliveSelector alive; + StringTable::unlink(alive.is_alive_closure()); + } + + // Resize metaspace + MetaspaceGC::compute_new_size(); + + // Need to see that pinned region status is updated: newly pinned regions must not + // be trashed. New unpinned regions should be trashed. + { + ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_sync_pinned); + _heap->sync_pinned_region_status(); + } + + // Still good? 
We can now trash the cset, and make final verification + { + ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup); + ShenandoahHeapLocker lock(_heap->lock()); + + // Trash everything + // Clear immediate garbage regions. + size_t num_regions = _heap->num_regions(); + + ShenandoahHeapRegionSet* traversal_regions = traversal_set(); + ShenandoahFreeSet* free_regions = _heap->free_set(); + ShenandoahMarkingContext* const ctx = _heap->marking_context(); + free_regions->clear(); + for (size_t i = 0; i < num_regions; i++) { + ShenandoahHeapRegion* r = _heap->get_region(i); + bool not_allocated = ctx->top_at_mark_start(r) == r->top(); + + bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated; + if (r->is_humongous_start() && candidate) { + // Trash humongous. + HeapWord* humongous_obj = r->bottom(); + assert(!ctx->is_marked(oop(humongous_obj)), "must not be marked"); + r->make_trash_immediate(); + while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) { + i++; + r = _heap->get_region(i); + assert(r->is_humongous_continuation(), "must be humongous continuation"); + r->make_trash_immediate(); + } + } else if (!r->is_empty() && candidate) { + // Trash regular. + assert(!r->is_humongous(), "handled above"); + assert(!r->is_trash(), "must not already be trashed"); + r->make_trash_immediate(); + } + } + _heap->collection_set()->clear(); + _heap->free_set()->rebuild(); + reset(); + } + + assert(_task_queues->is_empty(), "queues must be empty after traversal GC"); + _heap->set_concurrent_traversal_in_progress(false); + assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here"); + + if (ShenandoahVerify) { + _heap->verifier()->verify_after_traversal(); + } + + if (VerifyAfterGC) { + Universe::verify(); + } + } +} + +class ShenandoahTraversalFixRootsClosure : public OopClosure { +private: + template + inline void do_oop_work(T* p) { + T o = RawAccess<>::oop_load(p); + if (!CompressedOops::is_null(o)) { + oop obj = CompressedOops::decode_not_null(o); + oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); + if (obj != forw) { + RawAccess::oop_store(p, forw); + } + } + } + +public: + inline void do_oop(oop* p) { do_oop_work(p); } + inline void do_oop(narrowOop* p) { do_oop_work(p); } +}; + +class ShenandoahTraversalFixRootsTask : public AbstractGangTask { +private: + ShenandoahRootUpdater* _rp; + +public: + ShenandoahTraversalFixRootsTask(ShenandoahRootUpdater* rp) : + AbstractGangTask("Shenandoah traversal fix roots"), + _rp(rp) { + assert(ShenandoahHeap::heap()->has_forwarded_objects(), "Must be"); + } + + void work(uint worker_id) { + ShenandoahParallelWorkerSession worker_session(worker_id); + ShenandoahTraversalFixRootsClosure cl; + ShenandoahForwardedIsAliveClosure is_alive; + _rp->roots_do(worker_id, &is_alive, &cl); + } +}; + +void ShenandoahTraversalGC::fixup_roots() { +#if COMPILER2_OR_JVMCI + DerivedPointerTable::clear(); +#endif + ShenandoahRootUpdater rp(_heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots, true /* update code cache */); + ShenandoahTraversalFixRootsTask update_roots_task(&rp); + _heap->workers()->run_task(&update_roots_task); +#if COMPILER2_OR_JVMCI + DerivedPointerTable::update_pointers(); +#endif +} + +void ShenandoahTraversalGC::reset() { + _task_queues->clear(); +} + +ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() { + return _task_queues; +} + +class ShenandoahTraversalCancelledGCYieldClosure : public 
YieldClosure { +private: + ShenandoahHeap* const _heap; +public: + ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {}; + virtual bool should_return() { return _heap->cancelled_gc(); } +}; + +class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure { +public: + void do_void() { + ShenandoahHeap* sh = ShenandoahHeap::heap(); + ShenandoahTraversalGC* traversal_gc = sh->traversal_gc(); + assert(sh->process_references(), "why else would we be here?"); + ShenandoahTaskTerminator terminator(1, traversal_gc->task_queues()); + shenandoah_assert_rp_isalive_installed(); + traversal_gc->main_loop((uint) 0, &terminator, true); + } +}; + +class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure { +private: + ShenandoahObjToScanQueue* _queue; + Thread* _thread; + ShenandoahTraversalGC* _traversal_gc; + ShenandoahMarkingContext* const _mark_context; + + template + inline void do_oop_work(T* p) { + _traversal_gc->process_oop(p, _thread, _queue, _mark_context); + } + +public: + ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) : + _queue(q), _thread(Thread::current()), + _traversal_gc(ShenandoahHeap::heap()->traversal_gc()), + _mark_context(ShenandoahHeap::heap()->marking_context()) {} + + void do_oop(narrowOop* p) { do_oop_work(p); } + void do_oop(oop* p) { do_oop_work(p); } +}; + +class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure { +private: + ShenandoahObjToScanQueue* _queue; + Thread* _thread; + ShenandoahTraversalGC* _traversal_gc; + ShenandoahMarkingContext* const _mark_context; + + template + inline void do_oop_work(T* p) { + _traversal_gc->process_oop(p, _thread, _queue, _mark_context); + } + +public: + ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) : + _queue(q), _thread(Thread::current()), + _traversal_gc(ShenandoahHeap::heap()->traversal_gc()), + _mark_context(ShenandoahHeap::heap()->marking_context()) {} + + void do_oop(narrowOop* p) { do_oop_work(p); } + void do_oop(oop* p) { do_oop_work(p); } +}; + +class ShenandoahTraversalSingleThreadKeepAliveUpdateClosure : public OopClosure { +private: + ShenandoahObjToScanQueue* _queue; + Thread* _thread; + ShenandoahTraversalGC* _traversal_gc; + ShenandoahMarkingContext* const _mark_context; + + template + inline void do_oop_work(T* p) { + _traversal_gc->process_oop(p, _thread, _queue, _mark_context); + } + +public: + ShenandoahTraversalSingleThreadKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) : + _queue(q), _thread(Thread::current()), + _traversal_gc(ShenandoahHeap::heap()->traversal_gc()), + _mark_context(ShenandoahHeap::heap()->marking_context()) {} + + void do_oop(narrowOop* p) { do_oop_work(p); } + void do_oop(oop* p) { do_oop_work(p); } +}; + +class ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure : public OopClosure { +private: + ShenandoahObjToScanQueue* _queue; + Thread* _thread; + ShenandoahTraversalGC* _traversal_gc; + ShenandoahMarkingContext* const _mark_context; + + template + inline void do_oop_work(T* p) { + _traversal_gc->process_oop(p, _thread, _queue, _mark_context); + } + +public: + ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) : + _queue(q), _thread(Thread::current()), + _traversal_gc(ShenandoahHeap::heap()->traversal_gc()), + _mark_context(ShenandoahHeap::heap()->marking_context()) {} + + void do_oop(narrowOop* p) { do_oop_work(p); } + void do_oop(oop* p) { do_oop_work(p); } +}; + +class ShenandoahTraversalPrecleanTask : public AbstractGangTask { 
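+ // Single-threaded by construction: preclean_weak_refs() below reserves
+ // exactly one worker, and work() asserts worker_id == 0.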
+private:
+ ReferenceProcessor* _rp;
+
+public:
+ ShenandoahTraversalPrecleanTask(ReferenceProcessor* rp) :
+ AbstractGangTask("Precleaning task"),
+ _rp(rp) {}
+
+ void work(uint worker_id) {
+ assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
+ ShenandoahParallelWorkerSession worker_session(worker_id);
+ ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
+
+ ShenandoahHeap* sh = ShenandoahHeap::heap();
+
+ ShenandoahObjToScanQueue* q = sh->traversal_gc()->task_queues()->queue(worker_id);
+
+ ShenandoahForwardedIsAliveClosure is_alive;
+ ShenandoahTraversalCancelledGCYieldClosure yield;
+ ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
+ ShenandoahTraversalKeepAliveUpdateClosure keep_alive(q);
+ ResourceMark rm;
+ _rp->preclean_discovered_references(&is_alive, &keep_alive,
+ &complete_gc, &yield,
+ NULL);
+ }
+};
+
+void ShenandoahTraversalGC::preclean_weak_refs() {
+ // Pre-cleaning weak references before diving into STW makes sense at the
+ // end of concurrent mark. This filters out the references whose referents
+ // are alive. Note that ReferenceProcessor already filters these out during
+ // reference discovery, and the bulk of that filtering happens there. This
+ // phase processes leftovers that missed the initial filtering, i.e. when the
+ // referent was marked alive after the reference was discovered by RP.
+
+ assert(_heap->process_references(), "sanity");
+ assert(!_heap->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");
+
+ // Shortcut if no references were discovered to avoid winding up threads.
+ ReferenceProcessor* rp = _heap->ref_processor();
+ if (!rp->has_discovered_references()) {
+ return;
+ }
+
+ ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);
+
+ shenandoah_assert_rp_isalive_not_installed();
+ ShenandoahForwardedIsAliveClosure is_alive;
+ ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);
+
+ assert(task_queues()->is_empty(), "Should be empty");
+
+ // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
+ // queues and other goodies. When the upstream ReferenceProcessor starts supporting
+ // parallel precleaning, we can extend this to more threads.
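+ // The wiring, for reference: is_alive filters out references whose referents
+ // are already reachable; keep_alive marks and enqueues a referent that must
+ // stay live; complete_gc drains the traversal queues via main_loop(); yield
+ // aborts precleaning once the GC has been cancelled. A condensed sketch of
+ // the call made by the task above (last argument is the optional timer):
+ //
+ //   _rp->preclean_discovered_references(&is_alive, &keep_alive,
+ //                                       &complete_gc, &yield, NULL);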
+ ShenandoahPushWorkerScope scope(_heap->workers(), 1, /* check_workers = */ false); + + WorkGang* workers = _heap->workers(); + uint nworkers = workers->active_workers(); + assert(nworkers == 1, "This code uses only a single worker"); + task_queues()->reserve(nworkers); + + ShenandoahTraversalPrecleanTask task(rp); + workers->run_task(&task); + + assert(_heap->cancelled_gc() || task_queues()->is_empty(), "Should be empty"); +} + +// Weak Reference Closures +class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure { + uint _worker_id; + ShenandoahTaskTerminator* _terminator; + bool _reset_terminator; + +public: + ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false): + _worker_id(worker_id), + _terminator(t), + _reset_terminator(reset_terminator) { + } + + void do_void() { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); + + ShenandoahHeap* sh = ShenandoahHeap::heap(); + ShenandoahTraversalGC* traversal_gc = sh->traversal_gc(); + assert(sh->process_references(), "why else would we be here?"); + shenandoah_assert_rp_isalive_installed(); + + traversal_gc->main_loop(_worker_id, _terminator, false); + + if (_reset_terminator) { + _terminator->reset_for_reuse(); + } + } +}; + +class ShenandoahTraversalSingleThreadedDrainMarkingStackClosure: public VoidClosure { + uint _worker_id; + ShenandoahTaskTerminator* _terminator; + bool _reset_terminator; + +public: + ShenandoahTraversalSingleThreadedDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false): + _worker_id(worker_id), + _terminator(t), + _reset_terminator(reset_terminator) { + } + + void do_void() { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); + + ShenandoahHeap* sh = ShenandoahHeap::heap(); + ShenandoahTraversalGC* traversal_gc = sh->traversal_gc(); + assert(sh->process_references(), "why else would we be here?"); + shenandoah_assert_rp_isalive_installed(); + + traversal_gc->main_loop(_worker_id, _terminator, false); + + if (_reset_terminator) { + _terminator->reset_for_reuse(); + } + } +}; + +void ShenandoahTraversalGC::weak_refs_work() { + assert(_heap->process_references(), "sanity"); + + ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs; + + ShenandoahGCPhase phase(phase_root); + + ReferenceProcessor* rp = _heap->ref_processor(); + + // NOTE: We cannot shortcut on has_discovered_references() here, because + // we will miss marking JNI Weak refs then, see implementation in + // ReferenceProcessor::process_discovered_references. 
+ weak_refs_work_doit(); + + rp->verify_no_references_recorded(); + assert(!rp->discovery_enabled(), "Post condition"); + +} + +class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask { +private: + AbstractRefProcTaskExecutor::ProcessTask& _proc_task; + ShenandoahTaskTerminator* _terminator; + +public: + ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task, + ShenandoahTaskTerminator* t) : + AbstractGangTask("Process reference objects in parallel"), + _proc_task(proc_task), + _terminator(t) { + } + + void work(uint worker_id) { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator); + + ShenandoahForwardedIsAliveClosure is_alive; + if (!heap->is_degenerated_gc_in_progress()) { + ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id)); + _proc_task.work(worker_id, is_alive, keep_alive, complete_gc); + } else { + ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id)); + _proc_task.work(worker_id, is_alive, keep_alive, complete_gc); + } + } +}; + +class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor { +private: + WorkGang* _workers; + +public: + ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) : _workers(workers) {} + + // Executes a task using worker threads. + void execute(ProcessTask& task, uint ergo_workers) { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); + + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahTraversalGC* traversal_gc = heap->traversal_gc(); + ShenandoahPushWorkerQueuesScope scope(_workers, + traversal_gc->task_queues(), + ergo_workers, + /* do_check = */ false); + uint nworkers = _workers->active_workers(); + traversal_gc->task_queues()->reserve(nworkers); + ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues()); + ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator); + _workers->run_task(&proc_task_proxy); + } +}; + +void ShenandoahTraversalGC::weak_refs_work_doit() { + ReferenceProcessor* rp = _heap->ref_processor(); + + ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process; + + shenandoah_assert_rp_isalive_not_installed(); + ShenandoahForwardedIsAliveClosure is_alive; + ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive); + + WorkGang* workers = _heap->workers(); + uint nworkers = workers->active_workers(); + + rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs()); + rp->set_active_mt_degree(nworkers); + + assert(task_queues()->is_empty(), "Should be empty"); + + // complete_gc and keep_alive closures instantiated here are only needed for + // single-threaded path in RP. They share the queue 0 for tracking work, which + // simplifies implementation. Since RP may decide to call complete_gc several + // times, we need to be able to reuse the terminator. 
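+ // Reuse works because the single-threaded drain closure resets the
+ // terminator after each drain (see its do_void() above):
+ //
+ //   traversal_gc->main_loop(_worker_id, _terminator, false);
+ //   if (_reset_terminator) _terminator->reset_for_reuse();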
+ uint serial_worker_id = 0; + ShenandoahTaskTerminator terminator(1, task_queues()); + ShenandoahTraversalSingleThreadedDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true); + ShenandoahPushWorkerQueuesScope scope(workers, task_queues(), 1, /* do_check = */ false); + + ShenandoahTraversalRefProcTaskExecutor executor(workers); + + ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues()); + if (!_heap->is_degenerated_gc_in_progress()) { + ShenandoahTraversalSingleThreadKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id)); + rp->process_discovered_references(&is_alive, &keep_alive, + &complete_gc, &executor, + &pt); + } else { + ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id)); + rp->process_discovered_references(&is_alive, &keep_alive, + &complete_gc, &executor, + &pt); + } + + pt.print_all_references(); + assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty"); +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.hpp 2020-01-17 17:10:46.126128494 +0100 @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHTRAVERSALGC_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHTRAVERSALGC_HPP
+
+#include "memory/allocation.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
+#include "runtime/thread.hpp"
+
+class ShenandoahTraversalGC : public CHeapObj<mtGC> {
+private:
+ ShenandoahHeap* const _heap;
+ ShenandoahObjToScanQueueSet* const _task_queues;
+ ShenandoahHeapRegionSet _traversal_set;
+
+public:
+ ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions);
+ ~ShenandoahTraversalGC();
+
+ ShenandoahHeapRegionSet* traversal_set() { return &_traversal_set; }
+
+ void reset();
+ void prepare();
+ void init_traversal_collection();
+ void concurrent_traversal_collection();
+ void final_traversal_collection();
+
+ template <class T, bool STRING_DEDUP, bool DEGEN, bool ATOMIC_UPDATE>
+ inline void process_oop(T* p, Thread* thread, ShenandoahObjToScanQueue* queue, ShenandoahMarkingContext* const mark_context);
+
+ bool check_and_handle_cancelled_gc(ShenandoahTaskTerminator* terminator, bool sts_yield);
+
+ ShenandoahObjToScanQueueSet* task_queues();
+
+ void main_loop(uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield);
+
+private:
+ void prepare_regions();
+
+ template <class T>
+ void main_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield);
+
+ void preclean_weak_refs();
+ void weak_refs_work();
+ void weak_refs_work_doit();
+
+ void fixup_roots();
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHTRAVERSALGC_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.inline.hpp 2020-01-17 17:10:46.731128461 +0100 @@ -0,0 +1,83 @@ +/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHTRAVERSALGC_INLINE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHTRAVERSALGC_INLINE_HPP
+
+#include "gc/shenandoah/shenandoahAsserts.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
+#include "gc/shenandoah/shenandoahStringDedup.hpp"
+#include "gc/shenandoah/shenandoahTraversalGC.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
+#include "oops/oop.inline.hpp"
+
+template <class T, bool STRING_DEDUP, bool DEGEN, bool ATOMIC_UPDATE>
+void ShenandoahTraversalGC::process_oop(T* p, Thread* thread, ShenandoahObjToScanQueue* queue, ShenandoahMarkingContext* const mark_context) {
+ T o = RawAccess<>::oop_load(p);
+ if (!CompressedOops::is_null(o)) {
+ oop obj = CompressedOops::decode_not_null(o);
+ if (DEGEN) {
+ assert(!ATOMIC_UPDATE, "Degen path assumes non-atomic updates");
+ oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
+ if (obj != forw) {
+ // Update reference.
+ RawAccess<IS_NOT_NULL>::oop_store(p, forw);
+ }
+ obj = forw;
+ } else if (_heap->in_collection_set(obj)) {
+ oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
+ if (obj == forw) {
+ ShenandoahEvacOOMScope evac_scope;
+ forw = _heap->evacuate_object(obj, thread);
+ }
+ shenandoah_assert_forwarded_except(p, obj, _heap->cancelled_gc());
+ // Update reference.
+ if (ATOMIC_UPDATE) {
+ ShenandoahHeap::cas_oop(forw, p, obj);
+ } else {
+ RawAccess<IS_NOT_NULL>::oop_store(p, forw);
+ }
+ obj = forw;
+ }
+
+ shenandoah_assert_not_forwarded(p, obj);
+ shenandoah_assert_not_in_cset_except(p, obj, _heap->cancelled_gc());
+
+ if (mark_context->mark(obj)) {
+ bool succeeded = queue->push(ShenandoahMarkTask(obj));
+ assert(succeeded, "must succeed to push to task queue");
+
+ if (STRING_DEDUP && ShenandoahStringDedup::is_candidate(obj) && !_heap->cancelled_gc()) {
+ assert(ShenandoahStringDedup::is_enabled(), "Must be enabled");
+ // Only dealing with to-space strings, so that we can avoid the evac-oom protocol, which is costly here.
+ shenandoah_assert_not_in_cset(p, obj);
+ ShenandoahStringDedup::enqueue_candidate(obj);
+ }
+ }
+ }
+}
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHTRAVERSALGC_INLINE_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahTraversalMode.cpp 2020-01-17 17:10:47.338128428 +0100 @@ -0,0 +1,59 @@ +/*
+ * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +#include "precompiled.hpp" +#include "gc/shenandoah/shenandoahTraversalMode.hpp" +#include "gc/shenandoah/heuristics/shenandoahTraversalAggressiveHeuristics.hpp" +#include "gc/shenandoah/heuristics/shenandoahTraversalHeuristics.hpp" +#include "logging/log.hpp" +#include "logging/logTag.hpp" + +void ShenandoahTraversalMode::initialize_flags() const { + FLAG_SET_DEFAULT(ShenandoahSATBBarrier, false); + FLAG_SET_DEFAULT(ShenandoahStoreValEnqueueBarrier, true); + FLAG_SET_DEFAULT(ShenandoahKeepAliveBarrier, false); + FLAG_SET_DEFAULT(ShenandoahAllowMixedAllocs, false); + + SHENANDOAH_ERGO_ENABLE_FLAG(ExplicitGCInvokesConcurrent); + SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent); + + // Final configuration checks + SHENANDOAH_CHECK_FLAG_SET(ShenandoahLoadRefBarrier); + SHENANDOAH_CHECK_FLAG_SET(ShenandoahStoreValEnqueueBarrier); + SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier); + SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier); +} + +ShenandoahHeuristics* ShenandoahTraversalMode::initialize_heuristics() const { + if (ShenandoahGCHeuristics != NULL) { + if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) { + return new ShenandoahTraversalHeuristics(); + } else if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) { + return new ShenandoahTraversalAggressiveHeuristics(); + } else { + vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option"); + } + } + ShouldNotReachHere(); + return NULL; +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahTraversalMode.hpp 2020-01-17 17:10:47.939128395 +0100 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHTRAVERSALMODE_HPP +#define SHARE_GC_SHENANDOAH_SHENANDOAHTRAVERSALMODE_HPP + +#include "gc/shenandoah/shenandoahMode.hpp" + +class ShenandoahHeuristics; + +class ShenandoahTraversalMode : public ShenandoahMode { +public: + virtual void initialize_flags() const; + virtual ShenandoahHeuristics* initialize_heuristics() const; +}; + +#endif // SHARE_GC_SHENANDOAH_SHENANDOAHTRAVERSALMODE_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp 2020-01-17 17:10:48.542128361 +0100 @@ -0,0 +1,186 @@ +/* + * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "jfr/jfrEvents.hpp" +#include "gc/shared/gcCause.hpp" +#include "gc/shared/gcTimer.hpp" +#include "gc/shared/gcTrace.hpp" +#include "gc/shared/gcWhen.hpp" +#include "gc/shenandoah/shenandoahAllocTracker.hpp" +#include "gc/shenandoah/shenandoahCollectorPolicy.hpp" +#include "gc/shenandoah/shenandoahMarkCompact.hpp" +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahHeuristics.hpp" +#include "gc/shenandoah/shenandoahUtils.hpp" +#include "utilities/debug.hpp" + +ShenandoahPhaseTimings::Phase ShenandoahGCPhase::_current_phase = ShenandoahGCPhase::_invalid_phase; + +ShenandoahGCSession::ShenandoahGCSession(GCCause::Cause cause) : + _heap(ShenandoahHeap::heap()), + _timer(_heap->gc_timer()), + _tracer(_heap->tracer()) { + assert(!ShenandoahGCPhase::is_valid_phase(ShenandoahGCPhase::current_phase()), + "No current GC phase"); + + _heap->set_gc_cause(cause); + _timer->register_gc_start(); + _tracer->report_gc_start(cause, _timer->gc_start()); + _heap->trace_heap(GCWhen::BeforeGC, _tracer); + + _heap->shenandoah_policy()->record_cycle_start(); + _heap->heuristics()->record_cycle_start(); + _trace_cycle.initialize(_heap->cycle_memory_manager(), cause, + /* allMemoryPoolsAffected */ true, + /* recordGCBeginTime = */ true, + /* recordPreGCUsage = */ true, + /* recordPeakUsage = */ true, + /* recordPostGCUsage = */ true, + /* recordAccumulatedGCTime = */ true, + /* recordGCEndTime = */ true, + /* countCollection = */ true + ); +} + +ShenandoahGCSession::~ShenandoahGCSession() { + _heap->heuristics()->record_cycle_end(); + _timer->register_gc_end(); + _heap->trace_heap(GCWhen::AfterGC, _tracer); + _tracer->report_gc_end(_timer->gc_end(), _timer->time_partitions()); + assert(!ShenandoahGCPhase::is_valid_phase(ShenandoahGCPhase::current_phase()), + "No current GC phase"); + _heap->set_gc_cause(GCCause::_no_gc); +} + +ShenandoahGCPauseMark::ShenandoahGCPauseMark(uint gc_id, SvcGCMarker::reason_type type) : + _heap(ShenandoahHeap::heap()), _gc_id_mark(gc_id), _svc_gc_mark(type), _is_gc_active_mark() { + + // FIXME: It seems that JMC throws away level 0 events, which are the Shenandoah + // pause events. Create this pseudo level 0 event to push real events to level 1. 
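+ // For illustration only: with this pseudo phase in place, the GCTimer nesting for a pause becomes "Shenandoah" at level 0 (registered just below) wrapping the actual pause phase at level 1, the level that JMC keeps.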
+ _heap->gc_timer()->register_gc_phase_start("Shenandoah", Ticks::now()); + _trace_pause.initialize(_heap->stw_memory_manager(), _heap->gc_cause(), + /* allMemoryPoolsAffected */ true, + /* recordGCBeginTime = */ true, + /* recordPreGCUsage = */ false, + /* recordPeakUsage = */ false, + /* recordPostGCUsage = */ false, + /* recordAccumulatedGCTime = */ true, + /* recordGCEndTime = */ true, + /* countCollection = */ true + ); + + _heap->heuristics()->record_gc_start(); +} + +ShenandoahGCPauseMark::~ShenandoahGCPauseMark() { + _heap->gc_timer()->register_gc_phase_end(Ticks::now()); + _heap->heuristics()->record_gc_end(); +} + +ShenandoahGCPhase::ShenandoahGCPhase(const ShenandoahPhaseTimings::Phase phase) : + _heap(ShenandoahHeap::heap()), _phase(phase) { + assert(!Thread::current()->is_Worker_thread() && + (Thread::current()->is_VM_thread() || + Thread::current()->is_ConcurrentGC_thread()), + "Must be set by these threads"); + _parent_phase = _current_phase; + _current_phase = phase; + + _heap->phase_timings()->record_phase_start(_phase); +} + +ShenandoahGCPhase::~ShenandoahGCPhase() { + _heap->phase_timings()->record_phase_end(_phase); + _current_phase = _parent_phase; +} + +bool ShenandoahGCPhase::is_valid_phase(ShenandoahPhaseTimings::Phase phase) { + return phase >= 0 && phase < ShenandoahPhaseTimings::_num_phases; +} + +bool ShenandoahGCPhase::is_root_work_phase() { + switch(current_phase()) { + case ShenandoahPhaseTimings::scan_roots: + case ShenandoahPhaseTimings::update_roots: + case ShenandoahPhaseTimings::init_evac: + case ShenandoahPhaseTimings::final_update_refs_roots: + case ShenandoahPhaseTimings::degen_gc_update_roots: + case ShenandoahPhaseTimings::init_traversal_gc_work: + case ShenandoahPhaseTimings::final_traversal_gc_work: + case ShenandoahPhaseTimings::final_traversal_update_roots: + case ShenandoahPhaseTimings::full_gc_roots: + return true; + default: + return false; + } +} + +ShenandoahAllocTrace::ShenandoahAllocTrace(size_t words_size, ShenandoahAllocRequest::Type alloc_type) { + if (ShenandoahAllocationTrace) { + _start = os::elapsedTime(); + _size = words_size; + _alloc_type = alloc_type; + } else { + _start = 0; + _size = 0; + _alloc_type = ShenandoahAllocRequest::Type(0); + } +} + +ShenandoahAllocTrace::~ShenandoahAllocTrace() { + if (ShenandoahAllocationTrace) { + double stop = os::elapsedTime(); + double duration_sec = stop - _start; + double duration_us = duration_sec * 1000000; + ShenandoahAllocTracker* tracker = ShenandoahHeap::heap()->alloc_tracker(); + assert(tracker != NULL, "Must be"); + tracker->record_alloc_latency(_size, _alloc_type, duration_us); + if (duration_us > ShenandoahAllocationStallThreshold) { + log_warning(gc)("Allocation stall: %.0f us (threshold: " INTX_FORMAT " us)", + duration_us, ShenandoahAllocationStallThreshold); + } + } +} + +ShenandoahWorkerSession::ShenandoahWorkerSession(uint worker_id) : _worker_id(worker_id) { + Thread* thr = Thread::current(); + assert(ShenandoahThreadLocalData::worker_id(thr) == ShenandoahThreadLocalData::INVALID_WORKER_ID, "Already set"); + ShenandoahThreadLocalData::set_worker_id(thr, worker_id); +} + +ShenandoahConcurrentWorkerSession::~ShenandoahConcurrentWorkerSession() { + // Do nothing. Per-worker events are not supported in this JDK. +} + +ShenandoahParallelWorkerSession::~ShenandoahParallelWorkerSession() { + // Do nothing. Per-worker events are not supported in this JDK. 
+} +ShenandoahWorkerSession::~ShenandoahWorkerSession() { +#ifdef ASSERT + Thread* thr = Thread::current(); + assert(ShenandoahThreadLocalData::worker_id(thr) != ShenandoahThreadLocalData::INVALID_WORKER_ID, "Must be set"); + ShenandoahThreadLocalData::set_worker_id(thr, ShenandoahThreadLocalData::INVALID_WORKER_ID); +#endif +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp 2020-01-17 17:10:49.150128328 +0100 @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHENANDOAHUTILS_HPP +#define SHARE_VM_GC_SHENANDOAHUTILS_HPP + +#include "gc/shared/gcCause.hpp" +#include "gc/shared/vmGCOperations.hpp" +#include "gc/shared/isGCActiveMark.hpp" +#include "gc/shared/suspendibleThreadSet.hpp" +#include "gc/shenandoah/shenandoahPhaseTimings.hpp" +#include "gc/shenandoah/shenandoahThreadLocalData.hpp" +#include "jfr/jfrEvents.hpp" +#include "memory/allocation.hpp" +#include "runtime/safepoint.hpp" +#include "runtime/vmThread.hpp" +#include "runtime/vmOperations.hpp" +#include "services/memoryService.hpp" + +class GCTimer; +class GCTracer; + +class ShenandoahGCSession : public StackObj { +private: + ShenandoahHeap* const _heap; + GCTimer* const _timer; + GCTracer* const _tracer; + + TraceMemoryManagerStats _trace_cycle; +public: + ShenandoahGCSession(GCCause::Cause cause); + ~ShenandoahGCSession(); +}; + +class ShenandoahGCPhase : public StackObj { +private: + static const ShenandoahPhaseTimings::Phase _invalid_phase = ShenandoahPhaseTimings::_num_phases; + static ShenandoahPhaseTimings::Phase _current_phase; + + ShenandoahHeap* const _heap; + const ShenandoahPhaseTimings::Phase _phase; + ShenandoahPhaseTimings::Phase _parent_phase; +public: + ShenandoahGCPhase(ShenandoahPhaseTimings::Phase phase); + ~ShenandoahGCPhase(); + + static ShenandoahPhaseTimings::Phase current_phase() { return _current_phase; } + + static bool is_valid_phase(ShenandoahPhaseTimings::Phase phase); + static bool is_current_phase_valid() { return is_valid_phase(current_phase()); } + static bool is_root_work_phase(); +}; + +// Aggregates all the things that should happen before/after the pause. 
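+// Usage sketch, mirroring shenandoahVMOperations.cpp later in this change: constructed as a stack object at the top of a pause VM operation, e.g. void VM_ShenandoahInitMark::doit() { ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::OTHER); ShenandoahHeap::heap()->entry_init_mark(); } The constructor and destructor bracket the pause with timer and memory-manager bookkeeping.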
+class ShenandoahGCPauseMark : public StackObj { +private: + ShenandoahHeap* const _heap; + const GCIdMark _gc_id_mark; + const SvcGCMarker _svc_gc_mark; + const IsGCActiveMark _is_gc_active_mark; + TraceMemoryManagerStats _trace_pause; + +public: + ShenandoahGCPauseMark(uint gc_id, SvcGCMarker::reason_type type); + ~ShenandoahGCPauseMark(); +}; + +class ShenandoahAllocTrace : public StackObj { +private: + double _start; + size_t _size; + ShenandoahAllocRequest::Type _alloc_type; +public: + ShenandoahAllocTrace(size_t words_size, ShenandoahAllocRequest::Type alloc_type); + ~ShenandoahAllocTrace(); +}; + +class ShenandoahSafepoint : public AllStatic { +public: + // check if Shenandoah GC safepoint is in progress + static inline bool is_at_shenandoah_safepoint() { + if (!SafepointSynchronize::is_at_safepoint()) return false; + + VM_Operation* vm_op = VMThread::vm_operation(); + if (vm_op == NULL) return false; + + VM_Operation::VMOp_Type type = vm_op->type(); + return type == VM_Operation::VMOp_ShenandoahInitMark || + type == VM_Operation::VMOp_ShenandoahFinalMarkStartEvac || + type == VM_Operation::VMOp_ShenandoahFinalEvac || + type == VM_Operation::VMOp_ShenandoahInitTraversalGC || + type == VM_Operation::VMOp_ShenandoahFinalTraversalGC || + type == VM_Operation::VMOp_ShenandoahInitUpdateRefs || + type == VM_Operation::VMOp_ShenandoahFinalUpdateRefs || + type == VM_Operation::VMOp_ShenandoahFullGC || + type == VM_Operation::VMOp_ShenandoahDegeneratedGC; + } +}; + +class ShenandoahWorkerSession : public StackObj { +protected: + uint _worker_id; + + ShenandoahWorkerSession(uint worker_id); + ~ShenandoahWorkerSession(); +public: + static inline uint worker_id() { + Thread* thr = Thread::current(); + uint id = ShenandoahThreadLocalData::worker_id(thr); + assert(id != ShenandoahThreadLocalData::INVALID_WORKER_ID, "Worker session has not been created"); + return id; + } +}; + +class ShenandoahConcurrentWorkerSession : public ShenandoahWorkerSession { +public: + ShenandoahConcurrentWorkerSession(uint worker_id) : ShenandoahWorkerSession(worker_id) { } + ~ShenandoahConcurrentWorkerSession(); +}; + +class ShenandoahParallelWorkerSession : public ShenandoahWorkerSession { +public: + ShenandoahParallelWorkerSession(uint worker_id) : ShenandoahWorkerSession(worker_id) { } + ~ShenandoahParallelWorkerSession(); +}; + +class ShenandoahSuspendibleThreadSetJoiner { +private: + SuspendibleThreadSetJoiner _joiner; +public: + ShenandoahSuspendibleThreadSetJoiner(bool active = true) : _joiner(active) { + assert(!ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "STS should be joined before evac scope"); + } + ~ShenandoahSuspendibleThreadSetJoiner() { + assert(!ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "STS should be left after evac scope"); + } +}; + +class ShenandoahSuspendibleThreadSetLeaver { +private: + SuspendibleThreadSetLeaver _leaver; +public: + ShenandoahSuspendibleThreadSetLeaver(bool active = true) : _leaver(active) { + assert(!ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "STS should be left after evac scope"); + } + ~ShenandoahSuspendibleThreadSetLeaver() { + assert(!ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "STS should be joined before evac scope"); + } +}; + +#endif // SHARE_VM_GC_SHENANDOAHUTILS_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp 2020-01-17 17:10:49.757128294 +0100 @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2013, 2018, Red Hat, Inc. 
All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahUtils.hpp" +#include "gc/shenandoah/shenandoahVMOperations.hpp" + +bool VM_ShenandoahReferenceOperation::doit_prologue() { + Heap_lock->lock(); + return true; +} + +void VM_ShenandoahReferenceOperation::doit_epilogue() { + if (Universe::has_reference_pending_list()) { + Heap_lock->notify_all(); + } + Heap_lock->unlock(); +} + +void VM_ShenandoahInitMark::doit() { + ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::OTHER); + ShenandoahHeap::heap()->entry_init_mark(); +} + +void VM_ShenandoahFinalMarkStartEvac::doit() { + ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::OTHER); + ShenandoahHeap::heap()->entry_final_mark(); +} + +void VM_ShenandoahFinalEvac::doit() { + ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::OTHER); + ShenandoahHeap::heap()->entry_final_evac(); +} + +void VM_ShenandoahFullGC::doit() { + ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::FULL); + ShenandoahHeap::heap()->entry_full(_gc_cause); +} + +void VM_ShenandoahDegeneratedGC::doit() { + ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::OTHER); + ShenandoahHeap::heap()->entry_degenerated(_point); +} + +void VM_ShenandoahInitTraversalGC::doit() { + ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::OTHER); + ShenandoahHeap::heap()->entry_init_traversal(); +} + +void VM_ShenandoahFinalTraversalGC::doit() { + ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::OTHER); + ShenandoahHeap::heap()->entry_final_traversal(); +} + +void VM_ShenandoahInitUpdateRefs::doit() { + ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::OTHER); + ShenandoahHeap::heap()->entry_init_updaterefs(); +} + +void VM_ShenandoahFinalUpdateRefs::doit() { + ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::OTHER); + ShenandoahHeap::heap()->entry_final_updaterefs(); +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp 2020-01-17 17:10:50.365128261 +0100 @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_VM_OPERATIONS_SHENANDOAH_HPP +#define SHARE_VM_GC_SHENANDOAH_VM_OPERATIONS_SHENANDOAH_HPP + +#include "gc/shared/vmGCOperations.hpp" + +// VM_operations for the Shenandoah Collector. +// +// VM_ShenandoahOperation +// - VM_ShenandoahInitMark: initiate concurrent marking +// - VM_ShenandoahReferenceOperation: +// - VM_ShenandoahFinalMarkStartEvac: finish up concurrent marking, and start evacuation +// - VM_ShenandoahFinalEvac: finish concurrent evacuation +// - VM_ShenandoahInitUpdateRefs: initiate update references +// - VM_ShenandoahFinalUpdateRefs: finish up update references +// - VM_ShenandoahFullGC: do full GC +// - VM_ShenandoahInitTraversalGC: init traversal GC +// - VM_ShenandoahFinalTraversalGC: finish traversal GC + +class VM_ShenandoahOperation : public VM_Operation {protected: + uint _gc_id; +public: + VM_ShenandoahOperation() : _gc_id(GCId::current()) {}; +}; + +class VM_ShenandoahReferenceOperation : public VM_ShenandoahOperation {public: + VM_ShenandoahReferenceOperation() : VM_ShenandoahOperation() {}; + bool doit_prologue(); + void doit_epilogue(); +}; + +class VM_ShenandoahInitMark: public VM_ShenandoahOperation {public: + VM_ShenandoahInitMark() : VM_ShenandoahOperation() {}; + VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahInitMark; } + const char* name() const { return "Shenandoah Init Marking"; } + virtual void doit(); +}; + +class VM_ShenandoahFinalMarkStartEvac: public VM_ShenandoahReferenceOperation {public: + VM_ShenandoahFinalMarkStartEvac() : VM_ShenandoahReferenceOperation() {}; + VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahFinalMarkStartEvac; } + const char* name() const { return "Shenandoah Final Mark and Start Evacuation"; } + virtual void doit(); +}; + +class VM_ShenandoahFinalEvac: public VM_ShenandoahOperation {public: + VM_ShenandoahFinalEvac() : VM_ShenandoahOperation() {}; + VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahFinalEvac; } + const char* name() const { return "Shenandoah Final Evacuation"; } + virtual void doit(); +}; + +class VM_ShenandoahDegeneratedGC: public VM_ShenandoahReferenceOperation {private: + // Really the ShenandoahHeap::ShenandoahDegenerationPoint, but cast to int here + // in order to avoid dependency on ShenandoahHeap + int _point; +public: + VM_ShenandoahDegeneratedGC(int point) : VM_ShenandoahReferenceOperation(), _point(point) {}; + VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahDegeneratedGC; } + const char* name() const { return "Shenandoah Degenerated GC"; } + virtual void doit(); +}; + +class VM_ShenandoahFullGC : public VM_ShenandoahReferenceOperation {private: + GCCause::Cause _gc_cause; +public: + VM_ShenandoahFullGC(GCCause::Cause gc_cause) : VM_ShenandoahReferenceOperation(), _gc_cause(gc_cause) {}; + VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahFullGC; } + const char* name() const { return "Shenandoah Full GC"; } + virtual void doit(); +}; + +class
VM_ShenandoahInitTraversalGC: public VM_ShenandoahOperation { +public: + VM_ShenandoahInitTraversalGC() : VM_ShenandoahOperation() {}; + VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahInitTraversalGC; } + const char* name() const { return "Shenandoah Init Traversal Collection"; } + virtual void doit(); +}; + +class VM_ShenandoahFinalTraversalGC: public VM_ShenandoahReferenceOperation { +public: + VM_ShenandoahFinalTraversalGC() : VM_ShenandoahReferenceOperation() {}; + VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahFinalTraversalGC; } + const char* name() const { return "Shenandoah Final Traversal Collection"; } + virtual void doit(); +}; + +class VM_ShenandoahInitUpdateRefs: public VM_ShenandoahOperation { +public: + VM_ShenandoahInitUpdateRefs() : VM_ShenandoahOperation() {}; + VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahInitUpdateRefs; } + const char* name() const { return "Shenandoah Init Update References"; } + virtual void doit(); +}; + +class VM_ShenandoahFinalUpdateRefs: public VM_ShenandoahOperation { +public: + VM_ShenandoahFinalUpdateRefs() : VM_ShenandoahOperation() {}; + VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahFinalUpdateRefs; } + const char* name() const { return "Shenandoah Final Update References"; } + virtual void doit(); +}; + +#endif //SHARE_VM_GC_SHENANDOAH_VM_OPERATIONS_SHENANDOAH_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp 2020-01-17 17:10:50.973128227 +0100 @@ -0,0 +1,1020 @@ +/* + * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/shenandoahAsserts.hpp" +#include "gc/shenandoah/shenandoahForwarding.inline.hpp" +#include "gc/shenandoah/shenandoahPhaseTimings.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahRootProcessor.hpp" +#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp" +#include "gc/shenandoah/shenandoahUtils.hpp" +#include "gc/shenandoah/shenandoahVerifier.hpp" +#include "memory/allocation.hpp" +#include "memory/iterator.inline.hpp" +#include "memory/resourceArea.hpp" + +// Avoid name collision on verify_oop (defined in macroAssembler_arm.hpp) +#ifdef verify_oop +#undef verify_oop +#endif + +class ShenandoahVerifyOopClosure : public BasicOopIterateClosure {private: + const char* _phase; + ShenandoahVerifier::VerifyOptions _options; + ShenandoahVerifierStack* _stack; + ShenandoahHeap* _heap; + MarkBitMap* _map; + ShenandoahLivenessData* _ld; + void* _interior_loc; + oop _loc; + +public: + ShenandoahVerifyOopClosure(ShenandoahVerifierStack* stack, MarkBitMap* map, ShenandoahLivenessData* ld, + const char* phase, ShenandoahVerifier::VerifyOptions options) : + _phase(phase), + _options(options), + _stack(stack), + _heap(ShenandoahHeap::heap()), + _map(map), + _ld(ld), + _interior_loc(NULL), + _loc(NULL) { } + +private: + void check(ShenandoahAsserts::SafeLevel level, oop obj, bool test, const char* label) { + if (!test) { + ShenandoahAsserts::print_failure(level, obj, _interior_loc, _loc, _phase, label, __FILE__, __LINE__); + } + } + + template <class T> + void do_oop_work(T* p) { + T o = RawAccess<>::oop_load(p); + if (!CompressedOops::is_null(o)) { + oop obj = CompressedOops::decode_not_null(o); + + // Single threaded verification can use faster non-atomic stack and bitmap + // methods. + // + // For performance reasons, only fully verify non-marked field values. + // We are here when the host object for *p is already marked. + + HeapWord* addr = (HeapWord*) obj; + if (_map->parMark(addr)) { + verify_oop_at(p, obj); + _stack->push(ShenandoahVerifierTask(obj)); + } + } + } + + void verify_oop(oop obj) { + // Perform consistency checks with gradually decreasing safety level. This guarantees + // that failure report would not try to touch something that was not yet verified to be + // safe to process.
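+ // The level passed to each check() below is the level already established at that point: _safe_unknown until obj itself is verified, then _safe_oop, and _safe_all once the forwardee is verified too, so a failure report only touches state known to be safe.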
+ + check(ShenandoahAsserts::_safe_unknown, obj, _heap->is_in(obj), + "oop must be in heap"); + check(ShenandoahAsserts::_safe_unknown, obj, check_obj_alignment(obj), + "oop must be aligned"); + + ShenandoahHeapRegion *obj_reg = _heap->heap_region_containing(obj); + Klass* obj_klass = obj->klass_or_null(); + + // Verify that obj is not in dead space: + { + // Do this before touching obj->size() + check(ShenandoahAsserts::_safe_unknown, obj, obj_klass != NULL, + "Object klass pointer should not be NULL"); + check(ShenandoahAsserts::_safe_unknown, obj, Metaspace::contains(obj_klass), + "Object klass pointer must go to metaspace"); + + HeapWord *obj_addr = (HeapWord *) obj; + check(ShenandoahAsserts::_safe_unknown, obj, obj_addr < obj_reg->top(), + "Object start should be within the region"); + + if (!obj_reg->is_humongous()) { + check(ShenandoahAsserts::_safe_unknown, obj, (obj_addr + obj->size()) <= obj_reg->top(), + "Object end should be within the region"); + } else { + size_t humongous_start = obj_reg->region_number(); + size_t humongous_end = humongous_start + (obj->size() >> ShenandoahHeapRegion::region_size_words_shift()); + for (size_t idx = humongous_start + 1; idx < humongous_end; idx++) { + check(ShenandoahAsserts::_safe_unknown, obj, _heap->get_region(idx)->is_humongous_continuation(), + "Humongous object is in continuation that fits it"); + } + } + + // ------------ obj is safe at this point -------------- + + check(ShenandoahAsserts::_safe_oop, obj, obj_reg->is_active(), + "Object should be in active region"); + + switch (_options._verify_liveness) { + case ShenandoahVerifier::_verify_liveness_disable: + // skip + break; + case ShenandoahVerifier::_verify_liveness_complete: + Atomic::add((uint) obj->size(), &_ld[obj_reg->region_number()]); + // fallthrough for fast failure for un-live regions: + case ShenandoahVerifier::_verify_liveness_conservative: + check(ShenandoahAsserts::_safe_oop, obj, obj_reg->has_live(), + "Object must belong to region with live data"); + break; + default: + assert(false, "Unhandled liveness verification"); + } + } + + oop fwd = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(obj); + + ShenandoahHeapRegion* fwd_reg = NULL; + + if (obj != fwd) { + check(ShenandoahAsserts::_safe_oop, obj, _heap->is_in(fwd), + "Forwardee must be in heap"); + check(ShenandoahAsserts::_safe_oop, obj, !CompressedOops::is_null(fwd), + "Forwardee is set"); + check(ShenandoahAsserts::_safe_oop, obj, check_obj_alignment(fwd), + "Forwardee must be aligned"); + + // Do this before touching fwd->size() + Klass* fwd_klass = fwd->klass_or_null(); + check(ShenandoahAsserts::_safe_oop, obj, fwd_klass != NULL, + "Forwardee klass pointer should not be NULL"); + check(ShenandoahAsserts::_safe_oop, obj, Metaspace::contains(fwd_klass), + "Forwardee klass pointer must go to metaspace"); + check(ShenandoahAsserts::_safe_oop, obj, obj_klass == fwd_klass, + "Forwardee klass pointer must match object klass pointer"); + + fwd_reg = _heap->heap_region_containing(fwd); + + // Verify that forwardee is not in the dead space: + check(ShenandoahAsserts::_safe_oop, obj, !fwd_reg->is_humongous(), + "Should have no humongous forwardees"); + + HeapWord *fwd_addr = (HeapWord *) fwd; + check(ShenandoahAsserts::_safe_oop, obj, fwd_addr < fwd_reg->top(), + "Forwardee start should be within the region"); + check(ShenandoahAsserts::_safe_oop, obj, (fwd_addr + fwd->size()) <= fwd_reg->top(), + "Forwardee end should be within the region"); + + oop fwd2 = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(fwd); +
check(ShenandoahAsserts::_safe_oop, obj, fwd == fwd2, + "Double forwarding"); + } else { + fwd_reg = obj_reg; + } + + // ------------ obj and fwd are safe at this point -------------- + + switch (_options._verify_marked) { + case ShenandoahVerifier::_verify_marked_disable: + // skip + break; + case ShenandoahVerifier::_verify_marked_incomplete: + check(ShenandoahAsserts::_safe_all, obj, _heap->marking_context()->is_marked(obj), + "Must be marked in incomplete bitmap"); + break; + case ShenandoahVerifier::_verify_marked_complete: + check(ShenandoahAsserts::_safe_all, obj, _heap->complete_marking_context()->is_marked(obj), + "Must be marked in complete bitmap"); + break; + default: + assert(false, "Unhandled mark verification"); + } + + switch (_options._verify_forwarded) { + case ShenandoahVerifier::_verify_forwarded_disable: + // skip + break; + case ShenandoahVerifier::_verify_forwarded_none: { + check(ShenandoahAsserts::_safe_all, obj, obj == fwd, + "Should not be forwarded"); + break; + } + case ShenandoahVerifier::_verify_forwarded_allow: { + if (obj != fwd) { + check(ShenandoahAsserts::_safe_all, obj, obj_reg != fwd_reg, + "Forwardee should be in another region"); + } + break; + } + default: + assert(false, "Unhandled forwarding verification"); + } + + switch (_options._verify_cset) { + case ShenandoahVerifier::_verify_cset_disable: + // skip + break; + case ShenandoahVerifier::_verify_cset_none: + check(ShenandoahAsserts::_safe_all, obj, !_heap->in_collection_set(obj), + "Should not have references to collection set"); + break; + case ShenandoahVerifier::_verify_cset_forwarded: + if (_heap->in_collection_set(obj)) { + check(ShenandoahAsserts::_safe_all, obj, obj != fwd, + "Object in collection set, should have forwardee"); + } + break; + default: + assert(false, "Unhandled cset verification"); + } + + } + +public: + /** + * Verify object with known interior reference. + * @param p interior reference where the object is referenced from; can be off-heap + * @param obj verified object + */ + template <class T> + void verify_oop_at(T* p, oop obj) { + _interior_loc = p; + verify_oop(obj); + _interior_loc = NULL; + } + + /** + * Verify object without known interior reference. + * Useful when picking up the object at known offset in heap, + * but without knowing what objects reference it. + * @param obj verified object + */ + void verify_oop_standalone(oop obj) { + _interior_loc = NULL; + verify_oop(obj); + _interior_loc = NULL; + } + + /** + * Verify oop fields from this object. + * @param obj host object for verified fields + */ + void verify_oops_from(oop obj) { + _loc = obj; + obj->oop_iterate(this); + _loc = NULL; + } + + virtual void do_oop(oop* p) { do_oop_work(p); } + virtual void do_oop(narrowOop* p) { do_oop_work(p); } +}; + +class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure {private: + size_t _used, _committed, _garbage; +public: + ShenandoahCalculateRegionStatsClosure() : _used(0), _committed(0), _garbage(0) {}; + + void heap_region_do(ShenandoahHeapRegion* r) { + _used += r->used(); + _garbage += r->garbage(); + _committed += r->is_committed() ?
ShenandoahHeapRegion::region_size_bytes() : 0; + } + + size_t used() { return _used; } + size_t committed() { return _committed; } + size_t garbage() { return _garbage; } +}; + +class ShenandoahVerifyHeapRegionClosure : public ShenandoahHeapRegionClosure { +private: + ShenandoahHeap* _heap; + const char* _phase; + ShenandoahVerifier::VerifyRegions _regions; +public: + ShenandoahVerifyHeapRegionClosure(const char* phase, ShenandoahVerifier::VerifyRegions regions) : + _heap(ShenandoahHeap::heap()), + _phase(phase), + _regions(regions) {}; + + void print_failure(ShenandoahHeapRegion* r, const char* label) { + ResourceMark rm; + + ShenandoahMessageBuffer msg("Shenandoah verification failed; %s: %s\n\n", _phase, label); + + stringStream ss; + r->print_on(&ss); + msg.append("%s", ss.as_string()); + + report_vm_error(__FILE__, __LINE__, msg.buffer()); + } + + void verify(ShenandoahHeapRegion* r, bool test, const char* msg) { + if (!test) { + print_failure(r, msg); + } + } + + void heap_region_do(ShenandoahHeapRegion* r) { + switch (_regions) { + case ShenandoahVerifier::_verify_regions_disable: + break; + case ShenandoahVerifier::_verify_regions_notrash: + verify(r, !r->is_trash(), + "Should not have trash regions"); + break; + case ShenandoahVerifier::_verify_regions_nocset: + verify(r, !r->is_cset(), + "Should not have cset regions"); + break; + case ShenandoahVerifier::_verify_regions_notrash_nocset: + verify(r, !r->is_trash(), + "Should not have trash regions"); + verify(r, !r->is_cset(), + "Should not have cset regions"); + break; + default: + ShouldNotReachHere(); + } + + verify(r, r->capacity() == ShenandoahHeapRegion::region_size_bytes(), + "Capacity should match region size"); + + verify(r, r->bottom() <= r->top(), + "Region top should not be less than bottom"); + + verify(r, r->bottom() <= _heap->marking_context()->top_at_mark_start(r), + "Region TAMS should not be less than bottom"); + + verify(r, _heap->marking_context()->top_at_mark_start(r) <= r->top(), + "Complete TAMS should not be larger than top"); + + verify(r, r->get_live_data_bytes() <= r->capacity(), + "Live data cannot be larger than capacity"); + + verify(r, r->garbage() <= r->capacity(), + "Garbage cannot be larger than capacity"); + + verify(r, r->used() <= r->capacity(), + "Used cannot be larger than capacity"); + + verify(r, r->get_shared_allocs() <= r->capacity(), + "Shared alloc count should not be larger than capacity"); + + verify(r, r->get_tlab_allocs() <= r->capacity(), + "TLAB alloc count should not be larger than capacity"); + + verify(r, r->get_gclab_allocs() <= r->capacity(), + "GCLAB alloc count should not be larger than capacity"); + + verify(r, r->get_shared_allocs() + r->get_tlab_allocs() + r->get_gclab_allocs() == r->used(), + "Accurate accounting: shared + TLAB + GCLAB = used"); + + verify(r, !r->is_empty() || !r->has_live(), + "Empty regions should not have live data"); + + verify(r, r->is_cset() == _heap->collection_set()->is_in(r), + "Transitional: region flags and collection set agree"); + + verify(r, r->is_empty() || r->seqnum_first_alloc() != 0, + "Non-empty regions should have first seqnum set"); + + verify(r, r->is_empty() || (r->seqnum_first_alloc_mutator() != 0 || r->seqnum_first_alloc_gc() != 0), + "Non-empty regions should have first seqnum set to either GC or mutator"); + + verify(r, r->is_empty() || r->seqnum_last_alloc() != 0, + "Non-empty regions should have last seqnum set"); + + verify(r, r->is_empty() || (r->seqnum_last_alloc_mutator() != 0 || r->seqnum_last_alloc_gc() != 0), + 
"Non-empty regions should have last seqnum set to either GC or mutator"); + + verify(r, r->seqnum_first_alloc() <= r->seqnum_last_alloc(), + "First seqnum should not be greater than last timestamp"); + + verify(r, r->seqnum_first_alloc_mutator() <= r->seqnum_last_alloc_mutator(), + "First mutator seqnum should not be greater than last seqnum"); + + verify(r, r->seqnum_first_alloc_gc() <= r->seqnum_last_alloc_gc(), + "First GC seqnum should not be greater than last seqnum"); + } +}; + +class ShenandoahVerifierReachableTask : public AbstractGangTask { +private: + const char* _label; + ShenandoahRootVerifier* _verifier; + ShenandoahVerifier::VerifyOptions _options; + ShenandoahHeap* _heap; + ShenandoahLivenessData* _ld; + MarkBitMap* _bitmap; + volatile size_t _processed; + +public: + ShenandoahVerifierReachableTask(MarkBitMap* bitmap, + ShenandoahLivenessData* ld, + ShenandoahRootVerifier* verifier, + const char* label, + ShenandoahVerifier::VerifyOptions options) : + AbstractGangTask("Shenandoah Parallel Verifier Reachable Task"), + _label(label), + _verifier(verifier), + _options(options), + _heap(ShenandoahHeap::heap()), + _ld(ld), + _bitmap(bitmap), + _processed(0) {}; + + size_t processed() { + return _processed; + } + + virtual void work(uint worker_id) { + ResourceMark rm; + ShenandoahVerifierStack stack; + + // On level 2, we need to only check the roots once. + // On level 3, we want to check the roots, and seed the local stack. + // It is a lesser evil to accept multiple root scans at level 3, because + // extended parallelism would buy us out. + if (((ShenandoahVerifyLevel == 2) && (worker_id == 0)) + || (ShenandoahVerifyLevel >= 3)) { + ShenandoahVerifyOopClosure cl(&stack, _bitmap, _ld, + ShenandoahMessageBuffer("%s, Roots", _label), + _options); + if (_heap->unload_classes()) { + _verifier->strong_roots_do(&cl); + } else { + _verifier->roots_do(&cl); + } + } + + size_t processed = 0; + + if (ShenandoahVerifyLevel >= 3) { + ShenandoahVerifyOopClosure cl(&stack, _bitmap, _ld, + ShenandoahMessageBuffer("%s, Reachable", _label), + _options); + while (!stack.is_empty()) { + processed++; + ShenandoahVerifierTask task = stack.pop(); + cl.verify_oops_from(task.obj()); + } + } + + Atomic::add(processed, &_processed); + } +}; + +class ShenandoahVerifierMarkedRegionTask : public AbstractGangTask { +private: + const char* _label; + ShenandoahVerifier::VerifyOptions _options; + ShenandoahHeap *_heap; + MarkBitMap* _bitmap; + ShenandoahLivenessData* _ld; + volatile size_t _claimed; + volatile size_t _processed; + +public: + ShenandoahVerifierMarkedRegionTask(MarkBitMap* bitmap, + ShenandoahLivenessData* ld, + const char* label, + ShenandoahVerifier::VerifyOptions options) : + AbstractGangTask("Shenandoah Parallel Verifier Marked Region"), + _label(label), + _options(options), + _heap(ShenandoahHeap::heap()), + _bitmap(bitmap), + _ld(ld), + _claimed(0), + _processed(0) {}; + + size_t processed() { + return _processed; + } + + virtual void work(uint worker_id) { + ShenandoahVerifierStack stack; + ShenandoahVerifyOopClosure cl(&stack, _bitmap, _ld, + ShenandoahMessageBuffer("%s, Marked", _label), + _options); + + while (true) { + size_t v = Atomic::add(1u, &_claimed) - 1; + if (v < _heap->num_regions()) { + ShenandoahHeapRegion* r = _heap->get_region(v); + if (!r->is_humongous() && !r->is_trash()) { + work_regular(r, stack, cl); + } else if (r->is_humongous_start()) { + work_humongous(r, stack, cl); + } + } else { + break; + } + } + } + + virtual void work_humongous(ShenandoahHeapRegion *r, 
ShenandoahVerifierStack& stack, ShenandoahVerifyOopClosure& cl) { + size_t processed = 0; + HeapWord* obj = r->bottom(); + if (_heap->complete_marking_context()->is_marked((oop)obj)) { + verify_and_follow(obj, stack, cl, &processed); + } + Atomic::add(processed, &_processed); + } + + virtual void work_regular(ShenandoahHeapRegion *r, ShenandoahVerifierStack &stack, ShenandoahVerifyOopClosure &cl) { + size_t processed = 0; + MarkBitMap* mark_bit_map = _heap->complete_marking_context()->mark_bit_map(); + HeapWord* tams = _heap->complete_marking_context()->top_at_mark_start(r); + + // Bitmaps, before TAMS + if (tams > r->bottom()) { + HeapWord* start = r->bottom(); + HeapWord* addr = mark_bit_map->getNextMarkedWordAddress(start, tams); + + while (addr < tams) { + verify_and_follow(addr, stack, cl, &processed); + addr += 1; + if (addr < tams) { + addr = mark_bit_map->getNextMarkedWordAddress(addr, tams); + } + } + } + + // Size-based, after TAMS + { + HeapWord* limit = r->top(); + HeapWord* addr = tams; + + while (addr < limit) { + verify_and_follow(addr, stack, cl, &processed); + addr += oop(addr)->size(); + } + } + + Atomic::add(processed, &_processed); + } + + void verify_and_follow(HeapWord *addr, ShenandoahVerifierStack &stack, ShenandoahVerifyOopClosure &cl, size_t *processed) { + if (!_bitmap->parMark(addr)) return; + + // Verify the object itself: + oop obj = oop(addr); + cl.verify_oop_standalone(obj); + + // Verify everything reachable from that object too, hopefully realizing + // everything was already marked, and never touching further: + cl.verify_oops_from(obj); + (*processed)++; + + while (!stack.is_empty()) { + ShenandoahVerifierTask task = stack.pop(); + cl.verify_oops_from(task.obj()); + (*processed)++; + } + } +}; + +class VerifyThreadGCState : public ThreadClosure {private: + const char* _label; + char _expected; + +public: + VerifyThreadGCState(const char* label, char expected) : _label(label), _expected(expected) {} + void do_thread(Thread* t) { + char actual = ShenandoahThreadLocalData::gc_state(t); + if (actual != _expected) { + fatal("%s: Thread %s: expected gc-state %d, actual %d", _label, t->name(), _expected, actual); + } + } +}; + +class ShenandoahGCStateResetter : public StackObj {private: + ShenandoahHeap* const _heap; + char _gc_state; + +public: + ShenandoahGCStateResetter() : _heap(ShenandoahHeap::heap()) { + _gc_state = _heap->gc_state(); + _heap->_gc_state.clear(); + } + + ~ShenandoahGCStateResetter() { + _heap->_gc_state.set(_gc_state); + assert(_heap->gc_state() == _gc_state, "Should be restored"); + } +}; + +void ShenandoahVerifier::verify_at_safepoint(const char *label, + VerifyForwarded forwarded, VerifyMarked marked, + VerifyCollectionSet cset, + VerifyLiveness liveness, VerifyRegions regions, + VerifyGCState gcstate) { + guarantee(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "only when nothing else happens"); + guarantee(ShenandoahVerify, "only when enabled, and bitmap is initialized in ShenandoahHeap::initialize"); + + // Avoid side-effect of changing workers' active thread count, but bypass concurrent/parallel protocol check + ShenandoahPushWorkerScope verify_worker_scope(_heap->workers(), _heap->max_workers(), false /*bypass check*/); + + log_info(gc,start)("Verify %s, Level " INTX_FORMAT, label, ShenandoahVerifyLevel); + + // GC state checks + { + char expected = -1; + bool enabled; + switch (gcstate) { + case _verify_gcstate_disable: + enabled = false; + break; + case _verify_gcstate_forwarded: + enabled = true; + expected =
ShenandoahHeap::HAS_FORWARDED; + break; + case _verify_gcstate_evacuation: + enabled = true; + expected = ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION; + break; + case _verify_gcstate_stable: + enabled = true; + expected = ShenandoahHeap::STABLE; + break; + default: + enabled = false; + assert(false, "Unhandled gc-state verification"); + } + + if (enabled) { + char actual = _heap->gc_state(); + if (actual != expected) { + fatal("%s: Global gc-state: expected %d, actual %d", label, expected, actual); + } + + VerifyThreadGCState vtgcs(label, expected); + Threads::java_threads_do(&vtgcs); + } + } + + // Deactivate barriers temporarily: Verifier wants plain heap accesses + ShenandoahGCStateResetter resetter; + + // Heap size checks + { + ShenandoahHeapLocker lock(_heap->lock()); + + ShenandoahCalculateRegionStatsClosure cl; + _heap->heap_region_iterate(&cl); + size_t heap_used = _heap->used(); + guarantee(cl.used() == heap_used, + "%s: heap used size must be consistent: heap-used = " SIZE_FORMAT "%s, regions-used = " SIZE_FORMAT "%s", + label, + byte_size_in_proper_unit(heap_used), proper_unit_for_byte_size(heap_used), + byte_size_in_proper_unit(cl.used()), proper_unit_for_byte_size(cl.used())); + + size_t heap_committed = _heap->committed(); + guarantee(cl.committed() == heap_committed, + "%s: heap committed size must be consistent: heap-committed = " SIZE_FORMAT "%s, regions-committed = " SIZE_FORMAT "%s", + label, + byte_size_in_proper_unit(heap_committed), proper_unit_for_byte_size(heap_committed), + byte_size_in_proper_unit(cl.committed()), proper_unit_for_byte_size(cl.committed())); + } + + // Internal heap region checks + if (ShenandoahVerifyLevel >= 1) { + ShenandoahVerifyHeapRegionClosure cl(label, regions); + _heap->heap_region_iterate(&cl); + } + + OrderAccess::fence(); + _heap->make_parsable(false); + + // Allocate temporary bitmap for storing marking wavefront: + _verification_bit_map->clear(); + + // Allocate temporary array for storing liveness data + ShenandoahLivenessData* ld = NEW_C_HEAP_ARRAY(ShenandoahLivenessData, _heap->num_regions(), mtGC); + Copy::fill_to_bytes((void*)ld, _heap->num_regions()*sizeof(ShenandoahLivenessData), 0); + + const VerifyOptions& options = ShenandoahVerifier::VerifyOptions(forwarded, marked, cset, liveness, regions, gcstate); + + // Steps 1-2. Scan root set to get initial reachable set. Finish walking the reachable heap. + // This verifies what application can see, since it only cares about reachable objects. + size_t count_reachable = 0; + if (ShenandoahVerifyLevel >= 2) { + ShenandoahRootVerifier verifier; + + ShenandoahVerifierReachableTask task(_verification_bit_map, ld, &verifier, label, options); + _heap->workers()->run_task(&task); + count_reachable = task.processed(); + } + + // Step 3. Walk marked objects. Marked objects might be unreachable. This verifies what collector, + // not the application, can see during the region scans. There is no reason to process the objects + // that were already verified, e.g. those marked in verification bitmap. There is interaction with TAMS: + // before TAMS, we verify the bitmaps, if available; after TAMS, we walk until the top(). 
It mimics + // what marked_object_iterate is doing, without calling into that optimized (and possibly incorrect) + // version + + size_t count_marked = 0; + if (ShenandoahVerifyLevel >= 4 && marked == _verify_marked_complete) { + guarantee(_heap->marking_context()->is_complete(), "Marking context should be complete"); + ShenandoahVerifierMarkedRegionTask task(_verification_bit_map, ld, label, options); + _heap->workers()->run_task(&task); + count_marked = task.processed(); + } else { + guarantee(ShenandoahVerifyLevel < 4 || marked == _verify_marked_incomplete || marked == _verify_marked_disable, "Should be"); + } + + // Step 4. Verify accumulated liveness data, if needed. Only reliable if verification level includes + // marked objects. + + if (ShenandoahVerifyLevel >= 4 && marked == _verify_marked_complete && liveness == _verify_liveness_complete) { + for (size_t i = 0; i < _heap->num_regions(); i++) { + ShenandoahHeapRegion* r = _heap->get_region(i); + + juint verf_live = 0; + if (r->is_humongous()) { + // For humongous objects, test if start region is marked live, and if so, + // all humongous regions in that chain have live data equal to their "used". + juint start_live = OrderAccess::load_acquire(&ld[r->humongous_start_region()->region_number()]); + if (start_live > 0) { + verf_live = (juint)(r->used() / HeapWordSize); + } + } else { + verf_live = OrderAccess::load_acquire(&ld[r->region_number()]); + } + + size_t reg_live = r->get_live_data_words(); + if (reg_live != verf_live) { + ResourceMark rm; + stringStream ss; + r->print_on(&ss); + fatal("%s: Live data should match: region-live = " SIZE_FORMAT ", verifier-live = " UINT32_FORMAT "\n%s", + label, reg_live, verf_live, ss.as_string()); + } + } + } + + log_info(gc)("Verify %s, Level " INTX_FORMAT " (" SIZE_FORMAT " reachable, " SIZE_FORMAT " marked)", + label, ShenandoahVerifyLevel, count_reachable, count_marked); + + FREE_C_HEAP_ARRAY(ShenandoahLivenessData, ld); +} + +void ShenandoahVerifier::verify_generic(VerifyOption vo) { + verify_at_safepoint( + "Generic Verification", + _verify_forwarded_allow, // conservatively allow forwarded + _verify_marked_disable, // do not verify marked: lots of time wasted checking dead allocations + _verify_cset_disable, // cset may be inconsistent + _verify_liveness_disable, // no reliable liveness data + _verify_regions_disable, // no reliable region data + _verify_gcstate_disable // no data about gcstate + ); +} + +void ShenandoahVerifier::verify_before_concmark() { + if (_heap->has_forwarded_objects()) { + verify_at_safepoint( + "Before Mark", + _verify_forwarded_allow, // may have forwarded references + _verify_marked_disable, // do not verify marked: lots of time wasted checking dead allocations + _verify_cset_forwarded, // allow forwarded references to cset + _verify_liveness_disable, // no reliable liveness data + _verify_regions_notrash, // no trash regions + _verify_gcstate_forwarded // there are forwarded objects + ); + } else { + verify_at_safepoint( + "Before Mark", + _verify_forwarded_none, // UR should have fixed up + _verify_marked_disable, // do not verify marked: lots of time wasted checking dead allocations + _verify_cset_none, // UR should have fixed this + _verify_liveness_disable, // no reliable liveness data + _verify_regions_notrash, // no trash regions + _verify_gcstate_stable // there are no forwarded objects + ); + } +} + +void ShenandoahVerifier::verify_after_concmark() { + verify_at_safepoint( + "After Mark", + _verify_forwarded_none, // no forwarded references +
_verify_marked_complete, // bitmaps as precise as we can get + _verify_cset_none, // no references to cset anymore + _verify_liveness_complete, // liveness data must be complete here + _verify_regions_disable, // trash regions not yet recycled + _verify_gcstate_stable // mark should have stabilized the heap + ); +} + +void ShenandoahVerifier::verify_before_evacuation() { + verify_at_safepoint( + "Before Evacuation", + _verify_forwarded_none, // no forwarded references + _verify_marked_complete, // walk over marked objects too + _verify_cset_disable, // non-forwarded references to cset expected + _verify_liveness_complete, // liveness data must be complete here + _verify_regions_disable, // trash regions not yet recycled + _verify_gcstate_stable // mark should have stabilized the heap + ); +} + +void ShenandoahVerifier::verify_during_evacuation() { + verify_at_safepoint( + "During Evacuation", + _verify_forwarded_allow, // some forwarded references are allowed + _verify_marked_disable, // walk only roots + _verify_cset_disable, // some cset references are not forwarded yet + _verify_liveness_disable, // liveness data might be already stale after pre-evacs + _verify_regions_disable, // trash regions not yet recycled + _verify_gcstate_evacuation // evacuation is in progress + ); +} + +void ShenandoahVerifier::verify_after_evacuation() { + verify_at_safepoint( + "After Evacuation", + _verify_forwarded_allow, // objects are still forwarded + _verify_marked_complete, // bitmaps might be stale, but alloc-after-mark should be well + _verify_cset_forwarded, // all cset refs are fully forwarded + _verify_liveness_disable, // no reliable liveness data anymore + _verify_regions_notrash, // trash regions have been recycled already + _verify_gcstate_forwarded // evacuation produced some forwarded objects + ); +} + +void ShenandoahVerifier::verify_before_updaterefs() { + verify_at_safepoint( + "Before Updating References", + _verify_forwarded_allow, // forwarded references allowed + _verify_marked_complete, // bitmaps might be stale, but alloc-after-mark should be well + _verify_cset_forwarded, // all cset refs are fully forwarded + _verify_liveness_disable, // no reliable liveness data anymore + _verify_regions_notrash, // trash regions have been recycled already + _verify_gcstate_forwarded // evacuation should have produced some forwarded objects + ); +} + +void ShenandoahVerifier::verify_after_updaterefs() { + verify_at_safepoint( + "After Updating References", + _verify_forwarded_none, // no forwarded references + _verify_marked_complete, // bitmaps might be stale, but alloc-after-mark should be well + _verify_cset_none, // no cset references, all updated + _verify_liveness_disable, // no reliable liveness data anymore + _verify_regions_nocset, // no cset regions, trash regions have appeared + _verify_gcstate_stable // update refs had cleaned up forwarded objects + ); +} + +void ShenandoahVerifier::verify_after_degenerated() { + verify_at_safepoint( + "After Degenerated GC", + _verify_forwarded_none, // all objects are non-forwarded + _verify_marked_complete, // all objects are marked in complete bitmap + _verify_cset_none, // no cset references + _verify_liveness_disable, // no reliable liveness data anymore + _verify_regions_notrash_nocset, // no trash, no cset + _verify_gcstate_stable // degenerated refs had cleaned up forwarded objects + ); +} + +void ShenandoahVerifier::verify_before_traversal() { + verify_at_safepoint( + "Before Traversal", + _verify_forwarded_none, // cannot have forwarded 
objects + _verify_marked_disable, // bitmaps are not relevant before traversal + _verify_cset_none, // no cset references before traversal + _verify_liveness_disable, // no reliable liveness data anymore + _verify_regions_notrash_nocset, // no trash and no cset regions + _verify_gcstate_stable // nothing forwarded before traversal + ); +} + +void ShenandoahVerifier::verify_after_traversal() { + verify_at_safepoint( + "After Traversal", + _verify_forwarded_none, // cannot have forwarded objects + _verify_marked_complete, // should have complete marking after traversal + _verify_cset_none, // no cset references left after traversal + _verify_liveness_disable, // liveness data is not collected for new allocations + _verify_regions_nocset, // no cset regions, trash regions allowed + _verify_gcstate_stable // nothing forwarded after traversal + ); +} + +void ShenandoahVerifier::verify_before_fullgc() { + verify_at_safepoint( + "Before Full GC", + _verify_forwarded_allow, // can have forwarded objects + _verify_marked_disable, // do not verify marked: lots of time wasted checking dead allocations + _verify_cset_disable, // cset might be foobared + _verify_liveness_disable, // no reliable liveness data anymore + _verify_regions_disable, // no reliable region data here + _verify_gcstate_disable // no reliable gcstate data + ); +} + +void ShenandoahVerifier::verify_after_fullgc() { + verify_at_safepoint( + "After Full GC", + _verify_forwarded_none, // all objects are non-forwarded + _verify_marked_complete, // all objects are marked in complete bitmap + _verify_cset_none, // no cset references + _verify_liveness_disable, // no reliable liveness data anymore + _verify_regions_notrash_nocset, // no trash, no cset + _verify_gcstate_stable // full gc cleaned up everything + ); +} + +class ShenandoahVerifyNoForwarded : public OopClosure {private: + template <class T> + void do_oop_work(T* p) { + T o = RawAccess<>::oop_load(p); + if (!CompressedOops::is_null(o)) { + oop obj = CompressedOops::decode_not_null(o); + oop fwd = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(obj); + if (obj != fwd) { + ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, p, NULL, + "Verify Roots", "Should not be forwarded", __FILE__, __LINE__); + } + } + } + +public: + void do_oop(narrowOop* p) { do_oop_work(p); } + void do_oop(oop* p) { do_oop_work(p); } +}; + +class ShenandoahVerifyInToSpaceClosure : public OopClosure {private: + template <class T> + void do_oop_work(T* p) { + T o = RawAccess<>::oop_load(p); + if (!CompressedOops::is_null(o)) { + oop obj = CompressedOops::decode_not_null(o); + ShenandoahHeap* heap = ShenandoahHeap::heap_no_check(); + + if (!heap->marking_context()->is_marked(obj)) { + ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, p, NULL, + "Verify Roots In To-Space", "Should be marked", __FILE__, __LINE__); + } + + if (heap->in_collection_set(obj)) { + ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, p, NULL, + "Verify Roots In To-Space", "Should not be in collection set", __FILE__, __LINE__); + } + + oop fwd = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(obj); + if (obj != fwd) { + ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, p, NULL, + "Verify Roots In To-Space", "Should not be forwarded", __FILE__, __LINE__); + } + } + } + +public: + void do_oop(narrowOop* p) { do_oop_work(p); } + void do_oop(oop* p) { do_oop_work(p); } +}; + +void ShenandoahVerifier::verify_roots_in_to_space() { + ShenandoahRootVerifier verifier; +
+void ShenandoahVerifier::verify_roots_in_to_space() {
+ ShenandoahRootVerifier verifier;
+ ShenandoahVerifyInToSpaceClosure cl;
+ verifier.oops_do(&cl);
+}
+
+void ShenandoahVerifier::verify_roots_no_forwarded() {
+ ShenandoahRootVerifier verifier;
+ ShenandoahVerifyNoForwarded cl;
+ verifier.oops_do(&cl);
+}
+
+void ShenandoahVerifier::verify_roots_no_forwarded_except(ShenandoahRootVerifier::RootTypes types) {
+ ShenandoahRootVerifier verifier;
+ verifier.excludes(types);
+ ShenandoahVerifyNoForwarded cl;
+ verifier.oops_do(&cl);
+}
+
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp 2020-01-17 17:10:51.580128194 +0100
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHVERIFIER_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHVERIFIER_HPP
+
+#include "gc/shared/markBitMap.hpp"
+#include "gc/shenandoah/shenandoahRootVerifier.hpp"
+#include "memory/allocation.hpp"
+#include "oops/oopsHierarchy.hpp"
+#include "utilities/stack.hpp"
+
+class ShenandoahHeap;
+
+#ifdef _WINDOWS
+#pragma warning( disable : 4522 )
+#endif
+
+class ShenandoahVerifierTask {
+public:
+ ShenandoahVerifierTask(oop o = NULL, int idx = 0): _obj(o) { }
+ ShenandoahVerifierTask(oop o, size_t idx): _obj(o) { }
+ ShenandoahVerifierTask(const ShenandoahVerifierTask& t): _obj(t._obj) { }
+
+ ShenandoahVerifierTask& operator =(const ShenandoahVerifierTask& t) {
+ _obj = t._obj;
+ return *this;
+ }
+ volatile ShenandoahVerifierTask&
+ operator =(const volatile ShenandoahVerifierTask& t) volatile {
+ (void)const_cast<oop&>(_obj = t._obj);
+ return *this;
+ }
+
+ inline oop obj() const { return _obj; }
+
+private:
+ oop _obj;
+};
+
+typedef Stack<ShenandoahVerifierTask, mtGC> ShenandoahVerifierStack;
+typedef volatile juint ShenandoahLivenessData;
+
+class ShenandoahVerifier : public CHeapObj<mtGC> {
+private:
+ ShenandoahHeap* _heap;
+ MarkBitMap* _verification_bit_map;
+public:
+ typedef enum {
+ // Disable marked objects verification.
+ _verify_marked_disable,
+
+ // Objects should be marked in "next" bitmap.
+ _verify_marked_incomplete,
+
+ // Objects should be marked in "complete" bitmap.
+ _verify_marked_complete
+ } VerifyMarked;
+
+ typedef enum {
+ // Disable forwarded objects verification.
+ _verify_forwarded_disable,
+
+ // Objects should not have forwardees.
+ _verify_forwarded_none,
+
+ // Objects may have forwardees.
+ _verify_forwarded_allow
+ } VerifyForwarded;
+
+ typedef enum {
+ // Disable collection set verification.
+ _verify_cset_disable,
+
+ // Should have no references to cset.
+ _verify_cset_none,
+
+ // May have references to cset, all should be forwarded.
+ // Note: Allowing non-forwarded references to cset is equivalent + // to _verify_cset_disable. + _verify_cset_forwarded + } VerifyCollectionSet; + + typedef enum { + // Disable liveness verification + _verify_liveness_disable, + + // All objects should belong to live regions + _verify_liveness_conservative, + + // All objects should belong to live regions, + // and liveness data should be accurate + _verify_liveness_complete + } VerifyLiveness; + + typedef enum { + // Disable region verification + _verify_regions_disable, + + // No trash regions allowed + _verify_regions_notrash, + + // No collection set regions allowed + _verify_regions_nocset, + + // No trash and no cset regions allowed + _verify_regions_notrash_nocset + } VerifyRegions; + + typedef enum { + // Disable gc-state verification + _verify_gcstate_disable, + + // Nothing is in progress, no forwarded objects + _verify_gcstate_stable, + + // Nothing is in progress, some objects are forwarded + _verify_gcstate_forwarded, + + // Evacuation is in progress, some objects are forwarded + _verify_gcstate_evacuation + } VerifyGCState; + + struct VerifyOptions { + VerifyForwarded _verify_forwarded; + VerifyMarked _verify_marked; + VerifyCollectionSet _verify_cset; + VerifyLiveness _verify_liveness; + VerifyRegions _verify_regions; + VerifyGCState _verify_gcstate; + + VerifyOptions(VerifyForwarded verify_forwarded, + VerifyMarked verify_marked, + VerifyCollectionSet verify_collection_set, + VerifyLiveness verify_liveness, + VerifyRegions verify_regions, + VerifyGCState verify_gcstate) : + _verify_forwarded(verify_forwarded), _verify_marked(verify_marked), + _verify_cset(verify_collection_set), + _verify_liveness(verify_liveness), _verify_regions(verify_regions), + _verify_gcstate(verify_gcstate) {} + }; + +private: + void verify_at_safepoint(const char *label, + VerifyForwarded forwarded, + VerifyMarked marked, + VerifyCollectionSet cset, + VerifyLiveness liveness, + VerifyRegions regions, + VerifyGCState gcstate); + +public: + ShenandoahVerifier(ShenandoahHeap* heap, MarkBitMap* verification_bitmap) : + _heap(heap), _verification_bit_map(verification_bitmap) {}; + + void verify_before_concmark(); + void verify_after_concmark(); + void verify_before_evacuation(); + void verify_during_evacuation(); + void verify_after_evacuation(); + void verify_before_updaterefs(); + void verify_after_updaterefs(); + void verify_before_fullgc(); + void verify_after_fullgc(); + void verify_before_traversal(); + void verify_after_traversal(); + void verify_after_degenerated(); + void verify_generic(VerifyOption option); + + // Roots should only contain to-space oops + void verify_roots_in_to_space(); + void verify_roots_no_forwarded(); + void verify_roots_no_forwarded_except(ShenandoahRootVerifier::RootTypes types); +}; + +#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHVERIFIER_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.cpp 2020-01-17 17:10:52.187128160 +0100 @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
+#include "gc/shenandoah/shenandoahWorkGroup.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
+
+#include "logging/log.hpp"
+
+ShenandoahWorkerScope::ShenandoahWorkerScope(WorkGang* workers, uint nworkers, const char* msg, bool check) :
+ _workers(workers) {
+ assert(msg != NULL, "Missing message");
+
+ _n_workers = _workers->update_active_workers(nworkers);
+ assert(_n_workers <= nworkers, "Must be");
+
+ log_info(gc, task)("Using %u of %u workers for %s",
+ _n_workers, ShenandoahHeap::heap()->max_workers(), msg);
+
+ if (check) {
+ ShenandoahHeap::heap()->assert_gc_workers(_n_workers);
+ }
+}
+
+ShenandoahWorkerScope::~ShenandoahWorkerScope() {
+ assert(_workers->active_workers() == _n_workers,
+ "Active workers cannot be changed within this scope");
+}
+
+ShenandoahPushWorkerScope::ShenandoahPushWorkerScope(WorkGang* workers, uint nworkers, bool check) :
+ _old_workers(workers->active_workers()),
+ _workers(workers) {
+ _n_workers = _workers->update_active_workers(nworkers);
+ assert(_n_workers <= nworkers, "Must be");
+
+ // Bypass the concurrent/parallel protocol check for non-regular paths, e.g. verifier, etc.
+ if (check) {
+ ShenandoahHeap::heap()->assert_gc_workers(_n_workers);
+ }
+}
+
+ShenandoahPushWorkerScope::~ShenandoahPushWorkerScope() {
+ assert(_workers->active_workers() == _n_workers,
+ "Active workers cannot be changed within this scope");
+ // Restore old worker value
+ uint nworkers = _workers->update_active_workers(_old_workers);
+ assert(nworkers == _old_workers, "Must be able to restore");
+}
+
+ShenandoahPushWorkerQueuesScope::ShenandoahPushWorkerQueuesScope(WorkGang* workers, ShenandoahObjToScanQueueSet* queues, uint nworkers, bool check) :
+ ShenandoahPushWorkerScope(workers, nworkers, check), _queues(queues) {
+ _queues->reserve(_n_workers);
+}
+
+ShenandoahPushWorkerQueuesScope::~ShenandoahPushWorkerQueuesScope() {
+ // Restore old worker value
+ _queues->reserve(_old_workers);
+}
+
+AbstractGangWorker* ShenandoahWorkGang::install_worker(uint which) {
+ AbstractGangWorker* worker = WorkGang::install_worker(which);
+ ShenandoahThreadLocalData::create(worker);
+ if (_initialize_gclab) {
+ ShenandoahThreadLocalData::initialize_gclab(worker);
+ }
+ return worker;
+}
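The scopes above are meant to bracket a GC phase: construct one on the stack with the worker count the policy picked, run the task, and let the destructor assert that nobody resized the gang mid-phase. A hedged sketch of the intended call pattern (the workers() accessor and the task object stand in for whatever the calling phase has at hand; they are not names from this patch):

    {
      // Pick a worker count, then pin it for the duration of the phase.
      ShenandoahWorkerScope scope(workers(),
                                  ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                                  "concurrent marking");
      workers()->run_task(&mark_task);   // illustrative task object
    } // the destructor re-checks active_workers() here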
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.hpp 2020-01-17 17:10:52.788128127 +0100
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHWORKGROUP_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHWORKGROUP_HPP
+
+#include "gc/shared/workgroup.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
+#include "memory/allocation.hpp"
+
+class ShenandoahObjToScanQueueSet;
+
+class ShenandoahWorkerScope : public StackObj {
+private:
+ uint _n_workers;
+ WorkGang* _workers;
+public:
+ ShenandoahWorkerScope(WorkGang* workers, uint nworkers, const char* msg, bool do_check = true);
+ ~ShenandoahWorkerScope();
+};
+
+class ShenandoahPushWorkerScope : StackObj {
+protected:
+ uint _n_workers;
+ uint _old_workers;
+ WorkGang* _workers;
+
+public:
+ ShenandoahPushWorkerScope(WorkGang* workers, uint nworkers, bool do_check = true);
+ ~ShenandoahPushWorkerScope();
+};
+
+class ShenandoahPushWorkerQueuesScope : public ShenandoahPushWorkerScope {
+private:
+ ShenandoahObjToScanQueueSet* _queues;
+
+public:
+ ShenandoahPushWorkerQueuesScope(WorkGang* workers, ShenandoahObjToScanQueueSet* queues, uint nworkers, bool do_check = true);
+ ~ShenandoahPushWorkerQueuesScope();
+};
+
+class ShenandoahWorkGang : public WorkGang {
+private:
+ bool _initialize_gclab;
+public:
+ ShenandoahWorkGang(const char* name,
+ uint workers,
+ bool are_GC_task_threads,
+ bool are_ConcurrentGC_threads) :
+ WorkGang(name, workers, are_GC_task_threads, are_ConcurrentGC_threads), _initialize_gclab(false) {
+ }
+
+ // Create a GC worker and install it into the work gang.
+ // We need to initialize the GCLAB for dynamically allocated workers.
+ AbstractGangWorker* install_worker(uint which);
+
+ // We allow _active_workers < _total_workers when UseDynamicNumberOfGCThreads is off.
+ // We use the same WorkGang for concurrent and parallel processing, and honor
+ // ConcGCThreads and ParallelGCThreads settings.
+ virtual uint active_workers() const {
+ assert(_active_workers > 0, "no active worker");
+ assert(_active_workers <= _total_workers,
+ "_active_workers: %u > _total_workers: %u", _active_workers, _total_workers);
+ return _active_workers;
+ }
+
+ void set_initialize_gclab() { assert(!_initialize_gclab, "Can only enable once"); _initialize_gclab = true; }
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHWORKGROUP_HPP
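For illustration, the owner of the gang is expected to construct it once at startup and opt into GCLAB setup before any workers are created. A minimal sketch under those assumptions (the exact construction site is not part of this hunk; the thread count cast and the initialize_workers() call follow the generic WorkGang protocol):

    // Sketch only: roughly how a heap would wire up the gang at init time.
    ShenandoahWorkGang* gang =
        new ShenandoahWorkGang("Shenandoah GC Threads", (uint)ParallelGCThreads,
                               true /* are_GC_task_threads */,
                               false /* are_ConcurrentGC_threads */);
    gang->set_initialize_gclab(); // workers installed later also get a GCLAB
    gang->initialize_workers();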
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahWorkerPolicy.cpp 2020-01-17 17:10:53.387128094 +0100
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shared/adaptiveSizePolicy.hpp"
+#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
+#include "runtime/thread.hpp"
+
+uint ShenandoahWorkerPolicy::_prev_par_marking = 0;
+uint ShenandoahWorkerPolicy::_prev_conc_marking = 0;
+uint ShenandoahWorkerPolicy::_prev_conc_evac = 0;
+uint ShenandoahWorkerPolicy::_prev_fullgc = 0;
+uint ShenandoahWorkerPolicy::_prev_degengc = 0;
+uint ShenandoahWorkerPolicy::_prev_stw_traversal = 0;
+uint ShenandoahWorkerPolicy::_prev_conc_traversal = 0;
+uint ShenandoahWorkerPolicy::_prev_conc_update_ref = 0;
+uint ShenandoahWorkerPolicy::_prev_par_update_ref = 0;
+uint ShenandoahWorkerPolicy::_prev_conc_cleanup = 0;
+uint ShenandoahWorkerPolicy::_prev_conc_reset = 0;
+
+uint ShenandoahWorkerPolicy::calc_workers_for_init_marking() {
+ uint active_workers = (_prev_par_marking == 0) ? ParallelGCThreads : _prev_par_marking;
+
+ _prev_par_marking =
+ AdaptiveSizePolicy::calc_active_workers(ParallelGCThreads,
+ active_workers,
+ Threads::number_of_non_daemon_threads());
+ return _prev_par_marking;
+}
+
+uint ShenandoahWorkerPolicy::calc_workers_for_conc_marking() {
+ uint active_workers = (_prev_conc_marking == 0) ? ConcGCThreads : _prev_conc_marking;
+ _prev_conc_marking =
+ AdaptiveSizePolicy::calc_active_conc_workers(ConcGCThreads,
+ active_workers,
+ Threads::number_of_non_daemon_threads());
+ return _prev_conc_marking;
+}
+
+// Reuse the calculation result from init marking
+uint ShenandoahWorkerPolicy::calc_workers_for_final_marking() {
+ return _prev_par_marking;
+}
+
+// Calculate workers for concurrent evacuation (concurrent GC)
+uint ShenandoahWorkerPolicy::calc_workers_for_conc_evac() {
+ uint active_workers = (_prev_conc_evac == 0) ? ConcGCThreads : _prev_conc_evac;
+ _prev_conc_evac =
+ AdaptiveSizePolicy::calc_active_conc_workers(ConcGCThreads,
+ active_workers,
+ Threads::number_of_non_daemon_threads());
+ return _prev_conc_evac;
+}
+
+// Calculate workers for parallel fullgc
+uint ShenandoahWorkerPolicy::calc_workers_for_fullgc() {
+ uint active_workers = (_prev_fullgc == 0) ? ParallelGCThreads : _prev_fullgc;
+ _prev_fullgc =
+ AdaptiveSizePolicy::calc_active_workers(ParallelGCThreads,
+ active_workers,
+ Threads::number_of_non_daemon_threads());
+ return _prev_fullgc;
+}
+
+// Calculate workers for parallel degenerated gc
+uint ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated() {
+ uint active_workers = (_prev_degengc == 0) ? ParallelGCThreads : _prev_degengc;
+ _prev_degengc =
+ AdaptiveSizePolicy::calc_active_workers(ParallelGCThreads,
+ active_workers,
+ Threads::number_of_non_daemon_threads());
+ return _prev_degengc;
+}
+
+// Calculate workers for Stop-the-world traversal GC
+uint ShenandoahWorkerPolicy::calc_workers_for_stw_traversal() {
+ uint active_workers = (_prev_stw_traversal == 0) ? ParallelGCThreads : _prev_stw_traversal;
+ _prev_stw_traversal =
+ AdaptiveSizePolicy::calc_active_workers(ParallelGCThreads,
+ active_workers,
+ Threads::number_of_non_daemon_threads());
+ return _prev_stw_traversal;
+}
+
+// Calculate workers for concurrent traversal GC
+uint ShenandoahWorkerPolicy::calc_workers_for_conc_traversal() {
+ uint active_workers = (_prev_conc_traversal == 0) ? ConcGCThreads : _prev_conc_traversal;
+ _prev_conc_traversal =
+ AdaptiveSizePolicy::calc_active_conc_workers(ConcGCThreads,
+ active_workers,
+ Threads::number_of_non_daemon_threads());
+ return _prev_conc_traversal;
+}
+
+// Calculate workers for concurrent reference update
+uint ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref() {
+ uint active_workers = (_prev_conc_update_ref == 0) ? ConcGCThreads : _prev_conc_update_ref;
+ _prev_conc_update_ref =
+ AdaptiveSizePolicy::calc_active_conc_workers(ConcGCThreads,
+ active_workers,
+ Threads::number_of_non_daemon_threads());
+ return _prev_conc_update_ref;
+}
+
+// Calculate workers for parallel reference update
+uint ShenandoahWorkerPolicy::calc_workers_for_final_update_ref() {
+ uint active_workers = (_prev_par_update_ref == 0) ? ParallelGCThreads : _prev_par_update_ref;
+ _prev_par_update_ref =
+ AdaptiveSizePolicy::calc_active_workers(ParallelGCThreads,
+ active_workers,
+ Threads::number_of_non_daemon_threads());
+ return _prev_par_update_ref;
+}
+
+uint ShenandoahWorkerPolicy::calc_workers_for_conc_preclean() {
+ // Precleaning is single-threaded
+ return 1;
+}
+
+uint ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup() {
+ uint active_workers = (_prev_conc_cleanup == 0) ? ConcGCThreads : _prev_conc_cleanup;
+ _prev_conc_cleanup =
+ AdaptiveSizePolicy::calc_active_conc_workers(ConcGCThreads,
+ active_workers,
+ Threads::number_of_non_daemon_threads());
+ return _prev_conc_cleanup;
+}
+
+uint ShenandoahWorkerPolicy::calc_workers_for_conc_reset() {
+ uint active_workers = (_prev_conc_reset == 0) ? ConcGCThreads : _prev_conc_reset;
+ _prev_conc_reset =
+ AdaptiveSizePolicy::calc_active_conc_workers(ConcGCThreads,
+ active_workers,
+ Threads::number_of_non_daemon_threads());
+ return _prev_conc_reset;
+}
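Every method above follows the same pattern: seed the calculation with ConcGCThreads or ParallelGCThreads on the first cycle, then feed the previous answer back in so the worker count adapts gradually rather than oscillating. Callers are expected to pair the policy answer with a scope from shenandoahWorkGroup.hpp, so the chosen count both resizes the gang and reserves task queues. A hedged sketch of a hypothetical call site (the heap and queue accessors are illustrative names, not from this patch):

    // Sketch only: size the gang for final marking and reserve queues to match.
    uint nworkers = ShenandoahWorkerPolicy::calc_workers_for_final_marking();
    ShenandoahPushWorkerQueuesScope scope(heap->workers(), task_queues(),
                                          nworkers, true /* do_check */);
    // Run the final-mark task with nworkers here; the destructor restores
    // the previous worker count and queue reservation.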
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahWorkerPolicy.hpp 2020-01-17 17:10:53.985128061 +0100
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHWORKERPOLICY_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHWORKERPOLICY_HPP
+
+#include "memory/allocation.hpp"
+
+class ShenandoahWorkerPolicy : AllStatic {
+private:
+ static uint _prev_par_marking;
+ static uint _prev_conc_marking;
+ static uint _prev_conc_evac;
+ static uint _prev_fullgc;
+ static uint _prev_degengc;
+ static uint _prev_stw_traversal;
+ static uint _prev_conc_traversal;
+ static uint _prev_conc_update_ref;
+ static uint _prev_par_update_ref;
+ static uint _prev_conc_cleanup;
+ static uint _prev_conc_reset;
+
+public:
+ // Calculate the number of workers for initial marking
+ static uint calc_workers_for_init_marking();
+
+ // Calculate the number of workers for concurrent marking
+ static uint calc_workers_for_conc_marking();
+
+ // Calculate the number of workers for final marking
+ static uint calc_workers_for_final_marking();
+
+ // Calculate workers for concurrent evacuation (concurrent GC)
+ static uint calc_workers_for_conc_evac();
+
+ // Calculate workers for parallel full gc
+ static uint calc_workers_for_fullgc();
+
+ // Calculate workers for parallel degenerated gc
+ static uint calc_workers_for_stw_degenerated();
+
+ // Calculate workers for Stop-the-world traversal GC
+ static uint calc_workers_for_stw_traversal();
+
+ // Calculate workers for concurrent traversal GC
+ static uint calc_workers_for_conc_traversal();
+
+ // Calculate workers for concurrent reference update
+ static uint calc_workers_for_conc_update_ref();
+
+ // Calculate workers for parallel/final reference update
+ static uint calc_workers_for_final_update_ref();
+
+ // Calculate workers for concurrent precleaning
+ static uint calc_workers_for_conc_preclean();
+
+ // Calculate workers for concurrent cleanup
+ static uint calc_workers_for_conc_cleanup();
+
+ // Calculate workers for concurrent reset
+ static uint calc_workers_for_conc_reset();
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHWORKERPOLICY_HPP
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp 2020-01-17 17:10:54.596128028 +0100
@@ -0,0 +1,397 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAH_GLOBALS_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAH_GLOBALS_HPP
+
+#define GC_SHENANDOAH_FLAGS(develop, \
+ develop_pd, \
+ product, \
+ product_pd, \
+ diagnostic, \
+ diagnostic_pd, \
+ experimental, \
+ notproduct, \
+ manageable, \
+ product_rw, \
+ lp64_product, \
+ range, \
+ constraint, \
+ writeable) \
+ \
+ experimental(size_t, ShenandoahHeapRegionSize, 0, \
+ "Size of the Shenandoah regions. Set to zero to detect " \
+ "automatically.") \
+ \
+ experimental(size_t, ShenandoahTargetNumRegions, 2048, \
+ "Target number of regions. We try to get around that many " \
+ "regions, based on Shenandoah{Min,Max}RegionSize.") \
+ \
+ experimental(size_t, ShenandoahMinRegionSize, 256 * K, \
+ "Minimum Shenandoah heap region size.") \
+ \
+ experimental(size_t, ShenandoahMaxRegionSize, 32 * M, \
+ "Maximum Shenandoah heap region size.") \
+ \
+ experimental(intx, ShenandoahHumongousThreshold, 100, \
+ "How large should an object be to get allocated in a humongous " \
+ "region, as a percentage of the heap region size. This also " \
+ "caps the maximum TLAB size.") \
+ range(1, 100) \
+ \
+ product(ccstr, ShenandoahGCHeuristics, "adaptive", \
+ "The heuristics to use in Shenandoah GC. Possible values:" \
+ " *) adaptive - adapt to maintain the given amount of free heap;" \
+ " *) static - start concurrent GC when static free heap " \
+ " threshold and static allocation threshold are " \
+ " tripped;" \
+ " *) aggressive - run concurrent GC continuously, evacuate " \
+ " everything;" \
+ " *) compact - run GC with lower footprint target, may end up " \
+ " doing continuous GC, evacuate lots of live " \
+ " objects, uncommit heap aggressively;") \
+ \
+ product(ccstr, ShenandoahGCMode, "normal", \
+ "The GC mode to use in Shenandoah GC. Possible values:" \
+ " *) normal - normal GC (mark-evac-update)" \
+ " *) traversal - traversal GC (single-pass)" \
+ " *) passive - disable concurrent GC, do stop-the-world GC") \
+ \
+ experimental(ccstr, ShenandoahUpdateRefsEarly, "adaptive", \
+ "Run a separate concurrent reference updating phase after " \
+ "concurrent evacuation. Possible values: 'on', 'off', 'adaptive'")\
+ \
+ experimental(uintx, ShenandoahRefProcFrequency, 5, \
+ "How often should (weak, soft, etc) references be processed. " \
+ "References get processed at every Nth GC cycle. Set to zero " \
+ "to disable reference processing.") \
+ \
+ experimental(uintx, ShenandoahUnloadClassesFrequency, 5, \
+ "How often should classes get unloaded. " \
+ "Class unloading is performed at every Nth GC cycle. " \
+ "Set to zero to disable class unloading during concurrent GC.") \
+ \
+ experimental(uintx, ShenandoahGarbageThreshold, 60, \
+ "Sets the percentage of garbage a region needs to contain " \
+ "before it can be marked for collection. Does not apply to all " \
+ "heuristics.") \
+ range(0,100) \
+ \
+ experimental(uintx, ShenandoahFreeThreshold, 10, \
+ "Set the percentage of free heap at which a GC cycle is started. "\
+ "Does not apply to all heuristics.") \
+ range(0,100) \
+ \
+ experimental(uintx, ShenandoahInitFreeThreshold, 70, \
+ "Initial remaining free heap threshold for learning steps in " \
+ "heuristics. As a percentage of total heap size. Does not apply " \
+ "to all heuristics.") \
+ range(0,100) \
+ \
+ experimental(uintx, ShenandoahMinFreeThreshold, 10, \
+ "Minimum remaining free space threshold, after which collection " \
+ "definitely triggers. Does not apply to all heuristics.") \
+ range(0,100) \
+ \
+ experimental(uintx, ShenandoahAllocationThreshold, 0, \
+ "Set percentage of memory allocated since last GC cycle before " \
+ "a new GC cycle can be started. Set to zero to effectively " \
+ "disable.") \
+ range(0,100) \
+ \
+ experimental(uintx, ShenandoahLearningSteps, 5, \
+ "Number of GC cycles to run in order to learn application " \
+ "and GC performance for adaptive heuristics.") \
+ range(0,100) \
+ \
+ experimental(uintx, ShenandoahImmediateThreshold, 90, \
+ "If mark identifies more than this many immediate garbage " \
+ "regions, it shall recycle them, and shall not continue the " \
+ "rest of the GC cycle. The value is a percentage of the total " \
+ "number of candidate regions for the collection set. Setting " \
+ "this threshold to 100% effectively disables this shortcut.") \
+ range(0,100) \
+ \
+ experimental(uintx, ShenandoahMergeUpdateRefsMinGap, 100, \
+ "If GC is currently running in separate update-refs mode " \
+ "this number gives the threshold when to switch to " \
+ "merged update-refs mode. Number is percentage relative to " \
+ "duration(marking)+duration(update-refs).") \
+ \
+ experimental(uintx, ShenandoahMergeUpdateRefsMaxGap, 200, \
+ "If GC is currently running in merged update-refs mode " \
+ "this number gives the threshold when to switch to " \
+ "separate update-refs mode. Number is percentage relative " \
+ "to duration(marking)+duration(update-refs).") \
+ \
+ experimental(uintx, ShenandoahGuaranteedGCInterval, 5*60*1000, \
+ "Adaptive and dynamic heuristics would guarantee a GC cycle " \
+ "at least once in this interval. This is useful when large idle " \
+ "intervals are present, where GC can run without stealing " \
+ "time from active application. Time is in milliseconds.") \
+ \
+ experimental(bool, ShenandoahAlwaysClearSoftRefs, false, \
+ "Clear soft references always, instead of using any smart " \
+ "cleanup policy. This minimizes footprint at the expense of " \
+ "more softref churn in applications.") \
+ \
+ experimental(bool, ShenandoahUncommit, true, \
+ "Allow Shenandoah to uncommit unused memory.") \
+ \
+ experimental(uintx, ShenandoahUncommitDelay, 5*60*1000, \
+ "Shenandoah would start to uncommit memory for regions that were" \
+ " not used for more than this time. First use after that would " \
+ "incur allocation stalls. Actively used regions would never be " \
+ "uncommitted, because they never decay. Time is in milliseconds. "\
+ "Setting this delay to 0 effectively makes Shenandoah " \
+ "uncommit the regions almost immediately.") \
+ \
+ experimental(bool, ShenandoahRegionSampling, false, \
+ "Turns on heap region sampling via JVMStat") \
+ \
+ experimental(int, ShenandoahRegionSamplingRate, 40, \
+ "Sampling rate for heap region sampling. " \
+ "Number of milliseconds between samples") \
+ \
+ experimental(uintx, ShenandoahControlIntervalMin, 1, \
+ "The minimum sleep interval for the control loop that drives " \
+ "the cycles. Lower values would increase GC responsiveness " \
+ "to changing heap conditions, at the expense of higher perf " \
+ "overhead. Time is in milliseconds.") \
+ \
+ experimental(uintx, ShenandoahControlIntervalMax, 10, \
+ "The maximum sleep interval for the control loop that drives " \
+ "the cycles. Lower values would increase GC responsiveness " \
+ "to changing heap conditions, at the expense of higher perf " \
+ "overhead. Time is in milliseconds.") \
+ \
+ experimental(uintx, ShenandoahControlIntervalAdjustPeriod, 1000, \
+ "The time period for one step in control loop interval " \
+ "adjustment. Lower values make adjustments faster, at the " \
+ "expense of higher perf overhead. Time is in milliseconds.") \
+ \
+ experimental(bool, ShenandoahCriticalControlThreadPriority, false, \
+ "Shenandoah control thread runs at critical scheduling priority.")\
+ \
+ diagnostic(bool, ShenandoahVerify, false, \
+ "Verify the Shenandoah garbage collector") \
+ \
+ diagnostic(intx, ShenandoahVerifyLevel, 4, \
+ "Shenandoah verification level: " \
+ "0 = basic heap checks; " \
+ "1 = previous level, plus basic region checks; " \
+ "2 = previous level, plus all roots; " \
+ "3 = previous level, plus all reachable objects; " \
+ "4 = previous level, plus all marked objects") \
+ \
+ diagnostic(bool, ShenandoahElasticTLAB, true, \
+ "Use Elastic TLABs with Shenandoah") \
+ \
+ diagnostic(bool, ShenandoahAllowMixedAllocs, true, \
+ "Allow mixing mutator and collector allocations in a single " \
+ "region") \
+ \
+ experimental(uintx, ShenandoahAllocSpikeFactor, 5, \
+ "The amount of heap space to reserve for absorbing allocation " \
+ "spikes. Larger value wastes more memory in non-emergency cases, "\
+ "but provides more safety in emergency cases. As a percentage " \
+ "of total heap size.") \
+ range(0,100) \
+ \
+ experimental(uintx, ShenandoahEvacReserve, 5, \
+ "Maximum amount of free space to reserve for evacuation. " \
+ "Larger values make GC more aggressive, while leaving less " \
+ "headroom for the application to allocate in. " \
+ "As a percentage of total heap size.") \
+ range(1,100) \
+ \
+ experimental(double, ShenandoahEvacWaste, 1.2, \
+ "How much waste evacuations produce within the reserved " \
+ "space. Larger values make evacuations more resilient " \
+ "against allocation failures, at the expense of smaller csets " \
+ "on each cycle.") \
+ range(1.0,100.0) \
+ \
+ experimental(bool, ShenandoahEvacReserveOverflow, true, \
+ "Allow evacuations to overflow the reserved space. " \
+ "Enabling it will make evacuations more resilient when " \
+ "evacuation reserve/waste is incorrect, at the risk that " \
+ "application allocations run out of memory too early.") \
+ \
+ diagnostic(bool, ShenandoahAllocationTrace, false, \
+ "Trace allocation latencies and stalls. Can be expensive when " \
+ "lots of allocations happen, and may introduce scalability " \
+ "bottlenecks.") \
+ \
+ diagnostic(intx, ShenandoahAllocationStallThreshold, 10000, \
+ "When allocation tracing is enabled, the allocation stalls " \
+ "larger than this threshold would be reported as warnings. " \
+ "Time is in microseconds.") \
+ \
+ experimental(uintx, ShenandoahEvacAssist, 10, \
+ "How many objects to evacuate on the LRB assist path. " \
+ "Use zero to disable.") \
+ \
+ experimental(bool, ShenandoahPacing, true, \
+ "Pace application allocations to give GC a chance to start " \
+ "and complete before allocation failure is reached.") \
+ \
+ experimental(uintx, ShenandoahPacingMaxDelay, 10, \
+ "Max delay for pacing application allocations. " \
+ "Time is in milliseconds.") \
+ \
+ experimental(uintx, ShenandoahPacingIdleSlack, 2, \
+ "Percent of heap counted as non-taxable allocations during idle. "\
+ "Larger value makes the pacing milder during idle phases, " \
+ "requiring fewer rendezvous with the control thread. Lower " \
+ "value makes the pacing control less responsive to " \
+ "out-of-cycle allocs.") \
+ range(0, 100) \
+ \
+ experimental(uintx, ShenandoahPacingCycleSlack, 10, \
+ "Percent of free space taken as non-taxable allocations during " \
+ "the GC cycle. Larger value makes the pacing milder at the " \
+ "beginning of the GC cycle. Lower value makes the pacing less " \
+ "uniform during the cycle.") \
+ range(0, 100) \
+ \
+ experimental(double, ShenandoahPacingSurcharge, 1.1, \
+ "Additional pacing tax surcharge to help unclutter the heap. " \
+ "Larger values make the pacing more aggressive. Lower values " \
+ "risk GC cycles finishing with less memory than was available " \
+ "at the beginning of the cycle.") \
+ range(1.0, 100.0) \
+ \
+ experimental(uintx, ShenandoahCriticalFreeThreshold, 1, \
+ "Percent of heap that needs to be free after recovery cycles, " \
+ "either Degenerated or Full GC. If this much space is not " \
+ "available, the next recovery step would be triggered.") \
+ range(0, 100) \
+ \
+ diagnostic(bool, ShenandoahDegeneratedGC, true, \
+ "Use Degenerated GC as the graceful degradation step. Disabling " \
+ "this leads to degradation to Full GC") \
+ \
+ experimental(uintx, ShenandoahFullGCThreshold, 3, \
+ "How many back-to-back Degenerated GCs to do before triggering " \
+ "a Full GC.") \
+ \
+ experimental(bool, ShenandoahImplicitGCInvokesConcurrent, false, \
+ "Should internally-caused GCs invoke concurrent cycles, or go " \
+ "to stop-the-world (degenerated/full)?") \
+ \
+ diagnostic(bool, ShenandoahHumongousMoves, true, \
+ "Allow moving humongous regions. This makes GC more resistant " \
+ "to external fragmentation that may otherwise fail other " \
+ "humongous allocations, at the expense of higher GC copying " \
+ "costs. Currently affects stop-the-world (full) cycle only.") \
+ \
+ diagnostic(bool, ShenandoahOOMDuringEvacALot, false, \
+ "Simulate OOM during evacuation frequently.") \
+ \
+ diagnostic(bool, ShenandoahAllocFailureALot, false, \
+ "Make lots of artificial allocation failures.") \
+ \
+ diagnostic(bool, ShenandoahTerminationTrace, false, \
+ "Trace task termination timings") \
+ \
+ diagnostic(bool, ShenandoahAlwaysPreTouch, false, \
+ "Pre-touch heap memory, overrides global AlwaysPreTouch") \
+ \
+ experimental(intx, ShenandoahMarkScanPrefetch, 32, \
+ "How many objects to prefetch ahead when traversing mark " \
+ "bitmaps. Set to 0 to disable prefetching.") \
+ range(0, 256) \
+ \
+ experimental(uintx, ShenandoahMarkLoopStride, 1000, \
+ "How many items are processed during one marking step") \
+ \
+ experimental(uintx, ShenandoahParallelRegionStride, 1024, \
+ "How many regions are processed in one stride during parallel " \
+ "iteration.") \
+ \
+ experimental(size_t, ShenandoahSATBBufferSize, 1 * K, \
+ "Number of entries in an SATB log buffer.") \
+ range(1, max_uintx) \
" \ + "Time is in milliseconds.") \ + \ + experimental(uint, ShenandoahParallelSafepointThreads, 4, \ + "Number of parallel threads used for safepoint prolog/epilog") \ + \ + experimental(bool, ShenandoahPreclean, true, \ + "Do concurrent preclean phase before final mark: process " \ + "definitely alive references to avoid dealing with them during " \ + "pause.") \ + \ + experimental(bool, ShenandoahSuspendibleWorkers, false, \ + "Suspend concurrent GC worker threads at safepoints") \ + \ + diagnostic(bool, ShenandoahSATBBarrier, true, \ + "Turn on/off SATB barriers in Shenandoah") \ + \ + diagnostic(bool, ShenandoahKeepAliveBarrier, true, \ + "Turn on/off keep alive barriers in Shenandoah") \ + \ + diagnostic(bool, ShenandoahStoreValEnqueueBarrier, false, \ + "Turn on/off enqueuing of oops for storeval barriers") \ + \ + diagnostic(bool, ShenandoahCASBarrier, true, \ + "Turn on/off CAS barriers in Shenandoah") \ + \ + diagnostic(bool, ShenandoahCloneBarrier, true, \ + "Turn on/off clone barriers in Shenandoah") \ + \ + diagnostic(bool, ShenandoahLoadRefBarrier, true, \ + "Turn on/off load-reference barriers in Shenandoah") \ + \ + experimental(bool, ShenandoahConcurrentScanCodeRoots, true, \ + "Scan code roots concurrently, instead of during a pause") \ + \ + experimental(uintx, ShenandoahCodeRootsStyle, 2, \ + "Use this style to scan code cache:" \ + " 0 - sequential iterator;" \ + " 1 - parallel iterator;" \ + " 2 - parallel iterator with cset filters;") \ + \ + diagnostic(bool, ShenandoahOptimizeStaticFinals, true, \ + "Optimize barriers on static final fields. " \ + "Turn it off for maximum compatibility with reflection or JNI " \ + "code that manipulates final fields.") \ + \ + experimental(bool, ShenandoahCommonGCStateLoads, false, \ + "Enable commonming for GC state loads in generated code.") \ + \ + develop(bool, ShenandoahVerifyOptoBarriers, false, \ + "Verify no missing barriers in C2") \ + \ + experimental(bool, ShenandoahLoopOptsAfterExpansion, true, \ + "Attempt more loop opts after barrier expansion") \ + \ + diagnostic(bool, ShenandoahSelfFixing, true, \ + "Fix references with load reference barrier. Disabling this " \ + "might degrade performance.") \ + + +#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAH_GLOBALS_HPP --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/hotspot/share/gc/shenandoah/vmStructs_shenandoah.hpp 2020-01-17 17:10:55.209127994 +0100 @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/src/hotspot/share/gc/shenandoah/vmStructs_shenandoah.hpp 2020-01-17 17:10:55.209127994 +0100
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#ifndef SHARE_VM_GC_SHENANDOAH_VMSTRUCTS_SHENANDOAH_HPP
+#define SHARE_VM_GC_SHENANDOAH_VMSTRUCTS_SHENANDOAH_HPP
+
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
+
+#define VM_STRUCTS_SHENANDOAH(nonstatic_field, volatile_nonstatic_field, static_field) \
+ static_field(ShenandoahHeapRegion, RegionSizeBytes, size_t) \
+ nonstatic_field(ShenandoahHeap, _num_regions, size_t) \
+ volatile_nonstatic_field(ShenandoahHeap, _used, size_t) \
+ volatile_nonstatic_field(ShenandoahHeap, _committed, size_t) \
+
+#define VM_INT_CONSTANTS_SHENANDOAH(declare_constant, declare_constant_with_value)
+
+#define VM_TYPES_SHENANDOAH(declare_type, \
+ declare_toplevel_type, \
+ declare_integer_type) \
+ declare_type(ShenandoahHeap, CollectedHeap) \
+ declare_type(ShenandoahHeapRegion, ContiguousSpace) \
+ declare_toplevel_type(ShenandoahHeap*) \
+ declare_toplevel_type(ShenandoahHeapRegion*) \
+
+#endif // SHARE_VM_GC_SHENANDOAH_VMSTRUCTS_SHENANDOAH_HPP
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shenandoah/ShenandoahHeap.java 2020-01-17 17:10:55.808127961 +0100
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +package sun.jvm.hotspot.gc.shenandoah; + +import sun.jvm.hotspot.gc.shared.CollectedHeap; +import sun.jvm.hotspot.gc.shared.CollectedHeapName; +import sun.jvm.hotspot.debugger.Address; +import sun.jvm.hotspot.runtime.VM; +import sun.jvm.hotspot.types.Type; +import sun.jvm.hotspot.types.TypeDataBase; +import sun.jvm.hotspot.memory.MemRegion; +import sun.jvm.hotspot.types.CIntegerField; +import java.io.PrintStream; +import java.util.Observable; +import java.util.Observer; + +public class ShenandoahHeap extends CollectedHeap { + static private CIntegerField numRegions; + static private CIntegerField used; + static private CIntegerField committed; + static { + VM.registerVMInitializedObserver(new Observer() { + public void update(Observable o, Object data) { + initialize(VM.getVM().getTypeDataBase()); + } + }); + } + + static private synchronized void initialize(TypeDataBase db) { + Type type = db.lookupType("ShenandoahHeap"); + numRegions = type.getCIntegerField("_num_regions"); + used = type.getCIntegerField("_used"); + committed = type.getCIntegerField("_committed"); + } + + @Override + public CollectedHeapName kind() { + return CollectedHeapName.SHENANDOAH; + } + + public long numOfRegions() { + return numRegions.getValue(addr); + } + + @Override + public long capacity() { + return numOfRegions() * ShenandoahHeapRegion.regionSizeBytes(); + } + + @Override + public long used() { + return used.getValue(addr); + } + + public long committed() { + return committed.getValue(addr); + } + + @Override + public void printOn(PrintStream tty) { + MemRegion mr = reservedRegion(); + tty.print("Shenandoah heap"); + tty.print(" [" + mr.start() + ", " + mr.end() + "]"); + tty.println(" region size " + ShenandoahHeapRegion.regionSizeBytes() / 1024 + " K"); + } + + public ShenandoahHeap(Address addr) { + super(addr); + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shenandoah/ShenandoahHeapRegion.java 2020-01-17 17:10:56.410127928 +0100 @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2017, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +package sun.jvm.hotspot.gc.shenandoah; + +import sun.jvm.hotspot.gc.shared.ContiguousSpace; +import sun.jvm.hotspot.types.CIntegerField; +import sun.jvm.hotspot.runtime.VM; +import sun.jvm.hotspot.types.Type; +import sun.jvm.hotspot.types.TypeDataBase; +import sun.jvm.hotspot.debugger.Address; + +import java.util.Observable; +import java.util.Observer; + + +public class ShenandoahHeapRegion extends ContiguousSpace { + private static CIntegerField RegionSizeBytes; + static { + VM.registerVMInitializedObserver(new Observer() { + public void update(Observable o, Object data) { + initialize(VM.getVM().getTypeDataBase()); + } + }); + } + + static private synchronized void initialize(TypeDataBase db) { + Type type = db.lookupType("ShenandoahHeapRegion"); + RegionSizeBytes = type.getCIntegerField("RegionSizeBytes"); + } + + public static long regionSizeBytes() { return RegionSizeBytes.getValue(); } + + public ShenandoahHeapRegion(Address addr) { + super(addr); + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatilesShenandoah.java 2020-01-17 17:10:57.019127894 +0100 @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */
+
+/*
+ * @test
+ * @summary C2 should use ldar, stlr and ldaxr+stlxr insns for volatile operations
+ * @library /test/lib /
+ *
+ * @modules java.base/jdk.internal.misc
+ *
+ * @requires os.arch=="aarch64" & vm.debug == true &
+ * vm.flavor == "server" & !vm.graal.enabled &
+ * vm.gc.Shenandoah
+ *
+ * @build compiler.c2.aarch64.TestVolatiles
+ * compiler.c2.aarch64.TestVolatileLoad
+ * compiler.c2.aarch64.TestUnsafeVolatileLoad
+ * compiler.c2.aarch64.TestVolatileStore
+ * compiler.c2.aarch64.TestUnsafeVolatileStore
+ * compiler.c2.aarch64.TestUnsafeVolatileCAS
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ * TestVolatileLoad Shenandoah
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ * TestVolatileStore Shenandoah
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ * TestUnsafeVolatileLoad Shenandoah
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ * TestUnsafeVolatileStore Shenandoah
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ * TestUnsafeVolatileCAS Shenandoah
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ * TestVolatileLoad ShenandoahTraversal
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ * TestVolatileStore ShenandoahTraversal
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ * TestUnsafeVolatileLoad ShenandoahTraversal
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ * TestUnsafeVolatileStore ShenandoahTraversal
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ * TestUnsafeVolatileCAS ShenandoahTraversal
+ */
+
+package compiler.c2.aarch64;
+
+public class TestVolatilesShenandoah {
+ public static void main(String args[]) throws Throwable
+ {
+ // delegate work to shared code
+ new TestVolatiles().runtest(args[0], args[1]);
+ }
+}
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/test/hotspot/jtreg/gc/shenandoah/TestAllocHumongousFragment.java 2020-01-17 17:10:57.624127861 +0100
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +/* + * @test TestAllocHumongousFragment + * @summary Make sure Shenandoah can recover from humongous allocation fragmentation + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx1g -Xms1g -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahTargetNumRegions=2048 + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify + * TestAllocHumongousFragment + * + * @run main/othervm -Xmx1g -Xms1g -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahTargetNumRegions=2048 + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahVerify + * TestAllocHumongousFragment + * + * @run main/othervm -Xmx1g -Xms1g -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahTargetNumRegions=2048 + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC + * TestAllocHumongousFragment + * + * @run main/othervm -Xmx1g -Xms1g -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahTargetNumRegions=2048 + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC + * TestAllocHumongousFragment + */ + +/* + * @test TestAllocHumongousFragment + * @summary Make sure Shenandoah can recover from humongous allocation fragmentation + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx1g -Xms1g -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahTargetNumRegions=2048 + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot -XX:+ShenandoahVerify + * TestAllocHumongousFragment + * + * @run main/othervm -Xmx1g -Xms1g -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahTargetNumRegions=2048 + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot -XX:+ShenandoahVerify + * TestAllocHumongousFragment + * + * @run main/othervm -Xmx1g -Xms1g -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahTargetNumRegions=2048 + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot + * TestAllocHumongousFragment + * + * @run main/othervm -Xmx1g -Xms1g -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahTargetNumRegions=2048 + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot + * TestAllocHumongousFragment + * + * @run main/othervm -Xmx1g -Xms1g -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahTargetNumRegions=2048 + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * -XX:+ShenandoahVerify + * TestAllocHumongousFragment + * + * @run main/othervm -Xmx1g -Xms1g -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahTargetNumRegions=2048 + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * TestAllocHumongousFragment + * + * @run main/othervm -Xmx1g -Xms1g -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahTargetNumRegions=2048 + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static + * TestAllocHumongousFragment + * + * @run main/othervm -Xmx1g -Xms1g -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions 
-XX:ShenandoahTargetNumRegions=2048
+ * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact
+ * TestAllocHumongousFragment
+ */
+
+/*
+ * @test TestAllocHumongousFragment
+ * @summary Make sure Shenandoah can recover from humongous allocation fragmentation
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ *
+ * @run main/othervm -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048
+ * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive
+ * -XX:+ShenandoahOOMDuringEvacALot -XX:+ShenandoahVerify
+ * TestAllocHumongousFragment
+ *
+ * @run main/othervm -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048
+ * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive
+ * -XX:+ShenandoahAllocFailureALot -XX:+ShenandoahVerify
+ * TestAllocHumongousFragment
+ *
+ * @run main/othervm -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048
+ * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive
+ * -XX:+ShenandoahOOMDuringEvacALot
+ * TestAllocHumongousFragment
+ *
+ * @run main/othervm -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048
+ * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive
+ * -XX:+ShenandoahAllocFailureALot
+ * TestAllocHumongousFragment
+ *
+ * @run main/othervm -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048
+ * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal
+ * -XX:+ShenandoahVerify
+ * TestAllocHumongousFragment
+ *
+ * @run main/othervm -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048
+ * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal
+ * TestAllocHumongousFragment
+ */
+
+import java.util.*;
+import java.util.concurrent.*;
+
+public class TestAllocHumongousFragment {
+
+ static final long TARGET_MB = Long.getLong("target", 30_000); // 30 Gb allocations
+ static final long LIVE_MB = Long.getLong("occupancy", 700); // 700 Mb alive
+
+ static volatile Object sink;
+
+ static List<int[]> objects;
+
+ public static void main(String[] args) throws Exception {
+ final int min = 128 * 1024;
+ final int max = 16 * 1024 * 1024;
+ // Average array takes 16 bytes of header plus 4 bytes per element
+ final long count = TARGET_MB * 1024 * 1024 / (16 + 4 * (min + (max - min) / 2));
+
+ objects = new ArrayList<>();
+ long current = 0;
+
+ Random r = new Random();
+ for (long c = 0; c < count; c++) {
+ while (current > LIVE_MB * 1024 * 1024) {
+ int idx = ThreadLocalRandom.current().nextInt(objects.size());
+ int[] remove = objects.remove(idx);
+ current -= remove.length * 4 + 16;
+ }
+
+ int[] newObj = new int[min + r.nextInt(max - min)];
+ current += newObj.length * 4 + 16;
+ objects.add(newObj);
+ sink = new Object();
+
+ System.out.println("Allocated: " + (current / 1024 / 1024) + " Mb");
+ }
+ }
+
+}
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/test/hotspot/jtreg/gc/shenandoah/TestAllocIntArrays.java 2020-01-17 17:10:58.228127828 +0100
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* + * @test TestAllocIntArrays + * @summary Acceptance tests: collector can withstand allocation + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahVerify + * TestAllocIntArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify + * TestAllocIntArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC + * TestAllocIntArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC + * TestAllocIntArrays + */ + +/* + * @test TestAllocIntArrays + * @summary Acceptance tests: collector can withstand allocation + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot -XX:+ShenandoahVerify + * TestAllocIntArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot -XX:+ShenandoahVerify + * TestAllocIntArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot + * TestAllocIntArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot + * TestAllocIntArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * TestAllocIntArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * -XX:+ShenandoahVerify + * TestAllocIntArrays + * + * @run main/othervm 
-XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * TestAllocIntArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static + * TestAllocIntArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact + * TestAllocIntArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC + * -XX:-UseTLAB -XX:+ShenandoahVerify + * TestAllocIntArrays + */ + +/* + * @test TestAllocIntArrays + * @summary Acceptance tests: collector can withstand allocation + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot -XX:+ShenandoahVerify + * TestAllocIntArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot -XX:+ShenandoahVerify + * TestAllocIntArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot + * TestAllocIntArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot + * TestAllocIntArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * TestAllocIntArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * -XX:+ShenandoahVerify + * TestAllocIntArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * TestAllocIntArrays + */ + +import java.util.Random; + +public class TestAllocIntArrays { + + static final long TARGET_MB = Long.getLong("target", 10_000); // 10 Gb allocation + + static volatile Object sink; + + public static void main(String[] args) throws Exception { + final int min = 0; + final int max = 384 * 1024; + long count = TARGET_MB * 1024 * 1024 / (16 + 4 * (min + (max - min) / 2)); + + Random r = new Random(); + for (long c = 0; c < count; c++) { + sink = new int[min + r.nextInt(max - min)]; + } + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/TestAllocObjectArrays.java 2020-01-17 17:10:58.835127794 +0100 @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* + * @test TestAllocObjectArrays + * @summary Acceptance tests: collector can withstand allocation + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahVerify + * TestAllocObjectArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify + * TestAllocObjectArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC + * TestAllocObjectArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC + * TestAllocObjectArrays + */ + +/* + * @test TestAllocObjectArrays + * @summary Acceptance tests: collector can withstand allocation + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot -XX:+ShenandoahVerify + * TestAllocObjectArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot -XX:+ShenandoahVerify + * TestAllocObjectArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot + * TestAllocObjectArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot + * TestAllocObjectArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * TestAllocObjectArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * -XX:+ShenandoahVerify + * TestAllocObjectArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * TestAllocObjectArrays + * + * 
@run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static + * TestAllocObjectArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact + * TestAllocObjectArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC + * -XX:-UseTLAB -XX:+ShenandoahVerify + * TestAllocObjectArrays + */ + +/* + * @test TestAllocObjectArrays + * @summary Acceptance tests: collector can withstand allocation + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot -XX:+ShenandoahVerify + * TestAllocObjectArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot -XX:+ShenandoahVerify + * TestAllocObjectArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot + * TestAllocObjectArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot + * TestAllocObjectArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * TestAllocObjectArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * -XX:+ShenandoahVerify + * TestAllocObjectArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * TestAllocObjectArrays + */ + +import java.util.Random; + +public class TestAllocObjectArrays { + + static final long TARGET_MB = Long.getLong("target", 10_000); // 10 Gb allocation + + static volatile Object sink; + + public static void main(String[] args) throws Exception { + final int min = 0; + final int max = 384 * 1024; + long count = TARGET_MB * 1024 * 1024 / (16 + 4 * (min + (max - min) / 2)); + + Random r = new Random(); + for (long c = 0; c < count; c++) { + sink = new Object[min + r.nextInt(max - min)]; + } + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/TestAllocObjects.java 2020-01-17 17:10:59.444127761 +0100 @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* + * @test TestAllocObjects + * @summary Acceptance tests: collector can withstand allocation + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahVerify + * TestAllocObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify + * TestAllocObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC + * TestAllocObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC + * TestAllocObjects + */ + +/* + * @test TestAllocObjects + * @summary Acceptance tests: collector can withstand allocation + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot -XX:+ShenandoahVerify + * TestAllocObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot -XX:+ShenandoahVerify + * TestAllocObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot + * TestAllocObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot + * TestAllocObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * TestAllocObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahSuspendibleWorkers + * TestAllocObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * -XX:+ShenandoahVerify + * TestAllocObjects + * + * @run main/othervm 
-Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * TestAllocObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * -XX:+ShenandoahSuspendibleWorkers + * TestAllocObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static + * TestAllocObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static + * -XX:+ShenandoahSuspendibleWorkers + * TestAllocObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact + * TestAllocObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact + * -XX:+ShenandoahSuspendibleWorkers + * TestAllocObjects + */ + +/* + * @test TestAllocObjects + * @summary Acceptance tests: collector can withstand allocation + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot -XX:+ShenandoahVerify + * TestAllocObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot -XX:+ShenandoahVerify + * TestAllocObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot + * TestAllocObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot + * TestAllocObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * TestAllocObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * -XX:+ShenandoahVerify + * TestAllocObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * TestAllocObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * -XX:+ShenandoahSuspendibleWorkers + * TestAllocObjects + */ + +import java.util.Random; + +public class TestAllocObjects { + + static final long TARGET_MB = Long.getLong("target", 10_000); // 10 Gb allocation + + static volatile Object sink; + + public static void main(String[] args) throws Exception { + long count = TARGET_MB * 1024 * 1024 / 16; + 
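+        // Sizing sketch (editorial comment, hedged): assuming each bare
+        // java.lang.Object costs roughly 16 bytes (the usual 64-bit HotSpot
+        // header plus alignment), dividing the byte budget by 16 sizes the
+        // loop below to churn approximately TARGET_MB megabytes in total.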
for (long c = 0; c < count; c++) { + sink = new Object(); + } + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/TestArrayCopyCheckCast.java 2020-01-17 17:11:00.048127727 +0100 @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* + * @test TestArrayCopyCheckCast + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:TieredStopAtLevel=0 -Xmx16m TestArrayCopyCheckCast + */ +public class TestArrayCopyCheckCast { + + static class Foo {} + static class Bar {} + + public static void main(String[] args) throws Exception { + try { + Object[] array1 = new Object[1]; + array1[0] = new Bar(); + Foo[] array2 = new Foo[1]; + System.arraycopy(array1, 0, array2, 0, 1); + throw new RuntimeException(); + } catch (ArrayStoreException ex) { + // expected + } + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/TestArrayCopyStress.java 2020-01-17 17:11:00.654127694 +0100 @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +import java.util.concurrent.*; + +/* + * @test TestArrayCopyStress + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:TieredStopAtLevel=0 -Xmx16m TestArrayCopyStress + */ +public class TestArrayCopyStress { + + private static final int ARRAY_SIZE = 1000; + private static final int ITERATIONS = 10000; + + static class Foo { + int num; + + Foo(int num) { + this.num = num; + } + } + + static class Bar {} + + public static void main(String[] args) throws Exception { + for (int i = 0; i < ITERATIONS; i++) { + testConjoint(); + } + } + + private static void testConjoint() { + Foo[] array = new Foo[ARRAY_SIZE]; + for (int i = 0; i < ARRAY_SIZE; i++) { + array[i] = new Foo(i); + } + + int src_idx = ThreadLocalRandom.current().nextInt(0, ARRAY_SIZE); + int dst_idx = ThreadLocalRandom.current().nextInt(0, ARRAY_SIZE); + int len = ThreadLocalRandom.current().nextInt(0, Math.min(ARRAY_SIZE - src_idx, ARRAY_SIZE - dst_idx)); + System.arraycopy(array, src_idx, array, dst_idx, len); + + for (int i = 0; i < ARRAY_SIZE; i++) { + if (i >= dst_idx && i < dst_idx + len) { + assertEquals(array[i].num, i - (dst_idx - src_idx)); + } else { + assertEquals(array[i].num, i); + } + } + } + + private static void assertEquals(int a, int b) { + if (a != b) throw new RuntimeException("assert failed"); + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/TestElasticTLAB.java 2020-01-17 17:11:01.258127661 +0100 @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +/* + * @test TestElasticTLAB + * @summary Test that Shenandoah is able to work with elastic TLABs + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:-UseTLAB -XX:-ShenandoahElasticTLAB -XX:+ShenandoahVerify TestElasticTLAB + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:-UseTLAB -XX:-ShenandoahElasticTLAB TestElasticTLAB + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:-UseTLAB -XX:+ShenandoahElasticTLAB -XX:+ShenandoahVerify TestElasticTLAB + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:-UseTLAB -XX:+ShenandoahElasticTLAB TestElasticTLAB + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+UseTLAB -XX:-ShenandoahElasticTLAB -XX:+ShenandoahVerify TestElasticTLAB + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+UseTLAB -XX:-ShenandoahElasticTLAB TestElasticTLAB + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+UseTLAB -XX:+ShenandoahElasticTLAB -XX:+ShenandoahVerify TestElasticTLAB + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+UseTLAB -XX:+ShenandoahElasticTLAB TestElasticTLAB + */ + +import java.util.Random; + +public class TestElasticTLAB { + + static final long TARGET_MB = Long.getLong("target", 10_000); // 10 Gb allocation + + static volatile Object sink; + + public static void main(String[] args) throws Exception { + final int min = 0; + final int max = 384 * 1024; + long count = TARGET_MB * 1024 * 1024 / (16 + 4 * (min + (max - min) / 2)); + + Random r = new Random(); + for (long c = 0; c < count; c++) { + sink = new int[min + r.nextInt(max - min)]; + } + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/TestEvilSyncBug.java 2020-01-17 17:11:01.864127627 +0100 @@ -0,0 +1,166 @@ +/* + * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +/* + * @test TestEvilSyncBug + * @summary Tests for crash/assert when attaching init thread during shutdown + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.management + * @run driver/timeout=480 TestEvilSyncBug + */ + +import java.util.*; +import java.util.concurrent.*; +import java.util.concurrent.locks.*; + +import jdk.test.lib.process.ProcessTools; +import jdk.test.lib.process.OutputAnalyzer; + +public class TestEvilSyncBug { + + private static final int NUM_RUNS = 100; + + static Thread[] hooks = new MyHook[10000]; + + public static void main(String[] args) throws Exception { + if (args.length > 0) { + test(); + } else { + ExecutorService pool = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors()); + + Future<?>[] fs = new Future<?>[NUM_RUNS]; + + for (int c = 0; c < NUM_RUNS; c++) { + Callable<Void> task = () -> { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xms128m", + "-Xmx128m", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UnlockDiagnosticVMOptions", + "-XX:+UseShenandoahGC", + "-XX:ShenandoahGCHeuristics=aggressive", + "TestEvilSyncBug", "test"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + return null; + }; + fs[c] = pool.submit(task); + } + + for (Future<?> f : fs) { + f.get(); + } + + pool.shutdown(); + pool.awaitTermination(1, TimeUnit.HOURS); + } + } + + private static void test() throws Exception { + + for (int t = 0; t < hooks.length; t++) { + hooks[t] = new MyHook(); + } + + ExecutorService service = Executors.newFixedThreadPool( + 2, + r -> { + Thread t = new Thread(r); + t.setDaemon(true); + return t; + } + ); + + List<Future<?>> futures = new ArrayList<>(); + for (int c = 0; c < 100; c++) { + Runtime.getRuntime().addShutdownHook(hooks[c]); + final Test[] tests = new Test[1000]; + for (int t = 0; t < tests.length; t++) { + tests[t] = new Test(); + } + + Future<?> f1 = service.submit(() -> { + Runtime.getRuntime().addShutdownHook(new MyHook()); + IntResult2 r = new IntResult2(); + for (Test test : tests) { + test.RL_Us(r); + } + }); + Future<?> f2 = service.submit(() -> { + Runtime.getRuntime().addShutdownHook(new MyHook()); + for (Test test : tests) { + test.WLI_Us(); + } + }); + + futures.add(f1); + futures.add(f2); + } + + for (Future<?> f : futures) { + f.get(); + } + } + + public static class IntResult2 { + int r1, r2; + } + + public static class Test { + final StampedLock lock = new StampedLock(); + + int x, y; + + public void RL_Us(IntResult2 r) { + StampedLock lock = this.lock; + long stamp = lock.readLock(); + r.r1 = x; + r.r2 = y; + lock.unlock(stamp); + } + + public void WLI_Us() { + try { + StampedLock lock = this.lock; + long stamp = lock.writeLockInterruptibly(); + x = 1; + y = 2; + lock.unlock(stamp); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } + + private static class MyHook extends Thread { + @Override + public void run() { + try { + Thread.sleep(10); + } catch (Exception e) {} + } + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/TestGCThreadGroups.java 2020-01-17 17:11:02.458127594 +0100 @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test TestGCThreadGroups + * @summary Test Shenandoah GC uses concurrent/parallel threads correctly + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4 + * -Dtarget=1000 + * TestGCThreadGroups + */ + +/** + * @test TestGCThreadGroups + * @summary Test Shenandoah GC uses concurrent/parallel threads correctly + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC + * -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4 + * -Dtarget=1000 + * TestGCThreadGroups + * + * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC + * -XX:-UseDynamicNumberOfGCThreads + * -Dtarget=1000 + * TestGCThreadGroups + * + * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC + * -XX:+ForceDynamicNumberOfGCThreads + * -Dtarget=1000 + * TestGCThreadGroups + * + * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4 + * -Dtarget=1000 + * TestGCThreadGroups + * + * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static + * -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4 + * -Dtarget=1000 + * TestGCThreadGroups + * + * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact + * -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4 + * -Dtarget=100 + * TestGCThreadGroups + * + * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4 + * -Dtarget=100 + * TestGCThreadGroups + */ + +/** + * @test TestGCThreadGroups + * @summary Test Shenandoah GC uses concurrent/parallel threads correctly + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4 + * -Dtarget=1000 + * TestGCThreadGroups + * + * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * 
-XX:ConcGCThreads=2 -XX:ParallelGCThreads=4 + * -Dtarget=1000 + * TestGCThreadGroups +*/ + +public class TestGCThreadGroups { + + static final long TARGET_MB = Long.getLong("target", 10_000); // 10 Gb allocation, around 1K cycles to handle + static final long STRIDE = 100_000; + + static volatile Object sink; + + public static void main(String[] args) throws Exception { + long count = TARGET_MB * 1024 * 1024 / 16; + for (long c = 0; c < count; c += STRIDE) { + for (long s = 0; s < STRIDE; s++) { + sink = new Object(); + } + Thread.sleep(1); + } + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/TestHeapUncommit.java 2020-01-17 17:11:03.067127561 +0100 @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* + * @test TestHeapUncommit + * @summary Acceptance tests: collector can withstand allocation + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahVerify + * TestHeapUncommit + * + * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify + * TestHeapUncommit + * + * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC + * TestHeapUncommit + * + * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC + * TestHeapUncommit + */ + +/* + * @test TestHeapUncommit + * @summary Acceptance tests: collector can withstand allocation + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * -XX:+ShenandoahVerify + * TestHeapUncommit + * + * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 + * 
-XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * TestHeapUncommit + * + * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static + * TestHeapUncommit + * + * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact + * TestHeapUncommit + * + * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * TestHeapUncommit + * + * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 + * -XX:+UseShenandoahGC + * -XX:-UseTLAB -XX:+ShenandoahVerify + * TestHeapUncommit + */ + +/* + * @test TestHeapUncommit + * @summary Acceptance tests: collector can withstand allocation + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * -XX:+ShenandoahVerify + * TestHeapUncommit + * + * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * TestHeapUncommit + * + * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * TestHeapUncommit + */ + +/* + * @test TestHeapUncommit + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled & (vm.bits == "64") + * + * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 -XX:+UseLargePages + * -XX:+UseShenandoahGC + * -XX:+ShenandoahVerify + * TestHeapUncommit + * + * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 -XX:+UseLargePages + * -XX:+UseShenandoahGC + * TestHeapUncommit + * + * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 -XX:+UseLargePages + * -XX:+UseShenandoahGC + * -XX:-UseTLAB -XX:+ShenandoahVerify + * TestHeapUncommit + */ + +import java.util.Random; + +public class TestHeapUncommit { + + static final long TARGET_MB = Long.getLong("target", 10_000); // 10 Gb allocation + + static volatile Object sink; + + public static void main(String[] args) throws Exception { + final int min = 0; + final int max = 384 * 1024; + long count = TARGET_MB * 1024 * 1024 / (16 + 4 * (min + (max - min) / 2)); + + Random r = new Random(); + for (long c = 0; c < count; c++) { + sink = new int[min + r.nextInt(max - min)]; + } + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/TestHumongousThreshold.java 2020-01-17 17:11:03.677127527 +0100 @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* + * @test TestHumongousThreshold + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g + * -XX:+ShenandoahVerify + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g + * -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=50 + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g + * -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=90 + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g + * -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=99 + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g + * -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=100 + * TestHumongousThreshold + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g + * -XX:-UseTLAB -XX:+ShenandoahVerify + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g + * -XX:-UseTLAB -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=50 + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g + * -XX:-UseTLAB -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=90 + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g + * -XX:-UseTLAB -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=99 + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g + * -XX:-UseTLAB -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=100 + * TestHumongousThreshold + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g + * -XX:ShenandoahHumongousThreshold=90 -XX:ShenandoahGCHeuristics=aggressive + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g + * -XX:-UseTLAB -XX:ShenandoahHumongousThreshold=90 -XX:ShenandoahGCHeuristics=aggressive + * TestHumongousThreshold + */ + +/* + * @test 
TestHumongousThreshold + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled & (vm.bits == "64") + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g + * -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g + * -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=50 + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g + * -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=90 + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g + * -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=99 + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g + * -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=100 + * TestHumongousThreshold + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g + * -XX:-UseTLAB -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g + * -XX:-UseTLAB -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=50 + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g + * -XX:-UseTLAB -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=90 + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g + * -XX:-UseTLAB -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=99 + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g + * -XX:-UseTLAB -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=100 + * TestHumongousThreshold + */ + +import java.util.Random; + +public class TestHumongousThreshold { + + static final long TARGET_MB = Long.getLong("target", 20_000); // 20 Gb allocation + + static volatile Object sink; + + public static void main(String[] args) throws Exception { + final int min = 0; + final int max = 384 * 1024; + long count = TARGET_MB * 1024 * 1024 / (16 + 4 * (min + (max - min) / 2)); + + Random r = new Random(); + for (long c = 0; c < count; c++) { + sink = new int[min + r.nextInt(max - min)]; + } + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/TestLargeObjectAlignment.java 2020-01-17 17:11:04.285127494 +0100 @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* + * @test TestLargeObjectAlignment + * @summary Shenandoah crashes with -XX:ObjectAlignmentInBytes=16 + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled & (vm.bits == "64") + * + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -Xint TestLargeObjectAlignment + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -XX:-TieredCompilation TestLargeObjectAlignment + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -XX:TieredStopAtLevel=1 TestLargeObjectAlignment + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -XX:TieredStopAtLevel=4 TestLargeObjectAlignment + */ + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ThreadLocalRandom; + +public class TestLargeObjectAlignment { + + static final int SLABS_COUNT = Integer.getInteger("slabs", 10000); + static final int NODE_COUNT = Integer.getInteger("nodes", 10000); + static final long TIME_NS = 1000L * 1000L * Integer.getInteger("timeMs", 5000); + + static Object[] objects; + + public static void main(String[] args) throws Exception { + objects = new Object[SLABS_COUNT]; + + long start = System.nanoTime(); + while (System.nanoTime() - start < TIME_NS) { + objects[ThreadLocalRandom.current().nextInt(SLABS_COUNT)] = createSome(); + } + } + + public static Object createSome() { + List<Integer> result = new ArrayList<>(); + for (int c = 0; c < NODE_COUNT; c++) { + result.add(new Integer(c)); + } + return result; + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/TestLotsOfCycles.java 2020-01-17 17:11:04.895127460 +0100 @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +/* + * @test TestLotsOfCycles + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC + * -Dtarget=10000 + * TestLotsOfCycles + * + * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC + * -Dtarget=10000 + * TestLotsOfCycles + */ + +/* + * @test TestLotsOfCycles + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot + * -Dtarget=1000 + * TestLotsOfCycles + * + * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot + * -Dtarget=1000 + * TestLotsOfCycles + * + * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -Dtarget=1000 + * TestLotsOfCycles + * + * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * -Dtarget=10000 + * TestLotsOfCycles + * + * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static + * -Dtarget=10000 + * TestLotsOfCycles + * + * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact + * -Dtarget=1000 + * TestLotsOfCycles + */ + +/* + * @test TestLotsOfCycles + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot + * -Dtarget=1000 + * TestLotsOfCycles + * + * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot + * -Dtarget=1000 + * TestLotsOfCycles + * + * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -Dtarget=1000 + * TestLotsOfCycles + * + * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * -Dtarget=10000 + * TestLotsOfCycles + */ + +public class TestLotsOfCycles { + + static final long TARGET_MB = Long.getLong("target", 10_000); // 10 Gb allocation, around 1K cycles to handle + static final long STRIDE = 100_000; + + static volatile Object sink; + + public static void main(String[] args) throws Exception { + long count = TARGET_MB * 1024 * 1024 / 16; + for (long c = 0; c < count; c += STRIDE) { + for (long s = 0; s < STRIDE; s++) { + sink = new 
Object();
+            }
+            Thread.sleep(1);
+        }
+    }
+
+}
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/test/hotspot/jtreg/gc/shenandoah/TestObjItrWithHeapDump.java	2020-01-17 17:11:05.509127426 +0100
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestObjItrWithHeapDump
+ * @summary Test heap dump triggered heap object iteration
+ * @key gc
+ * @bug 8225014
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ * @library /test/lib
+ * @run driver TestObjItrWithHeapDump
+ */
+
+import java.util.*;
+
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+
+public class TestObjItrWithHeapDump {
+    public static void testWith(String... args) throws Exception {
+        String[] cmds = Arrays.copyOf(args, args.length + 2);
+        cmds[args.length] = TestObjItrWithHeapDump.class.getName();
+        cmds[args.length + 1] = "test";
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(cmds);
+
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+        output.shouldContain("Class Histogram (before full gc)");
+        output.shouldContain("Class Histogram (after full gc)");
+    }
+
+    public static void main(String[] args) throws Exception {
+        if (args.length > 0 && args[0].equals("test")) {
+            System.gc();
+            System.exit(0);
+        }
+
+        String[][][] modeHeuristics = new String[][][] {
+             {{"normal"},    {"adaptive", "compact", "static", "aggressive"}},
+             {{"traversal"}, {"adaptive", "aggressive"}},
+             {{"passive"},   {"passive"}}
+        };
+
+        for (String[][] mh : modeHeuristics) {
+            String mode = mh[0][0];
+            String[] heuristics = mh[1];
+            for (String h : heuristics) {
+                testWith("-XX:+UnlockDiagnosticVMOptions",
+                         "-XX:+UnlockExperimentalVMOptions",
+                         "-XX:+UseShenandoahGC",
+                         "-XX:-ShenandoahDegeneratedGC",
+                         "-XX:ShenandoahGCMode=" + mode,
+                         "-XX:ShenandoahGCHeuristics=" + h,
+                         "-Xlog:gc+classhisto=trace",
+                         "-XX:-ExplicitGCInvokesConcurrent",
+                         "-Xmx512M"
+                );
+            }
+        }
+    }
+}
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/test/hotspot/jtreg/gc/shenandoah/TestParallelRefprocSanity.java	2020-01-17 17:11:06.119127393 +0100
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* + * @test TestParallelRefprocSanity + * @summary Test that reference processing works with both parallel and non-parallel variants. + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g TestParallelRefprocSanity + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:-ParallelRefProcEnabled TestParallelRefprocSanity + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:+ParallelRefProcEnabled TestParallelRefprocSanity + */ + +import java.lang.ref.*; + +public class TestParallelRefprocSanity { + + static final long TARGET_MB = Long.getLong("target", 10_000); // 10 Gb allocation + + static volatile Object sink; + + public static void main(String[] args) throws Exception { + long count = TARGET_MB * 1024 * 1024 / 32; + for (long c = 0; c < count; c++) { + sink = new WeakReference(new Object()); + } + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/TestPeriodicGC.java 2020-01-17 17:11:06.727127359 +0100 @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* + * @test TestPeriodicGC + * @summary Test that periodic GC is working + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @library /test/lib + * @run driver TestPeriodicGC + */ + +import java.util.*; + +import jdk.test.lib.Asserts; +import jdk.test.lib.process.ProcessTools; +import jdk.test.lib.process.OutputAnalyzer; + +public class TestPeriodicGC { + + public static void testWith(String msg, boolean periodic, String... 
args) throws Exception { + String[] cmds = Arrays.copyOf(args, args.length + 2); + cmds[args.length] = TestPeriodicGC.class.getName(); + cmds[args.length + 1] = "test"; + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(cmds); + + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + if (periodic && !output.getOutput().contains("Trigger: Time since last GC")) { + throw new AssertionError(msg + ": Should have periodic GC in logs"); + } + if (!periodic && output.getOutput().contains("Trigger: Time since last GC")) { + throw new AssertionError(msg + ": Should not have periodic GC in logs"); + } + } + + public static void main(String[] args) throws Exception { + if (args.length > 0 && args[0].equals("test")) { + Thread.sleep(5000); // stay idle + return; + } + + String[] enabled = new String[] { + "adaptive", + "compact", + "static", + }; + + for (String h : enabled) { + testWith("Short period with " + h, + true, + "-Xlog:gc", + "-XX:+UnlockDiagnosticVMOptions", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:ShenandoahGCHeuristics=" + h, + "-XX:ShenandoahGuaranteedGCInterval=1000" + ); + + testWith("Long period with " + h, + false, + "-Xlog:gc", + "-XX:+UnlockDiagnosticVMOptions", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:ShenandoahGCHeuristics=" + h, + "-XX:ShenandoahGuaranteedGCInterval=100000" // deliberately too long + ); + } + + testWith("Short period with traversal mode", + true, + "-Xlog:gc", + "-XX:+UnlockDiagnosticVMOptions", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:ShenandoahGCMode=traversal", + "-XX:ShenandoahGuaranteedGCInterval=1000" + ); + + testWith("Long period with traversal mode", + false, + "-Xlog:gc", + "-XX:+UnlockDiagnosticVMOptions", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:ShenandoahGCMode=traversal", + "-XX:ShenandoahGuaranteedGCInterval=100000" // deliberately too long + ); + + testWith("Short period with aggressive", + false, + "-Xlog:gc", + "-XX:+UnlockDiagnosticVMOptions", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:ShenandoahGCHeuristics=aggressive", + "-XX:ShenandoahGuaranteedGCInterval=1000" + ); + testWith("Short period with passive", + false, + "-Xlog:gc", + "-XX:+UnlockDiagnosticVMOptions", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:ShenandoahGCMode=passive", + "-XX:ShenandoahGuaranteedGCInterval=1000" + ); + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/TestRefprocSanity.java 2020-01-17 17:11:07.330127326 +0100 @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestRefprocSanity
+ * @summary Test that null references/referents work fine
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ *
+ * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
+ *                   -XX:+UseShenandoahGC
+ *                   -XX:+ShenandoahVerify
+ *                   TestRefprocSanity
+ *
+ * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
+ *                   -XX:+UseShenandoahGC
+ *                   TestRefprocSanity
+ *
+ * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
+ *                   -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive
+ *                   TestRefprocSanity
+ */

+/*
+ * @test TestRefprocSanity
+ * @summary Test that null references/referents work fine
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ *
+ * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
+ *                   -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal
+ *                   -XX:+ShenandoahVerify
+ *                   TestRefprocSanity
+ *
+ * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
+ *                   -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal
+ *                   TestRefprocSanity
+ *
+ * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
+ *                   -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive
+ *                   TestRefprocSanity
+ */
+
+import java.lang.ref.*;
+
+public class TestRefprocSanity {
+
+    static final long TARGET_MB = Long.getLong("target", 10_000); // 10 Gb allocation
+    static final int WINDOW = 10_000;
+
+    static final Reference<MyObject>[] refs = new Reference[WINDOW];
+
+    public static void main(String[] args) throws Exception {
+        long count = TARGET_MB * 1024 * 1024 / 32;
+        int rIdx = 0;
+
+        ReferenceQueue<MyObject> rq = new ReferenceQueue<>();
+
+        for (int c = 0; c < WINDOW; c++) {
+            refs[c] = select(c, new MyObject(c), rq);
+        }
+
+        for (int c = 0; c < count; c++) {
+            verifyRefAt(rIdx);
+            refs[rIdx] = select(c, new MyObject(rIdx), rq);
+
+            rIdx++;
+            if (rIdx >= WINDOW) {
+                rIdx = 0;
+            }
+            while (rq.poll() != null); // drain
+        }
+    }
+
+    static Reference<MyObject> select(int v, MyObject ext, ReferenceQueue<MyObject> rq) {
+        switch (v % 10) {
+            case 0: return new SoftReference<>(null);
+            case 1: return new SoftReference<>(null, rq);
+            case 2: return new SoftReference<>(ext);
+            case 3: return new SoftReference<>(ext, rq);
+            case 4: return new WeakReference<>(null);
+            case 5: return new WeakReference<>(null, rq);
+            case 6: return new WeakReference<>(ext);
+            case 7: return new WeakReference<>(ext, rq);
+            case 8: return new PhantomReference<>(null, rq);
+            case 9: return new PhantomReference<>(ext, rq);
+            default: throw new IllegalStateException();
+        }
+    }
+
+    static void verifyRefAt(int idx) {
+        Reference<MyObject> ref = refs[idx];
+        MyObject mo = ref.get();
+        if (mo != null && mo.x != idx) {
+            throw new IllegalStateException("Referent tag is incorrect: " + mo.x + ", should be " + idx);
+        }
+    }
+
+    static class MyObject {
+        final int x;
+
+        public MyObject(int x) {
+            this.x = x;
+        }
+    }
+
+}
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/test/hotspot/jtreg/gc/shenandoah/TestRegionSampling.java	2020-01-17 17:11:07.937127293 +0100
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* + * @test TestRegionSampling + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahRegionSampling + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC + * TestRegionSampling + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahRegionSampling + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC + * TestRegionSampling + */ + +/* + * @test TestRegionSampling + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahRegionSampling + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * TestRegionSampling + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahRegionSampling + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static + * TestRegionSampling + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahRegionSampling + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact + * TestRegionSampling + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahRegionSampling + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * TestRegionSampling + */ + +/* + * @test TestRegionSampling + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahRegionSampling + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * TestRegionSampling + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahRegionSampling + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * TestRegionSampling + * + */ + +public class TestRegionSampling { + + static final long TARGET_MB = Long.getLong("target", 2_000); // 2 Gb allocation + + static volatile Object sink; + + public static void main(String[] args) throws Exception { + long count = TARGET_MB * 1024 * 1024 / 16; + for (long c = 0; c < count; c++) { + sink = new Object(); + } + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/TestRetainObjects.java 2020-01-17 17:11:08.545127259 +0100 @@ -0,0 +1,142 @@ +/* + * Copyright (c) 
2017, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* + * @test TestRetainObjects + * @summary Acceptance tests: collector can deal with retained objects + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahVerify + * TestRetainObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify + * TestRetainObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC + * TestRetainObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC + * TestRetainObjects + */ + +/* + * @test TestRetainObjects + * @summary Acceptance tests: collector can deal with retained objects + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot + * TestRetainObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot + * TestRetainObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * TestRetainObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * -XX:+ShenandoahVerify + * TestRetainObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * TestRetainObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static + * TestRetainObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * 
-XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact + * TestRetainObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC + * -XX:-UseTLAB -XX:+ShenandoahVerify + * TestRetainObjects + */ + +/* + * @test TestRetainObjects + * @summary Acceptance tests: collector can deal with retained objects + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot + * TestRetainObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot + * TestRetainObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * TestRetainObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * -XX:+ShenandoahVerify + * TestRetainObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * TestRetainObjects + */ + +public class TestRetainObjects { + + static final int COUNT = 10_000_000; + static final int WINDOW = 10_000; + + static final String[] reachable = new String[WINDOW]; + + public static void main(String[] args) throws Exception { + int rIdx = 0; + for (int c = 0; c < COUNT; c++) { + reachable[rIdx] = ("LargeString" + c); + rIdx++; + if (rIdx >= WINDOW) { + rIdx = 0; + } + } + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/TestSieveObjects.java 2020-01-17 17:11:09.146127226 +0100 @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +/* + * @test TestSieveObjects + * @summary Acceptance tests: collector can deal with retained objects + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahVerify + * TestSieveObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify + * TestSieveObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC + * TestSieveObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC + * TestSieveObjects + */ + +/* + * @test TestSieveObjects + * @summary Acceptance tests: collector can deal with retained objects + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot + * TestSieveObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot + * TestSieveObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * TestSieveObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * -XX:+ShenandoahVerify + * TestSieveObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * TestSieveObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static + * TestSieveObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact + * TestSieveObjects + * + * @run main/othervm/timeout=240 -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC + * -XX:-UseTLAB -XX:+ShenandoahVerify + * TestSieveObjects + */ + +/* + * @test TestSieveObjects + * @summary Acceptance tests: collector can deal with retained objects + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot + * TestSieveObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot + * TestSieveObjects + * + * @run 
main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * TestSieveObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * -XX:+ShenandoahVerify + * TestSieveObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * TestSieveObjects + */ + +import java.util.concurrent.ThreadLocalRandom; + +public class TestSieveObjects { + + static final int COUNT = 100_000_000; + static final int WINDOW = 1_000_000; + static final int PAYLOAD = 100; + + static final MyObject[] arr = new MyObject[WINDOW]; + + public static void main(String[] args) throws Exception { + int rIdx = 0; + for (int c = 0; c < COUNT; c++) { + MyObject v = arr[rIdx]; + if (v != null) { + if (v.x != rIdx) { + throw new IllegalStateException("Illegal value at index " + rIdx + ": " + v.x); + } + if (ThreadLocalRandom.current().nextInt(1000) > 100) { + arr[rIdx] = null; + } + } else { + if (ThreadLocalRandom.current().nextInt(1000) > 500) { + arr[rIdx] = new MyObject(rIdx); + } + } + rIdx++; + if (rIdx >= WINDOW) { + rIdx = 0; + } + } + } + + public static class MyObject { + public int x; + public byte[] payload; + + public MyObject(int x) { + this.x = x; + this.payload = new byte[PAYLOAD]; + } + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/TestSmallHeap.java 2020-01-17 17:11:09.750127193 +0100 @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +/* + * @test TestSmallHeap + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC TestSmallHeap + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx64m TestSmallHeap + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx32m TestSmallHeap + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx16m TestSmallHeap + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx8m TestSmallHeap + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx4m TestSmallHeap + */ + +public class TestSmallHeap { + + public static void main(String[] args) throws Exception { + System.out.println("Hello World!"); + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/TestStringDedup.java 2020-01-17 17:11:10.355127159 +0100 @@ -0,0 +1,188 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +/* + * @test TestStringDedup + * @summary Test Shenandoah string deduplication implementation + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @library /test/lib + * @modules java.base/jdk.internal.misc:open + * @modules java.base/java.lang:open + * java.management + * + * @run main/othervm -Xmx256m -Xlog:gc+stats -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC + * TestStringDedup + * + * @run main/othervm -Xmx256m -Xlog:gc+stats -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC + * TestStringDedup + */ + +/* + * @test TestStringDedup + * @summary Test Shenandoah string deduplication implementation + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @library /test/lib + * @modules java.base/jdk.internal.misc:open + * @modules java.base/java.lang:open + * java.management + * + * @run main/othervm -Xmx256m -Xlog:gc+stats -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * TestStringDedup + * + * @run main/othervm -Xmx256m -Xlog:gc+stats -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication + * -XX:+UseShenandoahGC + * TestStringDedup + * + * @run main/othervm -Xmx256m -Xlog:gc+stats -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact + * TestStringDedup + */ + +/* + * @test TestStringDedup + * @summary Test Shenandoah string deduplication implementation + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @library /test/lib + * @modules java.base/jdk.internal.misc:open + * @modules java.base/java.lang:open + * java.management + * + * @run main/othervm -Xmx256m -Xlog:gc+stats -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * TestStringDedup + * + * @run main/othervm -Xmx256m -Xlog:gc+stats -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * TestStringDedup + */ + +import java.lang.reflect.*; +import java.util.*; + +import sun.misc.*; + +public class TestStringDedup { + private static Field valueField; + private static Unsafe unsafe; + + private static final int UniqueStrings = 20; + + static { + try { + Field field = Unsafe.class.getDeclaredField("theUnsafe"); + field.setAccessible(true); + unsafe = (Unsafe) field.get(null); + + valueField = String.class.getDeclaredField("value"); + valueField.setAccessible(true); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private static Object getValue(String string) { + try { + return valueField.get(string); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + static class StringAndId { + private String str; + private int id; + + public StringAndId(String str, int id) { + this.str = str; + this.id = id; + } + + public String str() { + return str; + } + + public int id() { + return id; + } + } + + private static void generateStrings(ArrayList strs, int unique_strs) { + Random rn = new Random(); + for (int u = 0; u < 
unique_strs; u++) {
+            int n = rn.nextInt() % 10;
+            n = Math.max(n, 2);
+            for (int index = 0; index < n; index++) {
+                strs.add(new StringAndId("Unique String " + u, u));
+            }
+        }
+    }
+
+    private static int verifyDedupString(ArrayList<StringAndId> strs) {
+        HashMap<Object, StringAndId> seen = new HashMap<>();
+        int total = 0;
+        int dedup = 0;
+
+        for (StringAndId item : strs) {
+            total++;
+            StringAndId existing_item = seen.get(getValue(item.str()));
+            if (existing_item == null) {
+                seen.put(getValue(item.str()), item);
+            } else {
+                if (item.id() != existing_item.id() ||
+                    !item.str().equals(existing_item.str())) {
+                    System.out.println("StringDedup error:");
+                    System.out.println("String: " + item.str() + " != " + existing_item.str());
+                    throw new RuntimeException("StringDedup Test failed");
+                } else {
+                    dedup++;
+                }
+            }
+        }
+        System.out.println("Dedup: " + dedup + "/" + total + " unique: " + (total - dedup));
+        return (total - dedup);
+    }
+
+    public static void main(String[] args) {
+        ArrayList<StringAndId> astrs = new ArrayList<>();
+        generateStrings(astrs, UniqueStrings);
+        System.gc();
+        System.gc();
+        System.gc();
+        System.gc();
+        System.gc();
+
+        if (verifyDedupString(astrs) != UniqueStrings) {
+            // Cannot guarantee that all strings are deduplicated; there can
+            // still be pending items in the dedup queues.
+            System.out.println("Not all strings are deduplicated");
+        }
+    }
+}
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/test/hotspot/jtreg/gc/shenandoah/TestStringDedupStress.java	2020-01-17 17:11:10.966127126 +0100
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +/* + * @test TestStringDedupStress + * @summary Test Shenandoah string deduplication implementation + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @library /test/lib + * @modules java.base/jdk.internal.misc:open + * @modules java.base/java.lang:open + * java.management + * + * @run main/othervm -Xmx1g -Xlog:gc+stats -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC + * TestStringDedupStress + * + * @run main/othervm -Xmx1g -Xlog:gc+stats -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC + * TestStringDedupStress + */ + +/* + * @test TestStringDedupStress + * @summary Test Shenandoah string deduplication implementation + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @library /test/lib + * @modules java.base/jdk.internal.misc:open + * @modules java.base/java.lang:open + * java.management + * + * @run main/othervm -Xmx1g -Xlog:gc+stats -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication + * -XX:+UseShenandoahGC + * -DtargetStrings=3000000 + * TestStringDedupStress + * + * @run main/othervm -Xmx1g -Xlog:gc+stats -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -DtargetStrings=2000000 + * TestStringDedupStress + * + * @run main/othervm -Xmx1g -Xlog:gc+stats -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot + * -DtargetStrings=2000000 + * TestStringDedupStress + * + * @run main/othervm -Xmx1g -Xlog:gc+stats -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact + * TestStringDedupStress + * + * @run main/othervm -Xmx1g -Xlog:gc+stats -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication + * -XX:+UseShenandoahGC + * -XX:ShenandoahUpdateRefsEarly=off + * -DtargetStrings=3000000 + * TestStringDedupStress + * + * @run main/othervm -Xmx1g -Xlog:gc+stats -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact + * -XX:ShenandoahUpdateRefsEarly=off + * -DtargetStrings=2000000 + * TestStringDedupStress + * + * @run main/othervm -Xmx1g -Xlog:gc+stats -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:ShenandoahUpdateRefsEarly=off + * -DtargetStrings=2000000 + * TestStringDedupStress + * + * @run main/othervm -Xmx1g -Xlog:gc+stats -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:ShenandoahUpdateRefsEarly=off -XX:+ShenandoahOOMDuringEvacALot + * -DtargetStrings=2000000 + * TestStringDedupStress + */ + + /* + * @test TestStringDedupStress + * @summary Test Shenandoah string deduplication implementation + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @library 
/test/lib
+ * @modules java.base/jdk.internal.misc:open
+ * @modules java.base/java.lang:open
+ *          java.management
+ *
+ * @run main/othervm -Xmx1g -Xlog:gc+stats -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication
+ *                   -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal
+ *                   TestStringDedupStress
+ *
+ * @run main/othervm -Xmx1g -Xlog:gc+stats -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication
+ *                   -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive
+ *                   -DtargetStrings=2000000
+ *                   TestStringDedupStress
+ *
+ * @run main/othervm -Xmx1g -Xlog:gc+stats -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication
+ *                   -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal
+ *                   -XX:+ShenandoahOOMDuringEvacALot
+ *                   -DtargetStrings=2000000
+ *                   TestStringDedupStress
+ *
+ * @run main/othervm -Xmx1g -Xlog:gc+stats -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication
+ *                   -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive
+ *                   -XX:+ShenandoahOOMDuringEvacALot
+ *                   -DtargetStrings=2000000
+ *                   TestStringDedupStress
+ */
+
+import java.lang.management.*;
+import java.lang.reflect.*;
+import java.util.*;
+
+import sun.misc.*;
+
+public class TestStringDedupStress {
+    private static Field valueField;
+    private static Unsafe unsafe;
+
+    private static final int TARGET_STRINGS = Integer.getInteger("targetStrings", 2_500_000);
+    private static final long MAX_REWRITE_GC_CYCLES = 6;
+    private static final long MAX_REWRITE_TIME = 30*1000; // ms
+
+    private static final int UNIQUE_STRINGS = 20;
+
+    static {
+        try {
+            Field field = Unsafe.class.getDeclaredField("theUnsafe");
+            field.setAccessible(true);
+            unsafe = (Unsafe) field.get(null);
+
+            valueField = String.class.getDeclaredField("value");
+            valueField.setAccessible(true);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private static Object getValue(String string) {
+        try {
+            return valueField.get(string);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    static class StringAndId {
+        private String str;
+        private int id;
+
+        public StringAndId(String str, int id) {
+            this.str = str;
+            this.id = id;
+        }
+
+        public String str() {
+            return str;
+        }
+
+        public int id() {
+            return id;
+        }
+    }
+
+    // Generate uniqueStrings number of strings
+    private static void generateStrings(ArrayList<StringAndId> strs, int uniqueStrings) {
+        Random rn = new Random();
+        for (int u = 0; u < uniqueStrings; u++) {
+            int n = rn.nextInt(uniqueStrings);
+            strs.add(new StringAndId("Unique String " + n, n));
+        }
+    }
+
+    private static int verifyDedupString(ArrayList<StringAndId> strs) {
+        Map<Object, StringAndId> seen = new HashMap<>(TARGET_STRINGS*2);
+        int total = 0;
+        int dedup = 0;
+
+        for (StringAndId item : strs) {
+            total++;
+            StringAndId existingItem = seen.get(getValue(item.str()));
+            if (existingItem == null) {
+                seen.put(getValue(item.str()), item);
+            } else {
+                if (item.id() != existingItem.id() ||
+                    !item.str().equals(existingItem.str())) {
+                    System.out.println("StringDedup error:");
+                    System.out.println("id: " + item.id() + " != " + existingItem.id());
+                    System.out.println("or String: " + item.str() + " != " + existingItem.str());
+                    throw new RuntimeException("StringDedup Test failed");
+                } else {
+                    dedup++;
+                }
+            }
+        }
+        System.out.println("Dedup: " + dedup + "/" + total + " unique: " + (total - dedup));
+        return (total - dedup);
+    }
+
+    static volatile ArrayList<StringAndId> astrs = new ArrayList<>();
+    static GarbageCollectorMXBean gcCycleMBean;
+
+    public static void main(String[] args) {
+        Random rn = new Random();
+
+        for (GarbageCollectorMXBean bean : ManagementFactory.getGarbageCollectorMXBeans()) {
+            if ("Shenandoah Cycles".equals(bean.getName())) {
+                gcCycleMBean = bean;
+                break;
+            }
+        }
+
+        if (gcCycleMBean == null) {
+            throw new RuntimeException("Cannot find Shenandoah GC cycle MBean");
+        }
+
+        // Generate roughly TARGET_STRINGS strings, only UNIQUE_STRINGS are unique
+        int genIters = TARGET_STRINGS / UNIQUE_STRINGS;
+        for (int index = 0; index < genIters; index++) {
+            generateStrings(astrs, UNIQUE_STRINGS);
+        }
+
+        long cycleBeforeRewrite = gcCycleMBean.getCollectionCount();
+        long timeBeforeRewrite = System.currentTimeMillis();
+
+        long loop = 1;
+        while (true) {
+            int arrSize = astrs.size();
+            int index = rn.nextInt(arrSize);
+            StringAndId item = astrs.get(index);
+            int n = rn.nextInt(UNIQUE_STRINGS);
+            item.str = "Unique String " + n;
+            item.id = n;
+
+            if (loop++ % 1000 == 0) {
+                // enough GC cycles for rewritten strings to be deduplicated
+                if (gcCycleMBean.getCollectionCount() - cycleBeforeRewrite >= MAX_REWRITE_GC_CYCLES) {
+                    break;
+                }
+
+                // enough time is spent waiting for GC to happen
+                if (System.currentTimeMillis() - timeBeforeRewrite >= MAX_REWRITE_TIME) {
+                    break;
+                }
+            }
+        }
+        verifyDedupString(astrs);
+    }
+}
--- /dev/null	2020-01-17 11:46:19.065201212 +0100
+++ new/test/hotspot/jtreg/gc/shenandoah/TestStringInternCleanup.java	2020-01-17 17:11:11.572127092 +0100
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +/* + * @test TestStringInternCleanup + * @summary Check that Shenandoah cleans up interned strings + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ClassUnloadingWithConcurrentMark + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahVerify + * TestStringInternCleanup + * + * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ClassUnloadingWithConcurrentMark + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify + * TestStringInternCleanup + * + * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ClassUnloadingWithConcurrentMark + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC + * TestStringInternCleanup + * + * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ClassUnloadingWithConcurrentMark + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC + * TestStringInternCleanup + */ + +/* + * @test TestStringInternCleanup + * @summary Check that Shenandoah cleans up interned strings + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ClassUnloadingWithConcurrentMark + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * TestStringInternCleanup + * + * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ClassUnloadingWithConcurrentMark + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * -XX:+ShenandoahVerify + * TestStringInternCleanup + * + * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ClassUnloadingWithConcurrentMark + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * TestStringInternCleanup + * + * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ClassUnloadingWithConcurrentMark + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact + * TestStringInternCleanup + * + * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:-ClassUnloadingWithConcurrentMark + * -XX:+UseShenandoahGC + * TestStringInternCleanup + */ + +/* + * @test TestStringInternCleanup + * @summary Check that Shenandoah cleans up interned strings + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ClassUnloadingWithConcurrentMark + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * -XX:+ShenandoahVerify + * TestStringInternCleanup + * + * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ClassUnloadingWithConcurrentMark + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahVerify + * TestStringInternCleanup + * + * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ClassUnloadingWithConcurrentMark + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * TestStringInternCleanup + */ + +public class TestStringInternCleanup { + + static final int COUNT = 1_000_000; + static final int WINDOW = 1_000; + + 
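+    // Rolling window: only the last WINDOW interned strings are kept strongly
+    // reachable, so the older interned strings must become reclaimable for the
+    // -Xmx64m runs above to pass.
+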
static final String[] reachable = new String[WINDOW]; + + public static void main(String[] args) throws Exception { + int rIdx = 0; + for (int c = 0; c < COUNT; c++) { + reachable[rIdx] = ("LargeInternedString" + c).intern(); + rIdx++; + if (rIdx >= WINDOW) { + rIdx = 0; + } + } + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/TestVerifyJCStress.java 2020-01-17 17:11:12.170127059 +0100 @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* + * @test TestVerifyJCStress + * @summary Tests that we pass at least one jcstress-like test with all verification turned on + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @modules java.base/jdk.internal.misc + * java.management + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahVerify + * TestVerifyJCStress + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify + * TestVerifyJCStress + */ + +/* + * @test TestVerifyJCStress + * @summary Tests that we pass at least one jcstress-like test with all verification turned on + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @modules java.base/jdk.internal.misc + * java.management + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * -XX:+ShenandoahVerify -XX:+IgnoreUnrecognizedVMOptions -XX:+ShenandoahVerifyOptoBarriers + * TestVerifyJCStress + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact + * -XX:+ShenandoahVerify -XX:+IgnoreUnrecognizedVMOptions -XX:+ShenandoahVerifyOptoBarriers + * TestVerifyJCStress + */ + +/* + * @test TestVerifyJCStress + * @summary Tests that we pass at least one jcstress-like test with all verification turned on + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @modules java.base/jdk.internal.misc + * java.management + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * -XX:+ShenandoahVerify -XX:+IgnoreUnrecognizedVMOptions -XX:+ShenandoahVerifyOptoBarriers + * TestVerifyJCStress + */ + 
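+// jcstress-like scenario: one worker does read-locked reads (RL_Us) while
+// another does interruptible write-locked writes (WLI_Us) on the shared
+// StampedLock; every @run configuration above keeps ShenandoahVerify on.
+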
+import java.util.*; +import java.util.concurrent.*; +import java.util.concurrent.locks.*; + +public class TestVerifyJCStress { + + public static void main(String[] args) throws Exception { + ExecutorService service = Executors.newFixedThreadPool( + 2, + r -> { + Thread t = new Thread(r); + t.setDaemon(true); + return t; + } + ); + + for (int c = 0; c < 10000; c++) { + final Test[] tests = new Test[10000]; + for (int t = 0; t < tests.length; t++) { + tests[t] = new Test(); + } + + Future f1 = service.submit(() -> { + IntResult2 r = new IntResult2(); + for (Test test : tests) { + test.RL_Us(r); + } + }); + Future f2 = service.submit(() -> { + for (Test test : tests) { + test.WLI_Us(); + } + }); + + f1.get(); + f2.get(); + } + } + + public static class IntResult2 { + int r1, r2; + } + + public static class Test { + final StampedLock lock = new StampedLock(); + + int x, y; + + public void RL_Us(IntResult2 r) { + StampedLock lock = this.lock; + long stamp = lock.readLock(); + r.r1 = x; + r.r2 = y; + lock.unlock(stamp); + } + + public void WLI_Us() { + try { + StampedLock lock = this.lock; + long stamp = lock.writeLockInterruptibly(); + x = 1; + y = 2; + lock.unlock(stamp); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/TestVerifyLevels.java 2020-01-17 17:11:12.776127026 +0100 @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+/*
+ * @test TestVerifyLevels
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ *
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UnlockDiagnosticVMOptions -Xmx128m -XX:+ShenandoahVerify -XX:ShenandoahVerifyLevel=0 TestVerifyLevels
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UnlockDiagnosticVMOptions -Xmx128m -XX:+ShenandoahVerify -XX:ShenandoahVerifyLevel=1 TestVerifyLevels
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UnlockDiagnosticVMOptions -Xmx128m -XX:+ShenandoahVerify -XX:ShenandoahVerifyLevel=2 TestVerifyLevels
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UnlockDiagnosticVMOptions -Xmx128m -XX:+ShenandoahVerify -XX:ShenandoahVerifyLevel=3 TestVerifyLevels
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UnlockDiagnosticVMOptions -Xmx128m -XX:+ShenandoahVerify -XX:ShenandoahVerifyLevel=4 TestVerifyLevels
+ */
+
+public class TestVerifyLevels {
+
+    static final long TARGET_MB = Long.getLong("target", 1_000); // ~1 GB of allocation
+
+    static Object sink;
+
+    public static void main(String[] args) throws Exception {
+        long count = TARGET_MB * 1024 * 1024 / 16;
+        for (long c = 0; c < count; c++) {
+            sink = new Object();
+        }
+    }
+
+}
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/test/hotspot/jtreg/gc/shenandoah/TestWithLogLevel.java 2020-01-17 17:11:13.381126993 +0100
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestWithLogLevel
+ * @summary Test Shenandoah with different log levels
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ *
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms256M -Xmx1G -Xlog:gc*=error TestWithLogLevel
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms256M -Xmx1G -Xlog:gc*=warning TestWithLogLevel
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms256M -Xmx1G -Xlog:gc*=info TestWithLogLevel
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms256M -Xmx1G -Xlog:gc*=debug TestWithLogLevel
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms256M -Xmx1G -Xlog:gc*=trace TestWithLogLevel
+ */
+
+import java.util.*;
+
+public class TestWithLogLevel {
+    public static void main(String[] args) {
+        ArrayList<Object> list = new ArrayList<>();
+        long count = 300 * 1024 * 1024 / 16; // ~300 MB of allocation
+        for (long index = 0; index < count; index++) {
+            Object sink = new Object();
+            list.add(sink);
+        }
+    }
+}
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/test/hotspot/jtreg/gc/shenandoah/TestWrongArrayMember.java 2020-01-17 17:11:13.988126959 +0100
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestWrongArrayMember
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ *
+ * @run main/othervm -Xmx128m -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC TestWrongArrayMember
+ * @run main/othervm -Xmx128m -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal TestWrongArrayMember
+ */
+
+public class TestWrongArrayMember {
+    public static void main(String... args) throws Exception {
+        Object[] src = new Object[3];
+        src[0] = new Integer(0);
+        src[1] = new Object();
+        src[2] = new Object();
+        Object[] dst = new Integer[3];
+        dst[0] = new Integer(1);
+        dst[1] = new Integer(2);
+        dst[2] = new Integer(3);
+        try {
+            System.arraycopy(src, 0, dst, 0, 3);
+            throw new RuntimeException("Expected ArrayStoreException");
+        } catch (ArrayStoreException e) {
+            if (src[0] != dst[0]) {
+                throw new RuntimeException("First element not copied");
+            } else if (src[1] == dst[1] || src[2] == dst[2]) {
+                throw new RuntimeException("Second and third elements are affected");
+            } else {
+                return; // Passed!
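+                // Passing relies on the documented System.arraycopy semantics:
+                // when the ArrayStoreException is thrown, elements before the
+                // failing index have already been copied and the rest are left
+                // untouched, so dst[0] must alias src[0] while dst[1] and
+                // dst[2] must keep their original values.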
+ } + } + } +} + --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/compiler/CallMultipleCatchProjs.java 2020-01-17 17:11:14.593126926 +0100 @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2019, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 8231405 + * @summary barrier expansion breaks if barrier is right after call to rethrow stub + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -XX:CompileOnly=CallMultipleCatchProjs::test -Xcomp -Xverify:none -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC CallMultipleCatchProjs + * + */ + +public class CallMultipleCatchProjs { + private static A field = new A(); + + public static void main(String[] args) throws Exception { + Exception3 exception3 = new Exception3(); + test(new Exception2()); + } + + static int test(Exception exception) throws Exception { + try { + throw exception; + } catch (Exception1 e1) { + return 1; + } catch (Exception2 e2) { + return field.i + 2; + } catch (Exception3 e3) { + return field.i + 3; + } + } + + private static class Exception1 extends Exception { + } + + private static class Exception2 extends Exception { + } + + private static class Exception3 extends Exception { + } + + private static class A { + public int i; + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/compiler/LRBRightAfterMemBar.java 2020-01-17 17:11:15.199126892 +0100 @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2020, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/** + * @test + * @bug 8237007 + * @summary Shenandoah: assert(_base == Tuple) failure during C2 compilation + * @key gc + * @requires vm.flavor == "server" + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-BackgroundCompilation -XX:+UseShenandoahGC LRBRightAfterMemBar + * + */ + +public class LRBRightAfterMemBar { + private static Object field1; + private static Object field2; + static volatile int barrier; + + public static void main(String[] args) { + for (int i = 0; i < 20_000; i++) { + test(true, true, new Object()); + test(false, false, new Object()); + } + } + + private static Object test(boolean flag, boolean flag2, Object o2) { + for (int i = 0; i < 10; i++) { + barrier = 0x42; // Membar + if (o2 == null) { // hoisted out of loop + } + // The following line is converted to a CMove with an out + // of loop control once the null check above is + // hoisted. The CMove is pinned right after the membar and + // assigned the membar as control. + Object o = flag ? field1 : field2; + if (flag2) { + return o; + } + } + + return null; + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/compiler/TestC1ArrayCopyNPE.java 2020-01-17 17:11:15.798126859 +0100 @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* @test TestC1ArrayCopyNPE + * @summary test C1 arraycopy intrinsic + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @run main/othervm -XX:TieredStopAtLevel=1 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive TestC1ArrayCopyNPE + */ + +public class TestC1ArrayCopyNPE { + + private static final int NUM_RUNS = 10000; + private static final int ARRAY_SIZE = 10000; + private static int[] a; + private static int[] b; + + public static void main(String[] args) { + a = null; + b = new int[ARRAY_SIZE]; + for (int i = 0; i < NUM_RUNS; i++) { + test(); + } + a = new int[ARRAY_SIZE]; + b = null; + for (int i = 0; i < NUM_RUNS; i++) { + test(); + } + } + + private static void test() { + try { + System.arraycopy(a, 0, b, 0, ARRAY_SIZE); + throw new RuntimeException("test failed"); + } catch (NullPointerException ex) { + // Ok + } + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/compiler/TestC1VectorizedMismatch.java 2020-01-17 17:11:16.406126826 +0100 @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* @test TestC1VectorizedMismatch + * @summary test C1 vectorized mismatch intrinsic + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @run main/othervm -XX:TieredStopAtLevel=1 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive TestC1VectorizedMismatch + */ + +import java.util.Arrays; + +public class TestC1VectorizedMismatch { + + private static final int NUM_RUNS = 10000; + private static final int ARRAY_SIZE = 10000; + private static int[] a; + private static int[] b; + + public static void main(String[] args) { + a = new int[ARRAY_SIZE]; + b = new int[ARRAY_SIZE]; + for (int i = 0; i < NUM_RUNS; i++) { + test(); + } + } + + private static void test() { + int[] a1 = new int[ARRAY_SIZE]; + int[] b1 = new int[ARRAY_SIZE]; + fillArray(a); + System.arraycopy(a, 0, b, 0, ARRAY_SIZE); + if (!Arrays.equals(a, b)) { + throw new RuntimeException("arrays not equal"); + } + } + + private static void fillArray(int[] array) { + for (int i = 0; i < ARRAY_SIZE; i++) { + int val = (int) (Math.random() * Integer.MAX_VALUE); + array[i] = val; + } + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/compiler/TestClone.java 2020-01-17 17:11:17.012126792 +0100 @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2019, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test TestClone + * @summary Test clone barriers work correctly + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC + * -Xint + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC + * -XX:-TieredCompilation + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC + * -XX:TieredStopAtLevel=1 + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC + * -XX:TieredStopAtLevel=4 + * TestClone + */ + +/* + * @test TestClone + * @summary Test clone barriers work correctly + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC + * -XX:+ShenandoahVerify + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC + * -XX:+ShenandoahVerify + * -Xint + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC + * -XX:+ShenandoahVerify + * -XX:-TieredCompilation + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC + * -XX:+ShenandoahVerify + * -XX:TieredStopAtLevel=1 + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC + * -XX:+ShenandoahVerify + * -XX:TieredStopAtLevel=4 + * TestClone + */ + +/* + * @test TestClone + * @summary Test clone barriers work correctly + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -Xint + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:-TieredCompilation + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:TieredStopAtLevel=1 + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:TieredStopAtLevel=4 + * TestClone + */ + +/* + * @test TestClone + * @summary Test clone barriers work correctly + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled & (vm.bits == "64") + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:-UseCompressedOops + * -XX:+UseShenandoahGC + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g 
+ * -XX:-UseCompressedOops + * -XX:+UseShenandoahGC + * -Xint + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:-UseCompressedOops + * -XX:+UseShenandoahGC + * -XX:-TieredCompilation + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:-UseCompressedOops + * -XX:+UseShenandoahGC + * -XX:TieredStopAtLevel=1 + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:-UseCompressedOops + * -XX:+UseShenandoahGC + * -XX:TieredStopAtLevel=4 + * TestClone + */ + +/* + * @test TestClone + * @summary Test clone barriers work correctly + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled & (vm.bits == "64") + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:-UseCompressedOops + * -XX:+UseShenandoahGC + * -XX:+ShenandoahVerify + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:-UseCompressedOops + * -XX:+UseShenandoahGC + * -XX:+ShenandoahVerify + * -Xint + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:-UseCompressedOops + * -XX:+UseShenandoahGC + * -XX:+ShenandoahVerify + * -XX:-TieredCompilation + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:-UseCompressedOops + * -XX:+UseShenandoahGC + * -XX:+ShenandoahVerify + * -XX:TieredStopAtLevel=1 + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:-UseCompressedOops + * -XX:+UseShenandoahGC + * -XX:+ShenandoahVerify + * -XX:TieredStopAtLevel=4 + * TestClone + */ + +/* + * @test TestClone + * @summary Test clone barriers work correctly + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled & (vm.bits == "64") + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:-UseCompressedOops + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:-UseCompressedOops + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -Xint + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:-UseCompressedOops + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:-TieredCompilation + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:-UseCompressedOops + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:TieredStopAtLevel=1 + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:-UseCompressedOops + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:TieredStopAtLevel=4 + * TestClone + */ + + +public class TestClone { + + public static void main(String[] args) throws Exception { + for (int i = 0; i < 10000; i++) { + Object[] src = new Object[i]; + for (int c = 0; c < src.length; c++) { + src[c] = new Object(); + } + testWith(src); + } + } + + static void testWith(Object[] src) { + Object[] dst = src.clone(); + int srcLen = src.length; + int dstLen = dst.length; + if (srcLen 
!= dstLen) { + throw new IllegalStateException("Lengths do not match: " + srcLen + " vs " + dstLen); + } + for (int c = 0; c < src.length; c++) { + Object s = src[c]; + Object d = dst[c]; + if (s != d) { + throw new IllegalStateException("Elements do not match at " + c + ": " + s + " vs " + d); + } + } + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/compiler/TestCommonGCLoads.java 2020-01-17 17:11:17.623126759 +0100 @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test TestCommonGCLoads + * @summary Test GC state load commoning works + * @key gc + * @requires vm.flavor == "server" + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:-TieredCompilation + * -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC + * -XX:-ShenandoahCommonGCStateLoads + * TestCommonGCLoads + * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:-TieredCompilation + * -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC + * -XX:+ShenandoahCommonGCStateLoads + * TestCommonGCLoads + */ + +public class TestCommonGCLoads { + + static Object d = new Object(); + + static Target t1 = new Target(); + static Target t2 = new Target(); + static Target t3 = new Target(); + static Target t4 = new Target(); + static Target t5 = new Target(); + + static void test() { + t1.field = d; + t2.field = d; + t3.field = d; + t4.field = d; + t5.field = d; + } + + static public void main(String[] args) { + for (int i = 0; i < 100_000; i++) { + test(); + } + } + + static class Target { + Object field; + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/compiler/TestExpandedWBLostNullCheckDep.java 2020-01-17 17:11:18.228126725 +0100 @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestExpandedWBLostNullCheckDep
+ * @summary Logic that moves a null check in the expanded barrier may cause a memory access that doesn't depend on the barrier to bypass the null check
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ * @requires vm.flavor == "server"
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:-TieredCompilation
+ *                   -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC
+ *                   -XX:+StressGCM -XX:+StressLCM TestExpandedWBLostNullCheckDep
+ */
+
+public class TestExpandedWBLostNullCheckDep {
+
+    static void test(int i, int[] arr) {
+        // arr.length depends on a null check for arr
+        if (i < 0 || i >= arr.length) {
+        }
+        // The write barrier here also depends on the null check. The
+        // null check is moved in the barrier to enable implicit null
+        // checks. The null check must not be moved in a way that lets
+        // memory accesses such as the arr.length load bypass it.
+        arr[i] = 0x42;
+    }
+
+    static public void main(String[] args) {
+        int[] int_arr = new int[10];
+        for (int i = 0; i < 20000; i++) {
+            test(0, int_arr);
+        }
+        try {
+            test(0, null);
+        } catch (NullPointerException npe) {}
+    }
+}
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/test/hotspot/jtreg/gc/shenandoah/compiler/TestMaybeNullUnsafeAccess.java 2020-01-17 17:11:18.835126692 +0100
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestMaybeNullUnsafeAccess
+ * @summary a cast before an unsafe access, moved into the null path of a dominating null check, causes a crash
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ * @modules java.base/jdk.internal.misc:+open
+ *
+ * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation -XX:-TieredCompilation
+ *                   TestMaybeNullUnsafeAccess
+ *
+ * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation -XX:-TieredCompilation
+ *                   -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC
+ *                   TestMaybeNullUnsafeAccess
+ *
+ */
+
+import jdk.internal.misc.Unsafe;
+
+import java.lang.reflect.Field;
+
+public class TestMaybeNullUnsafeAccess {
+
+    static final jdk.internal.misc.Unsafe UNSAFE = Unsafe.getUnsafe();
+    static final long F_OFFSET;
+
+    static class A {
+        int f;
+    }
+
+    static {
+        try {
+            Field fField = A.class.getDeclaredField("f");
+            F_OFFSET = UNSAFE.objectFieldOffset(fField);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    static A test_helper(Object o) {
+        return (A) o;
+    }
+
+    static int test(Object o) {
+        int f = 0;
+        for (int i = 0; i < 100; i++) {
+            A a = test_helper(o);
+            f = UNSAFE.getInt(a, F_OFFSET);
+        }
+        return f;
+    }
+
+    static public void main(String[] args) {
+        A a = new A();
+        for (int i = 0; i < 20000; i++) {
+            test_helper(null);
+            test_helper(a);
+            test(a);
+        }
+    }
+
+}
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/test/hotspot/jtreg/gc/shenandoah/compiler/TestNullCheck.java 2020-01-17 17:11:19.445126658 +0100
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */ + +/** + * @test TestNullCheck + * @summary implicit null check on brooks pointer must not cause crash + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled & (vm.bits == "64") + * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:-TieredCompilation + * -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC + * -Xmx4G -XX:HeapBaseMinAddress=0x800000000 TestNullCheck + */ + +// HeapBaseMinAddress above forces compressed oops with a base + +public class TestNullCheck { + + int f; + + static int test1(TestNullCheck o) { + return o.f; + } + + static TestNullCheck static_obj = new TestNullCheck(); + + static int test2() { + return static_obj.f; + } + + static public void main(String[] args) { + TestNullCheck o = new TestNullCheck(); + for (int i = 0; i < 20000; i++) { + test1(o); + test2(); + } + try { + test1(null); + } catch (NullPointerException npe) {} + static_obj = null; + try { + test2(); + } catch (NullPointerException npe) {} + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/compiler/TestReferenceCAS.java 2020-01-17 17:11:20.055126625 +0100 @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +/* + * Run standalone with: --add-exports java.base/jdk.internal.misc=ALL-UNNAMED --add-opens java.base/jdk.internal.misc=ALL-UNNAMED + */ + +/* + * @test TestReferenceCAS + * @summary Shenandoah reference CAS test + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @modules java.base/jdk.internal.misc:+open + * + * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC TestReferenceCAS + * @run main/othervm -Diters=100 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -Xint TestReferenceCAS + * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -XX:-TieredCompilation TestReferenceCAS + * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -XX:TieredStopAtLevel=1 TestReferenceCAS + * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -XX:TieredStopAtLevel=4 TestReferenceCAS + */ + +/* + * @test TestReferenceCAS + * @summary Shenandoah reference CAS test + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled & (vm.bits == "64") + * @modules java.base/jdk.internal.misc:+open + * + * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -XX:-UseCompressedOops TestReferenceCAS + * @run main/othervm -Diters=100 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -XX:-UseCompressedOops -Xint TestReferenceCAS + * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -XX:-UseCompressedOops -XX:-TieredCompilation TestReferenceCAS + * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -XX:-UseCompressedOops -XX:TieredStopAtLevel=1 TestReferenceCAS + * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -XX:-UseCompressedOops -XX:TieredStopAtLevel=4 TestReferenceCAS + */ + +import java.lang.reflect.Field; + +public class TestReferenceCAS { + + static final int ITERS = Integer.getInteger("iters", 1); + static final int WEAK_ATTEMPTS = Integer.getInteger("weakAttempts", 10); + + static final jdk.internal.misc.Unsafe UNSAFE; + static final long V_OFFSET; + + static { + try { + Field f = jdk.internal.misc.Unsafe.class.getDeclaredField("theUnsafe"); + f.setAccessible(true); + UNSAFE = (jdk.internal.misc.Unsafe) f.get(null); + } catch (Exception e) { + throw new RuntimeException("Unable to get Unsafe instance.", e); + } + + try { + Field vField = TestReferenceCAS.class.getDeclaredField("v"); + V_OFFSET = UNSAFE.objectFieldOffset(vField); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + Object v; + + private static void assertEquals(boolean a, boolean b, String msg) { + if (a != b) { + throw new RuntimeException("a (" + a + ") != b (" + b + "): " + msg); + } + } + + private static void assertEquals(Object 
a, Object b, String msg) { + if (!a.equals(b)) { + throw new RuntimeException("a (" + a.toString() + ") != b (" + b.toString() + "): " + msg); + } + } + + public static void main(String[] args) { + TestReferenceCAS t = new TestReferenceCAS(); + for (int c = 0; c < ITERS; c++) { + testAccess(t, V_OFFSET); + } + } + + static void testAccess(Object base, long offset) { + String foo = new String("foo"); + String bar = new String("bar"); + String baz = new String("baz"); + UNSAFE.putObject(base, offset, "foo"); + { + String newval = bar; + boolean r = UNSAFE.compareAndSetObject(base, offset, "foo", newval); + assertEquals(r, true, "success compareAndSet Object"); + assertEquals(newval, "bar", "must not destroy newval"); + Object x = UNSAFE.getObject(base, offset); + assertEquals(x, "bar", "success compareAndSet Object value"); + } + + { + String newval = baz; + boolean r = UNSAFE.compareAndSetObject(base, offset, "foo", newval); + assertEquals(r, false, "failing compareAndSet Object"); + assertEquals(newval, "baz", "must not destroy newval"); + Object x = UNSAFE.getObject(base, offset); + assertEquals(x, "bar", "failing compareAndSet Object value"); + } + + UNSAFE.putObject(base, offset, "bar"); + { + String newval = foo; + Object r = UNSAFE.compareAndExchangeObject(base, offset, "bar", newval); + assertEquals(r, "bar", "success compareAndExchange Object"); + assertEquals(newval, "foo", "must not destroy newval"); + Object x = UNSAFE.getObject(base, offset); + assertEquals(x, "foo", "success compareAndExchange Object value"); + } + + { + String newval = baz; + Object r = UNSAFE.compareAndExchangeObject(base, offset, "bar", newval); + assertEquals(r, "foo", "failing compareAndExchange Object"); + assertEquals(newval, "baz", "must not destroy newval"); + Object x = UNSAFE.getObject(base, offset); + assertEquals(x, "foo", "failing compareAndExchange Object value"); + } + + UNSAFE.putObject(base, offset, "bar"); + { + String newval = foo; + boolean success = false; + for (int c = 0; c < WEAK_ATTEMPTS && !success; c++) { + success = UNSAFE.weakCompareAndSetObject(base, offset, "bar", newval); + assertEquals(newval, "foo", "must not destroy newval"); + } + assertEquals(success, true, "weakCompareAndSet Object"); + Object x = UNSAFE.getObject(base, offset); + assertEquals(x, "foo", "weakCompareAndSet Object"); + } + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/compiler/TestUnsafeOffheapSwap.java 2020-01-17 17:11:20.662126591 +0100 @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2019, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test TestUnsafeOffheapSwap + * @summary Miscompilation in Unsafe off-heap swap routines + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @modules java.base/jdk.internal.misc:+open + * + * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation -XX:-TieredCompilation + * -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC + * TestUnsafeOffheapSwap + */ + +import java.util.*; +import jdk.internal.misc.Unsafe; + +public class TestUnsafeOffheapSwap { + + static final int SIZE = 10000; + static final long SEED = 1; + + static final jdk.internal.misc.Unsafe UNSAFE = Unsafe.getUnsafe(); + static final int SCALE = UNSAFE.ARRAY_INT_INDEX_SCALE; + + static Memory mem; + static int[] arr; + + public static void main(String[] args) throws Exception { + // Bug is exposed when memory.addr is not known statically + mem = new Memory(SIZE*SCALE); + arr = new int[SIZE]; + + for (int i = 0; i < 10; i++) { + test(); + } + } + + static void test() { + Random rnd = new Random(SEED); + for (int i = 0; i < SIZE; i++) { + int value = rnd.nextInt(); + mem.setInt(i, value); + arr[i] = value; + } + + for (int i = 0; i < SIZE; i++) { + if (arr[i] != mem.getInt(i)) { + throw new IllegalStateException("TESTBUG: Values mismatch before swaps"); + } + } + + for (int i = 1; i < SIZE; i++) { + mem.swap(i - 1, i); + int tmp = arr[i - 1]; + arr[i - 1] = arr[i]; + arr[i] = tmp; + } + + for (int i = 0; i < SIZE; i++) { + if (arr[i] != mem.getInt(i)) { + throw new IllegalStateException("Values mismatch after swaps"); + } + } + } + + static class Memory { + private final long addr; + + Memory(int size) { + addr = UNSAFE.allocateMemory(size); + } + + public int getInt(int idx) { + return UNSAFE.getInt(addr + idx*SCALE); + } + + public void setInt(int idx, int val) { + UNSAFE.putInt(addr + idx*SCALE, val); + } + + public void swap(int a, int b) { + int tmp = getInt(a); + setInt(a, getInt(b)); + setInt(b, tmp); + } + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/compiler/TestWriteBarrierClearControl.java 2020-01-17 17:11:21.272126558 +0100 @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */
+
+/**
+ * @test TestWriteBarrierClearControl
+ * @summary Clearing control during final graph reshape causes the memory barrier to lose its dependency on the null check
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ * @requires vm.flavor == "server"
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:-TieredCompilation
+ *                   -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC
+ *                   -XX:+UnlockDiagnosticVMOptions -XX:+StressLCM -XX:+StressGCM
+ *                   TestWriteBarrierClearControl
+ *
+ */
+public class TestWriteBarrierClearControl {
+
+    int f;
+
+    static void test1(TestWriteBarrierClearControl o) {
+        o.f = 0x42;
+    }
+
+    static TestWriteBarrierClearControl fo = new TestWriteBarrierClearControl();
+
+    static void test2() {
+        TestWriteBarrierClearControl o = fo;
+        o.f = 0x42;
+    }
+
+    static public void main(String[] args) {
+        TestWriteBarrierClearControl o = new TestWriteBarrierClearControl();
+        for (int i = 0; i < 20000; i++) {
+            test1(o);
+            test2();
+        }
+        try {
+            test1(null);
+        } catch (NullPointerException npe) {}
+        fo = null;
+        try {
+            test2();
+        } catch (NullPointerException npe) {}
+    }
+}
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/test/hotspot/jtreg/gc/shenandoah/jni/CriticalNativeArgs.java 2020-01-17 17:11:21.879126524 +0100
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/* @test
+ * @requires (os.arch == "x86_64" | os.arch == "amd64") & (vm.bits == "64") & !vm.graal.enabled & vm.gc.Shenandoah
+ *
+ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive -XX:+ShenandoahDegeneratedGC -Xcomp -Xmx512M -XX:+CriticalJNINatives CriticalNativeArgs
+ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive -XX:-ShenandoahDegeneratedGC -Xcomp -Xmx512M -XX:+CriticalJNINatives CriticalNativeArgs
+ *
+ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive -Xcomp -Xmx512M -XX:+CriticalJNINatives CriticalNativeArgs
+ *
+ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xcomp -Xmx256M -XX:+CriticalJNINatives CriticalNativeArgs
+ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -Xcomp -Xmx512M -XX:+CriticalJNINatives CriticalNativeArgs
+ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive -Xcomp -Xmx512M -XX:+CriticalJNINatives CriticalNativeArgs
+ */
+public class CriticalNativeArgs {
+    static {
+        System.loadLibrary("CriticalNative");
+    }
+
+    static native boolean isNull(int[] a);
+
+    public static void main(String[] args) {
+        int[] arr = new int[2];
+
+        if (isNull(arr)) {
+            throw new RuntimeException("Should not be null");
+        }
+
+        if (!isNull(null)) {
+            throw new RuntimeException("Should be null");
+        }
+    }
+}
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/test/hotspot/jtreg/gc/shenandoah/jni/CriticalNativeStress.java 2020-01-17 17:11:22.485126491 +0100
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +import java.util.Random; + +/* @test + * @requires (os.arch =="x86_64" | os.arch == "amd64") & (vm.bits == "64") & !vm.graal.enabled & vm.gc.Shenandoah + * + * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive -XX:+ShenandoahDegeneratedGC -Xcomp -Xmx512M -XX:+CriticalJNINatives CriticalNativeStress + * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive -XX:-ShenandoahDegeneratedGC -Xcomp -Xmx512M -XX:+CriticalJNINatives CriticalNativeStress + * + * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive -Xcomp -Xmx512M -XX:+CriticalJNINatives CriticalNativeStress + * + * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xcomp -Xmx256M -XX:+CriticalJNINatives CriticalNativeStress + * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -Xcomp -Xmx512M -XX:+CriticalJNINatives CriticalNativeStress + * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive -Xcomp -Xmx512M -XX:+CriticalJNINatives CriticalNativeStress + */ +public class CriticalNativeStress { + private static Random rand = new Random(); + + static { + System.loadLibrary("CriticalNative"); + } + + static final int CYCLES = 50; + static final int THREAD_PER_CASE = 1; + + static native long sum1(long[] a); + + // More than 6 parameters + static native long sum2(long a1, int[] a2, int[] a3, long[] a4, int[] a5); + + static long sum(long[] a) { + long sum = 0; + for (int index = 0; index < a.length; index++) { + sum += a[index]; + } + return sum; + } + + static long sum(int[] a) { + long sum = 0; + for (int index = 0; index < a.length; index++) { + sum += a[index]; + } + return sum; + } + + private static volatile String garbage_array[]; + + static void create_garbage(int len) { + len = Math.max(len, 1024); + String array[] = new String[len]; + for (int index = 0; index < len; index++) { + array[index] = "String " + index; + } + garbage_array = array; + } + + static void run_test_case1() { + int length = rand.nextInt(50) + 1; + long[] arr = new long[length]; + for (int index = 0; index < length; index++) { + arr[index] = rand.nextLong() % 10002; + } + + for (int index = 0; index < length; index++) { + create_garbage(index); + } + + long native_sum = sum1(arr); + long java_sum = sum(arr); + if (native_sum != java_sum) { + StringBuffer sb = new StringBuffer("Sums do not match: native = ") + .append(native_sum).append(" java = ").append(java_sum); + + throw new RuntimeException(sb.toString()); + } + } + + static void run_test_case2() { + int index; + long a1 = rand.nextLong() % 10245; + + int a2_length = rand.nextInt(50) + 1; + int[] a2 = new int[a2_length]; + for (index = 0; index < a2_length; index++) { + a2[index] = rand.nextInt(106); + } + + int a3_length = rand.nextInt(150) + 1; + int[] a3 = new int[a3_length]; + for (index = 0; index < a3_length; index++) { + a3[index] = rand.nextInt(3333); + } + + int a4_length = rand.nextInt(200) + 1; + long[] a4 = new long[a4_length]; + for (index = 0; index < a4_length; index++) { + a4[index] = rand.nextLong() % 12322; + } + + int a5_length = 
rand.nextInt(350) + 1; + int[] a5 = new int[a5_length]; + for (index = 0; index < a5_length; index++) { + a5[index] = rand.nextInt(3333); + } + + for (index = 0; index < a1; index++) { + create_garbage(index); + } + + long native_sum = sum2(a1, a2, a3, a4, a5); + long java_sum = a1 + sum(a2) + sum(a3) + sum(a4) + sum(a5); + if (native_sum != java_sum) { + StringBuffer sb = new StringBuffer("Sums do not match: native = ") + .append(native_sum).append(" java = ").append(java_sum); + + throw new RuntimeException(sb.toString()); + } + } + + static class Case1Runner extends Thread { + public Case1Runner() { + start(); + } + + public void run() { + for (int index = 0; index < CYCLES; index++) { + run_test_case1(); + } + } + } + + static class Case2Runner extends Thread { + public Case2Runner() { + start(); + } + + public void run() { + for (int index = 0; index < CYCLES; index++) { + run_test_case2(); + } + } + } + + public static void main(String[] args) { + Thread[] thrs = new Thread[THREAD_PER_CASE * 2]; + for (int index = 0; index < thrs.length; index = index + 2) { + thrs[index] = new Case1Runner(); + thrs[index + 1] = new Case2Runner(); + } + + for (int index = 0; index < thrs.length; index++) { + try { + thrs[index].join(); + } catch (Exception e) { + e.printStackTrace(); + } + } + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/jni/TestJNICritical.java 2020-01-17 17:11:23.091126457 +0100 @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +/* @test TestJNICritical + * @summary test JNI critical arrays support in Shenandoah + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ShenandoahVerify TestJNICritical + * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive TestJNICritical + */ + +import java.util.Arrays; + +public class TestJNICritical { + static { + System.loadLibrary("TestJNICritical"); + } + + private static final int NUM_RUNS = 10000; + private static final int ARRAY_SIZE = 10000; + private static int[] a; + private static int[] b; + + private static native void copyAtoB(int[] a, int[] b); + + public static void main(String[] args) { + a = new int[ARRAY_SIZE]; + b = new int[ARRAY_SIZE]; + for (int i = 0; i < NUM_RUNS; i++) { + test(); + } + } + + private static void test() { + int[] a1 = new int[ARRAY_SIZE]; + int[] b1 = new int[ARRAY_SIZE]; + fillArray(a); + copyAtoB(a, b); + copyAtoB(a1, b1); // Don't optimize out garbage arrays. + if (!Arrays.equals(a, b)) { + throw new RuntimeException("arrays not equal"); + } + } + + private static void fillArray(int[] array) { + for (int i = 0; i < ARRAY_SIZE; i++) { + int val = (int) (Math.random() * Integer.MAX_VALUE); + array[i] = val; + } + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/jni/TestJNIGlobalRefs.java 2020-01-17 17:11:23.693126424 +0100 @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +/* @test TestJNIGlobalRefs + * @summary Test JNI Global Refs with Shenandoah + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm/native -Xmx1g -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahVerify + * TestJNIGlobalRefs + */ + +/* @test TestJNIGlobalRefs + * @summary Test JNI Global Refs with Shenandoah + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm/native -Xmx1g -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * TestJNIGlobalRefs + */ + +import java.util.Arrays; +import java.util.Random; + +public class TestJNIGlobalRefs { + static { + System.loadLibrary("TestJNIGlobalRefs"); + } + + private static final int TIME_MSEC = 120000; + private static final int ARRAY_SIZE = 10000; + + private static native void makeGlobalRef(Object o); + private static native void makeWeakGlobalRef(Object o); + private static native Object readGlobalRef(); + private static native Object readWeakGlobalRef(); + + public static void main(String[] args) throws Throwable { + seedGlobalRef(); + seedWeakGlobalRef(); + long start = System.currentTimeMillis(); + long current = start; + while (current - start < TIME_MSEC) { + testGlobal(); + testWeakGlobal(); + Thread.sleep(1); + current = System.currentTimeMillis(); + } + } + + private static void seedGlobalRef() { + int[] a = new int[ARRAY_SIZE]; + fillArray(a, 1337); + makeGlobalRef(a); + } + + private static void seedWeakGlobalRef() { + int[] a = new int[ARRAY_SIZE]; + fillArray(a, 8080); + makeWeakGlobalRef(a); + } + + private static void testGlobal() { + int[] a = (int[]) readGlobalRef(); + checkArray(a, 1337); + } + + private static void testWeakGlobal() { + int[] a = (int[]) readWeakGlobalRef(); + if (a != null) { + checkArray(a, 8080); + } else { + // weak reference is cleaned, recreate: + seedWeakGlobalRef(); + } + } + + private static void fillArray(int[] array, int seed) { + Random r = new Random(seed); + for (int i = 0; i < ARRAY_SIZE; i++) { + array[i] = r.nextInt(); + } + } + + private static void checkArray(int[] array, int seed) { + Random r = new Random(seed); + if (array.length != ARRAY_SIZE) { + throw new IllegalStateException("Illegal array length: " + array.length + ", but expected " + ARRAY_SIZE); + } + for (int i = 0; i < ARRAY_SIZE; i++) { + int actual = array[i]; + int expected = r.nextInt(); + if (actual != expected) { + throw new IllegalStateException("Incorrect array data: " + actual + ", but expected " + expected); + } + } + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/jni/TestPinnedGarbage.java 2020-01-17 17:11:24.298126391 +0100 @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* @test TestPinnedGarbage + * @summary Test that garbage in the pinned region does not crash VM + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx512m + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahVerify -XX:+ShenandoahDegeneratedGC + * TestPinnedGarbage + * + * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx512m + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahVerify -XX:-ShenandoahDegeneratedGC + * TestPinnedGarbage + */ + +/* @test TestPinnedGarbage + * @summary Test that garbage in the pinned region does not crash VM + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx512m + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * TestPinnedGarbage + * + * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx512m + * -XX:+UseShenandoahGC + * -XX:+ShenandoahVerify + * TestPinnedGarbage + */ + +import java.util.Arrays; +import java.util.concurrent.*; + +public class TestPinnedGarbage { + static { + System.loadLibrary("TestPinnedGarbage"); + } + + private static final int NUM_RUNS = 1_000; + private static final int OBJS_COUNT = 1_000; + private static final int GARBAGE_COUNT = 1_000_000; + + private static native void pin(int[] a); + private static native void unpin(int[] a); + + public static void main(String[] args) { + for (int i = 0; i < NUM_RUNS; i++) { + test(); + } + } + + private static void test() { + Object[] objs = new Object[OBJS_COUNT]; + for (int i = 0; i < OBJS_COUNT; i++) { + objs[i] = new MyClass(); + } + + int[] cog = new int[10]; + int cogIdx = ThreadLocalRandom.current().nextInt(OBJS_COUNT); + objs[cogIdx] = cog; + pin(cog); + + for (int i = 0; i < GARBAGE_COUNT; i++) { + int rIdx = ThreadLocalRandom.current().nextInt(OBJS_COUNT); + if (rIdx != cogIdx) { + objs[rIdx] = new MyClass(); + } + } + + unpin(cog); + } + + public static class MyClass { + public Object ref = new Object(); + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/jni/libCriticalNative.c 2020-01-17 17:11:24.896126358 +0100 @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "jni.h" + +JNIEXPORT jlong JNICALL JavaCritical_CriticalNativeStress_sum1 + (jint length, jlong* a) { + jlong sum = 0; + jint index; + for (index = 0; index < length; index ++) { + sum += a[index]; + } + + return sum; +} + +JNIEXPORT jlong JNICALL JavaCritical_CriticalNativeStress_sum2 + (jlong a1, jint a2_length, jint* a2, jint a4_length, jint* a4, jint a6_length, jlong* a6, jint a8_length, jint* a8) { + jlong sum = a1; + jint index; + for (index = 0; index < a2_length; index ++) { + sum += a2[index]; + } + + for (index = 0; index < a4_length; index ++) { + sum += a4[index]; + } + + for (index = 0; index < a6_length; index ++) { + sum += a6[index]; + } + + for (index = 0; index < a8_length; index ++) { + sum += a8[index]; + } + return sum; +} + +JNIEXPORT jlong JNICALL Java_CriticalNativeStress_sum1 + (JNIEnv *env, jclass jclazz, jlongArray a) { + jlong sum = 0; + jsize len = (*env)->GetArrayLength(env, a); + jsize index; + jlong* arr = (jlong*)(*env)->GetPrimitiveArrayCritical(env, a, 0); + for (index = 0; index < len; index ++) { + sum += arr[index]; + } + + (*env)->ReleasePrimitiveArrayCritical(env, a, arr, 0); + return sum; +} + +JNIEXPORT jlong JNICALL Java_CriticalNativeStress_sum2 + (JNIEnv *env, jclass jclazz, jlong a1, jintArray a2, jintArray a3, jlongArray a4, jintArray a5) { + jlong sum = a1; + jsize index; + jsize len = (*env)->GetArrayLength(env, a2); + jint* a2_arr = (jint*)(*env)->GetPrimitiveArrayCritical(env, a2, 0); + for (index = 0; index < len; index ++) { + sum += a2_arr[index]; + } + (*env)->ReleasePrimitiveArrayCritical(env, a2, a2_arr, 0); + + len = (*env)->GetArrayLength(env, a3); + jint* a3_arr = (jint*)(*env)->GetPrimitiveArrayCritical(env, a3, 0); + for (index = 0; index < len; index ++) { + sum += a3_arr[index]; + } + (*env)->ReleasePrimitiveArrayCritical(env, a3, a3_arr, 0); + + len = (*env)->GetArrayLength(env, a4); + jlong* a4_arr = (jlong*)(*env)->GetPrimitiveArrayCritical(env, a4, 0); + for (index = 0; index < len; index ++) { + sum += a4_arr[index]; + } + (*env)->ReleasePrimitiveArrayCritical(env, a4, a4_arr, 0); + + len = (*env)->GetArrayLength(env, a5); + jint* a5_arr = (jint*)(*env)->GetPrimitiveArrayCritical(env, a5, 0); + for (index = 0; index < len; index ++) { + sum += a5_arr[index]; + } + (*env)->ReleasePrimitiveArrayCritical(env, a5, a5_arr, 0); + + return sum; +} + +JNIEXPORT jboolean JNICALL JavaCritical_CriticalNativeArgs_isNull + (jint length, jint* a) { + return (a == NULL) && (length == 0); +} + +JNIEXPORT jboolean JNICALL Java_CriticalNativeArgs_isNull + (JNIEnv *env, jclass jclazz, jintArray a) { + jboolean is_null; + jsize len = (*env)->GetArrayLength(env, a); + jint* arr = (jint*)(*env)->GetPrimitiveArrayCritical(env, a, 0); + is_null = (arr == NULL) && (len == 0); + (*env)->ReleasePrimitiveArrayCritical(env, a, arr, 0); + return is_null; +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/jni/libTestJNICritical.c 2020-01-17 17:11:25.504126325 +0100 @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2016, 2017, Red Hat, Inc. All rights reserved. 
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include <jni.h>
+#include <string.h>
+
+JNIEXPORT void JNICALL
+Java_TestJNICritical_copyAtoB(JNIEnv *env, jclass unused, jintArray a, jintArray b) {
+  jint len = (*env)->GetArrayLength(env, a);
+  jint* aa = (*env)->GetPrimitiveArrayCritical(env, a, 0);
+  jint* bb = (*env)->GetPrimitiveArrayCritical(env, b, 0);
+  memcpy(bb, aa, len * sizeof(jint));
+  (*env)->ReleasePrimitiveArrayCritical(env, b, bb, 0);
+  (*env)->ReleasePrimitiveArrayCritical(env, a, aa, 0);
+}
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/test/hotspot/jtreg/gc/shenandoah/jni/libTestJNIGlobalRefs.c 2020-01-17 17:11:26.105126291 +0100
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include <jni.h>
+#include <stdlib.h>
+
+jobject global_ref = NULL;
+jobject weak_global_ref = NULL;
+
+JNIEXPORT void JNICALL
+Java_TestJNIGlobalRefs_makeGlobalRef(JNIEnv *env, jclass unused, jobject o) {
+  global_ref = (*env)->NewGlobalRef(env, o);
+}
+
+JNIEXPORT void JNICALL
+Java_TestJNIGlobalRefs_makeWeakGlobalRef(JNIEnv *env, jclass unused, jobject o) {
+  weak_global_ref = (*env)->NewWeakGlobalRef(env, o);
+}
+
+JNIEXPORT jobject JNICALL
+Java_TestJNIGlobalRefs_readGlobalRef(JNIEnv *env, jclass unused) {
+  return global_ref;
+}
+
+JNIEXPORT jobject JNICALL
+Java_TestJNIGlobalRefs_readWeakGlobalRef(JNIEnv *env, jclass unused) {
+  return weak_global_ref;
+}
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/test/hotspot/jtreg/gc/shenandoah/jni/libTestPinnedGarbage.c 2020-01-17 17:11:26.716126258 +0100
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include <jni.h>
+#include <stdlib.h>
+
+static jint* pinned;
+
+JNIEXPORT void JNICALL
+Java_TestPinnedGarbage_pin(JNIEnv *env, jclass unused, jintArray a) {
+  pinned = (*env)->GetPrimitiveArrayCritical(env, a, 0);
+}
+
+JNIEXPORT void JNICALL
+Java_TestPinnedGarbage_unpin(JNIEnv *env, jclass unused, jintArray a) {
+  (*env)->ReleasePrimitiveArrayCritical(env, a, pinned, 0);
+}
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/test/hotspot/jtreg/gc/shenandoah/jvmti/TestHeapDump.java 2020-01-17 17:11:27.320126224 +0100
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +/** + * @test TestHeapDump + * @summary Tests JVMTI heap dumps + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @compile TestHeapDump.java + * @run main/othervm/native/timeout=300 -agentlib:TestHeapDump -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx128m -XX:ShenandoahGCHeuristics=aggressive TestHeapDump + * + */ + +/** + * @test TestHeapDump + * @summary Tests JVMTI heap dumps + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled & (vm.bits == "64") + * @compile TestHeapDump.java + * @run main/othervm/native/timeout=300 -agentlib:TestHeapDump -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx128m -XX:ShenandoahGCHeuristics=aggressive -XX:-UseCompressedOops TestHeapDump + */ + +import java.lang.ref.Reference; + +public class TestHeapDump { + + private static final int NUM_ITER = 10000; + + private static final int ARRAY_SIZE = 1000; + + private static final int EXPECTED_OBJECTS = + ARRAY_SIZE + // array reachable from instance field + 1 + // static field root + 1; // local field root + + static { + try { + System.loadLibrary("TestHeapDump"); + } catch (UnsatisfiedLinkError ule) { + System.err.println("Could not load TestHeapDump library"); + System.err.println("java.library.path: " + + System.getProperty("java.library.path")); + throw ule; + } + } + + native static int heapdump(Class filterClass); + + public static void main(String args[]) { + new TestHeapDump().run(); + } + + // This root needs to be discovered + static Object root = new TestObject(); + + // This field needs to be discovered + TestObject[] array; + + public void run() { + array = new TestObject[ARRAY_SIZE]; + for (int i = 0; i < ARRAY_SIZE; i++) { + array[i] = new TestObject(); + } + TestObject localRoot = new TestObject(); + for (int i = 0; i < NUM_ITER; i++) { + int numObjs = heapdump(TestObject.class); + if (numObjs != EXPECTED_OBJECTS) { + throw new RuntimeException("Expected " + EXPECTED_OBJECTS + " objects, but got " + numObjs); + } + } + Reference.reachabilityFence(array); + Reference.reachabilityFence(localRoot); + } + + // We look for the instances of this class during the heap scan + public static class TestObject {} +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/jvmti/libTestHeapDump.c 2020-01-17 17:11:27.930126191 +0100 @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2017, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include "jvmti.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef JNI_ENV_ARG
+
+#ifdef __cplusplus
+#define JNI_ENV_ARG(x, y) y
+#define JNI_ENV_PTR(x) x
+#else
+#define JNI_ENV_ARG(x,y) x, y
+#define JNI_ENV_PTR(x) (*x)
+#endif
+
+#endif
+
+#define TranslateError(err) "JVMTI error"
+
+#define PASSED 0
+#define FAILED 2
+
+static const char *EXC_CNAME = "java/lang/Exception";
+
+static jvmtiEnv *jvmti = NULL;
+static jint result = PASSED;
+
+static jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved);
+
+JNIEXPORT
+jint JNICALL Agent_OnLoad(JavaVM *jvm, char *options, void *reserved) {
+  return Agent_Initialize(jvm, options, reserved);
+}
+
+JNIEXPORT
+jint JNICALL Agent_OnAttach(JavaVM *jvm, char *options, void *reserved) {
+  return Agent_Initialize(jvm, options, reserved);
+}
+
+JNIEXPORT
+jint JNICALL JNI_OnLoad(JavaVM *jvm, void *reserved) {
+  return JNI_VERSION_1_8;
+}
+
+static
+jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved) {
+  jvmtiCapabilities capabilities;
+  jint res = JNI_ENV_PTR(jvm)->GetEnv(JNI_ENV_ARG(jvm, (void **) &jvmti),
+                                      JVMTI_VERSION_9);
+  if (res != JNI_OK || jvmti == NULL) {
+    printf(" Error: wrong result of a valid call to GetEnv!\n");
+    return JNI_ERR;
+  }
+
+  (void)memset(&capabilities, 0, sizeof(capabilities));
+  capabilities.can_tag_objects = 1;
+  capabilities.can_generate_garbage_collection_events = 1;
+  (*jvmti)->AddCapabilities(jvmti, &capabilities);
+
+  return JNI_OK;
+}
+
+static
+void throw_exc(JNIEnv *env, char *msg) {
+  jclass exc_class = JNI_ENV_PTR(env)->FindClass(JNI_ENV_ARG(env, EXC_CNAME));
+  jint rt = JNI_OK;
+
+  if (exc_class == NULL) {
+    printf("throw_exc: Error in FindClass(env, %s)\n", EXC_CNAME);
+    return;
+  }
+  rt = JNI_ENV_PTR(env)->ThrowNew(JNI_ENV_ARG(env, exc_class), msg);
+  if (rt == JNI_ERR) {
+    printf("throw_exc: Error in JNI ThrowNew(env, %s)\n", msg);
+  }
+}
+
+static jint JNICALL heap_iter_callback(jlong class_tag,
+                                       jlong size,
+                                       jlong* tag_ptr,
+                                       jint length,
+                                       void* user_data) {
+  (*((jint*)(user_data)))++;
+  return JVMTI_VISIT_OBJECTS;
+}
+
+JNIEXPORT jint JNICALL
+Java_TestHeapDump_heapdump(JNIEnv *env, jclass cls, jclass filter_cls) {
+  jvmtiHeapCallbacks callbacks;
+  jint totalCount = 0;
+  if (jvmti == NULL) {
+    throw_exc(env, "JVMTI client was not properly loaded!\n");
+    return 0;
+  }
+
+  (void)memset(&callbacks, 0, sizeof(callbacks));
+  callbacks.heap_iteration_callback = &heap_iter_callback;
+  (*jvmti)->IterateThroughHeap(jvmti, 0, filter_cls, &callbacks, (const void *)&totalCount);
+  return totalCount;
+}
+
+#ifdef __cplusplus
+}
+#endif
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestChurnNotifications.java 2020-01-17 17:11:28.536126157 +0100
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* + * @test TestChurnNotifications + * @summary Check that MX notifications are reported for all cycles + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC -Dprecise=true + * TestChurnNotifications + * + * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC -Dprecise=true + * TestChurnNotifications + */ + +/* + * @test TestChurnNotifications + * @summary Check that MX notifications are reported for all cycles + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -Dprecise=false + * TestChurnNotifications + * + * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * -Dprecise=false + * TestChurnNotifications + * + * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static + * -Dprecise=false + * TestChurnNotifications + * + * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact + * -Dprecise=false + * TestChurnNotifications + */ + +/* + * @test TestChurnNotifications + * @summary Check that MX notifications are reported for all cycles + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -Dprecise=false + * TestChurnNotifications + * + * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * -Dprecise=false + * TestChurnNotifications + */ + +import java.util.*; +import java.util.concurrent.atomic.*; +import javax.management.*; +import java.lang.management.*; +import javax.management.openmbean.*; + +import com.sun.management.GarbageCollectionNotificationInfo; + +public class TestChurnNotifications { + + static final long HEAP_MB = 128; // adjust for test configuration above + static final long TARGET_MB = Long.getLong("target", 8_000); // 8 Gb allocation + + // Should we track the churn precisely? + // Precise tracking is only reliable when GC is fully stop-the-world. Otherwise, + // we cannot tell, looking at heap used before/after, what was the GC churn. 
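+    //
+    // As an illustration with assumed numbers: a fully stop-the-world cycle
+    // that starts at 100 MB used and ends at 20 MB used contributes
+    // 100 - 20 = 80 MB to the churn counter below; summing these positive
+    // before/after differences over all cycles approximates the total
+    // garbage collected.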
+    static final boolean PRECISE = Boolean.getBoolean("precise");
+
+    static final long M = 1024 * 1024;
+
+    static volatile Object sink;
+
+    public static void main(String[] args) throws Exception {
+        final AtomicLong churnBytes = new AtomicLong();
+
+        NotificationListener listener = new NotificationListener() {
+            @Override
+            public void handleNotification(Notification n, Object o) {
+                if (n.getType().equals(GarbageCollectionNotificationInfo.GARBAGE_COLLECTION_NOTIFICATION)) {
+                    GarbageCollectionNotificationInfo info = GarbageCollectionNotificationInfo.from((CompositeData) n.getUserData());
+                    Map<String, MemoryUsage> mapBefore = info.getGcInfo().getMemoryUsageBeforeGc();
+                    Map<String, MemoryUsage> mapAfter = info.getGcInfo().getMemoryUsageAfterGc();
+
+                    MemoryUsage before = mapBefore.get("Shenandoah");
+                    MemoryUsage after = mapAfter.get("Shenandoah");
+
+                    if ((before != null) && (after != null)) {
+                        long diff = before.getUsed() - after.getUsed();
+                        if (diff > 0) {
+                            churnBytes.addAndGet(diff);
+                        }
+                    }
+                }
+            }
+        };
+
+        for (GarbageCollectorMXBean bean : ManagementFactory.getGarbageCollectorMXBeans()) {
+            ((NotificationEmitter) bean).addNotificationListener(listener, null, null);
+        }
+
+        final int size = 100_000;
+        long count = TARGET_MB * 1024 * 1024 / (16 + 4 * size);
+
+        long mem = count * (16 + 4 * size);
+
+        for (int c = 0; c < count; c++) {
+            sink = new int[size];
+        }
+
+        System.gc();
+
+        Thread.sleep(1000);
+
+        long actual = churnBytes.get();
+
+        long minExpected = PRECISE ? (mem - HEAP_MB * 1024 * 1024) : 1;
+        long maxExpected = mem + HEAP_MB * 1024 * 1024;
+
+        String msg = "Expected = [" + minExpected / M + "; " + maxExpected / M + "] (" + mem / M + "), actual = " + actual / M;
+        if (minExpected < actual && actual < maxExpected) {
+            System.out.println(msg);
+        } else {
+            throw new IllegalStateException(msg);
+        }
+    }
+}
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestMemoryMXBeans.java 2020-01-17 17:11:29.143126124 +0100
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +/** + * @test TestMemoryMXBeans + * @summary Test JMX memory beans + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @modules java.base/jdk.internal.misc + * java.management + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g TestMemoryMXBeans -1 1024 + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms1g -Xmx1g TestMemoryMXBeans 1024 1024 + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms128m -Xmx1g TestMemoryMXBeans 128 1024 + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms1g -Xmx1g -XX:ShenandoahUncommitDelay=0 TestMemoryMXBeans 1024 1024 + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms128m -Xmx1g -XX:ShenandoahUncommitDelay=0 TestMemoryMXBeans 128 1024 + */ + +import java.lang.management.*; +import java.util.*; + +public class TestMemoryMXBeans { + + public static void main(String[] args) throws Exception { + if (args.length < 2) { + throw new IllegalStateException("Should provide expected heap sizes"); + } + + long initSize = 1L * Integer.parseInt(args[0]) * 1024 * 1024; + long maxSize = 1L * Integer.parseInt(args[1]) * 1024 * 1024; + + // wait for GC to uncommit + Thread.sleep(1000); + + testMemoryBean(initSize, maxSize); + } + + public static void testMemoryBean(long initSize, long maxSize) { + MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean(); + long heapInit = memoryMXBean.getHeapMemoryUsage().getInit(); + long heapCommitted = memoryMXBean.getHeapMemoryUsage().getCommitted(); + long heapMax = memoryMXBean.getHeapMemoryUsage().getMax(); + long nonHeapInit = memoryMXBean.getNonHeapMemoryUsage().getInit(); + long nonHeapCommitted = memoryMXBean.getNonHeapMemoryUsage().getCommitted(); + long nonHeapMax = memoryMXBean.getNonHeapMemoryUsage().getMax(); + + if (initSize > 0 && heapInit != initSize) { + throw new IllegalStateException("Init heap size is wrong: " + heapInit + " vs " + initSize); + } + if (maxSize > 0 && heapMax != maxSize) { + throw new IllegalStateException("Max heap size is wrong: " + heapMax + " vs " + maxSize); + } + if (initSize > 0 && maxSize > 0 && initSize != maxSize && heapCommitted == heapMax) { + throw new IllegalStateException("Committed heap size is max: " + heapCommitted + + " (init: " + initSize + ", max: " + maxSize + ")"); + } + if (initSize > 0 && maxSize > 0 && initSize == maxSize && heapCommitted != heapMax) { + throw new IllegalStateException("Committed heap size is not max: " + heapCommitted + + " (init: " + initSize + ", max: " + maxSize + ")"); + } + if (initSize > 0 && heapCommitted < initSize) { + throw new IllegalStateException("Committed heap size is below min: " + heapCommitted + + " (init: " + initSize + ", max: " + maxSize + ")"); + } + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestMemoryPools.java 2020-01-17 17:11:29.755126090 +0100 @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test TestMemoryPools
+ * @summary Test JMX memory pools
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g TestMemoryPools
+ */
+
+import java.lang.management.*;
+import java.util.*;
+
+public class TestMemoryPools {
+
+    public static void main(String[] args) throws Exception {
+        List<MemoryManagerMXBean> mms = ManagementFactory.getMemoryManagerMXBeans();
+        if (mms == null) {
+            throw new RuntimeException("getMemoryManagerMXBeans is null");
+        }
+        if (mms.isEmpty()) {
+            throw new RuntimeException("getMemoryManagerMXBeans is empty");
+        }
+        for (MemoryManagerMXBean mmBean : mms) {
+            String[] names = mmBean.getMemoryPoolNames();
+            if (names == null) {
+                throw new RuntimeException("getMemoryPoolNames() is null");
+            }
+            if (names.length == 0) {
+                throw new RuntimeException("getMemoryPoolNames() is empty");
+            }
+            for (String name : names) {
+                if (name == null) {
+                    throw new RuntimeException("pool name is null");
+                }
+                if (name.length() == 0) {
+                    throw new RuntimeException("pool name is empty");
+                }
+            }
+        }
+    }
+}
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestPauseNotifications.java 2020-01-17 17:11:30.363126057 +0100
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +/* + * @test TestPauseNotifications + * @summary Check that MX notifications are reported for all cycles + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC + * TestPauseNotifications + * + * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC + * TestPauseNotifications + */ + +/* + * @test TestPauseNotifications + * @summary Check that MX notifications are reported for all cycles + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * TestPauseNotifications + * + * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * TestPauseNotifications + * + * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static + * TestPauseNotifications + * + * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact + * TestPauseNotifications + */ + +/* + * @test TestPauseNotifications + * @summary Check that MX notifications are reported for all cycles + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * TestPauseNotifications + * + * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * TestPauseNotifications + */ + +import java.util.*; +import java.util.concurrent.atomic.*; +import javax.management.*; +import java.lang.management.*; +import javax.management.openmbean.*; + +import com.sun.management.GarbageCollectionNotificationInfo; + +public class TestPauseNotifications { + + static final long HEAP_MB = 128; // adjust for test configuration above + static final long TARGET_MB = Long.getLong("target", 8_000); // 8 Gb allocation + + static volatile Object sink; + + public static void main(String[] args) throws Exception { + final AtomicLong pausesDuration = new AtomicLong(); + final AtomicLong cyclesDuration = new AtomicLong(); + + NotificationListener listener = new NotificationListener() { + @Override + public void handleNotification(Notification n, Object o) { + if (n.getType().equals(GarbageCollectionNotificationInfo.GARBAGE_COLLECTION_NOTIFICATION)) { + GarbageCollectionNotificationInfo info = GarbageCollectionNotificationInfo.from((CompositeData) n.getUserData()); + + System.out.println(info.getGcInfo().toString()); + System.out.println(info.getGcName()); + System.out.println(); + + long d = info.getGcInfo().getDuration(); + + String name = info.getGcName(); + if (name.contains("Shenandoah")) { + if (name.equals("Shenandoah Pauses")) { + pausesDuration.addAndGet(d); + } else if (name.equals("Shenandoah Cycles")) { + cyclesDuration.addAndGet(d); + } else { + throw new IllegalStateException("Unknown name: " + name); + } + } 
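+                    // "Shenandoah Pauses" is expected to report only stop-the-world
+                    // pause time, while "Shenandoah Cycles" covers the whole cycle
+                    // including concurrent work; the final check below relies on
+                    // pauses summing to less than cycles.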
+ } + } + }; + + for (GarbageCollectorMXBean bean : ManagementFactory.getGarbageCollectorMXBeans()) { + ((NotificationEmitter) bean).addNotificationListener(listener, null, null); + } + + final int size = 100_000; + long count = TARGET_MB * 1024 * 1024 / (16 + 4 * size); + + for (int c = 0; c < count; c++) { + sink = new int[size]; + } + + Thread.sleep(1000); + + long pausesActual = pausesDuration.get(); + long cyclesActual = cyclesDuration.get(); + + long minExpected = 1; + long maxExpected = Long.MAX_VALUE; + + { + String msg = "Pauses expected = [" + minExpected + "; " + maxExpected + "], actual = " + pausesActual; + if (minExpected < pausesActual && pausesActual < maxExpected) { + System.out.println(msg); + } else { + throw new IllegalStateException(msg); + } + } + + { + String msg = "Cycles expected = [" + minExpected + "; " + maxExpected + "], actual = " + cyclesActual; + if (minExpected < cyclesActual && cyclesActual < maxExpected) { + System.out.println(msg); + } else { + throw new IllegalStateException(msg); + } + } + + { + String msg = "Cycle duration (" + cyclesActual + "), pause duration (" + pausesActual + ")"; + if (pausesActual < cyclesActual) { + System.out.println(msg); + } else { + throw new IllegalStateException(msg); + } + } + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/oom/TestAllocLargeObj.java 2020-01-17 17:11:30.974126023 +0100 @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+/**
+ * @test TestAllocLargeObj
+ * @summary Test that allocation of a large object results in OOM, but does not crash the JVM
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ * @library /test/lib
+ * @run main TestAllocLargeObj
+ */
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+public class TestAllocLargeObj {
+
+    static final int SIZE = 1 * 1024 * 1024;
+    static final int COUNT = 16;
+
+    static volatile Object sink;
+
+    public static void work() throws Exception {
+        Object[] root = new Object[COUNT];
+        sink = root;
+        for (int c = 0; c < COUNT; c++) {
+            root[c] = new Object[SIZE];
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        if (args.length > 0) {
+            work();
+            return;
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-Xmx16m",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    TestAllocLargeObj.class.getName(),
+                    "test");
+
+            OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
+            analyzer.shouldHaveExitValue(1);
+            analyzer.shouldContain("java.lang.OutOfMemoryError: Java heap space");
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-Xmx1g",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    TestAllocLargeObj.class.getName(),
+                    "test");
+
+            OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
+            analyzer.shouldHaveExitValue(0);
+            analyzer.shouldNotContain("java.lang.OutOfMemoryError: Java heap space");
+        }
+    }
+}
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/test/hotspot/jtreg/gc/shenandoah/oom/TestAllocLargerThanHeap.java 2020-01-17 17:11:31.591125989 +0100
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test TestAllocLargerThanHeap
+ * @summary Test that allocation of an object larger than the heap fails predictably
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ * @library /test/lib
+ * @run main TestAllocLargerThanHeap
+ */
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+public class TestAllocLargerThanHeap {
+
+    static final int SIZE = 16 * 1024 * 1024;
+
+    static volatile Object sink;
+
+    public static void work() throws Exception {
+        sink = new Object[SIZE];
+    }
+
+    public static void main(String[] args) throws Exception {
+        if (args.length > 0) {
+            work();
+            return;
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-Xmx16m",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    TestAllocLargerThanHeap.class.getName(),
+                    "test");
+
+            OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
+            analyzer.shouldHaveExitValue(1);
+            analyzer.shouldContain("java.lang.OutOfMemoryError: Java heap space");
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-Xmx1g",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    TestAllocLargerThanHeap.class.getName(),
+                    "test");
+
+            OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
+            analyzer.shouldHaveExitValue(0);
+            analyzer.shouldNotContain("java.lang.OutOfMemoryError: Java heap space");
+        }
+    }
+}
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/test/hotspot/jtreg/gc/shenandoah/oom/TestAllocSmallObj.java 2020-01-17 17:11:32.187125956 +0100
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test TestAllocSmallObj
+ * @summary Test that allocation of small objects results in OOM, but does not crash the JVM
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ * @library /test/lib
+ * @run main TestAllocSmallObj
+ */
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+public class TestAllocSmallObj {
+
+    static final int COUNT = 16 * 1024 * 1024;
+
+    static volatile Object sink;
+
+    public static void work() throws Exception {
+        Object[] root = new Object[COUNT];
+        sink = root;
+        for (int c = 0; c < COUNT; c++) {
+            root[c] = new Object();
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        if (args.length > 0) {
+            work();
+            return;
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-Xmx16m",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    TestAllocSmallObj.class.getName(),
+                    "test");
+
+            OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
+            analyzer.shouldHaveExitValue(1);
+            analyzer.shouldContain("java.lang.OutOfMemoryError: Java heap space");
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-Xmx1g",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    TestAllocSmallObj.class.getName(),
+                    "test");
+
+            OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
+            analyzer.shouldHaveExitValue(0);
+            analyzer.shouldNotContain("java.lang.OutOfMemoryError: Java heap space");
+        }
+    }
+}
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/test/hotspot/jtreg/gc/shenandoah/oom/TestClassLoaderLeak.java 2020-01-17 17:11:32.790125923 +0100
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test TestClassLoaderLeak
+ * @summary Test OOME due to classloader leak
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ * @library /test/lib
+ * @run main TestClassLoaderLeak
+ */
+
+import java.util.*;
+import java.io.*;
+import java.nio.*;
+import java.nio.file.*;
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+public class TestClassLoaderLeak {
+
+    static final int SIZE = 1 * 1024 * 1024;
+    static final int COUNT = 128;
+
+    static volatile Object sink;
+
+    static class Dummy {
+        static final int[] PAYLOAD = new int[SIZE];
+    }
+
+    static class MyClassLoader extends ClassLoader {
+        final String path;
+
+        MyClassLoader(String path) {
+            this.path = path;
+        }
+
+        public Class<?> loadClass(String name) throws ClassNotFoundException {
+            try {
+                File f = new File(path, name + ".class");
+                if (!f.exists()) {
+                    return super.loadClass(name);
+                }
+
+                Path path = Paths.get(f.getAbsolutePath());
+                byte[] cls = Files.readAllBytes(path);
+                return defineClass(name, cls, 0, cls.length, null);
+            } catch (IOException e) {
+                throw new ClassNotFoundException(name);
+            }
+        }
+    }
+
+    static void load(String path) throws Exception {
+        ClassLoader cl = new MyClassLoader(path);
+        Class<?> c = (Class<?>) Class.forName("TestClassLoaderLeak$Dummy", true, cl);
+        if (c.getClassLoader() != cl) {
+            throw new IllegalStateException("Should have been loaded by target loader");
+        }
+        sink = c;
+    }
+
+    public static void passWith(String... args) throws Exception {
+        testWith(true, args);
+    }
+
+    public static void failWith(String... args) throws Exception {
+        testWith(false, args);
+    }
+
+    public static void testWith(boolean shouldPass, String... args) throws Exception {
+        List<String> pbArgs = new ArrayList<>();
+        pbArgs.add("-Xmx128m");
+        pbArgs.add("-XX:+UnlockExperimentalVMOptions");
+        pbArgs.add("-XX:+UnlockDiagnosticVMOptions");
+        pbArgs.add("-XX:+UseShenandoahGC");
+        pbArgs.addAll(Arrays.asList(args));
+        pbArgs.add(TestClassLoaderLeak.class.getName());
+        pbArgs.add("test");
+
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(pbArgs.toArray(new String[0]));
+
+        OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
+
+        if (shouldPass) {
+            analyzer.shouldHaveExitValue(0);
+            analyzer.shouldNotContain("java.lang.OutOfMemoryError");
+            analyzer.shouldContain("All good");
+        } else {
+            analyzer.shouldHaveExitValue(1);
+            analyzer.shouldContain("java.lang.OutOfMemoryError");
+            analyzer.shouldNotContain("All good");
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        if (args.length > 0) {
+            String classDir = TestClassLoaderLeak.class.getProtectionDomain().getCodeSource().getLocation().getPath();
+            for (int c = 0; c < COUNT; c++) {
+                load(classDir);
+            }
+            System.out.println("All good");
+            return;
+        }
+
+        String[][][] modeHeuristics = new String[][][] {
+             {{"normal"},    {"adaptive", "compact", "static", "aggressive"}},
+             {{"traversal"}, {"adaptive", "aggressive"}},
+             {{"passive"},   {"passive"}}
+        };
+
+        for (String[][] mh : modeHeuristics) {
+            String mode = mh[0][0];
+            String[] heuristics = mh[1];
+            for (String h : heuristics) {
+                // Forceful enabling should work
+                passWith("-XX:ShenandoahGCMode=" + mode, "-XX:ShenandoahGCHeuristics=" + h, "-XX:+ClassUnloading");
+                passWith("-XX:ShenandoahGCMode=" + mode, "-XX:ShenandoahGCHeuristics=" + h, "-XX:+ClassUnloadingWithConcurrentMark");
+
+                // Even when concurrent unloading is disabled, Full GC has to recover
+                passWith("-XX:ShenandoahGCMode=" + mode, "-XX:ShenandoahGCHeuristics=" + h,
"-XX:+ClassUnloading", "-XX:-ClassUnloadingWithConcurrentMark"); + passWith("-XX:ShenandoahGCMode=" + mode, "-XX:ShenandoahGCHeuristics=" + h, "-XX:+ClassUnloading", "-XX:-ClassUnloadingWithConcurrentMark", "-XX:ShenandoahUnloadClassesFrequency=0"); + passWith("-XX:ShenandoahGCMode=" + mode, "-XX:ShenandoahGCHeuristics=" + h, "-XX:+ClassUnloading", "-XX:+ClassUnloadingWithConcurrentMark", "-XX:ShenandoahUnloadClassesFrequency=0"); + + // Should OOME when unloading forcefully disabled, even if local flags try to enable it back + failWith("-XX:ShenandoahGCMode=" + mode, "-XX:ShenandoahGCHeuristics=" + h, "-XX:-ClassUnloading"); + failWith("-XX:ShenandoahGCMode=" + mode, "-XX:ShenandoahGCHeuristics=" + h, "-XX:-ClassUnloading", "-XX:+ClassUnloadingWithConcurrentMark"); + failWith("-XX:ShenandoahGCMode=" + mode, "-XX:ShenandoahGCHeuristics=" + h, "-XX:-ClassUnloading", "-XX:+ClassUnloadingWithConcurrentMark", "-XX:ShenandoahUnloadClassesFrequency=1"); + failWith("-XX:ShenandoahGCMode=" + mode, "-XX:ShenandoahGCHeuristics=" + h, "-XX:-ClassUnloading", "-XX:-ClassUnloadingWithConcurrentMark", "-XX:ShenandoahUnloadClassesFrequency=1"); + } + } + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/oom/TestThreadFailure.java 2020-01-17 17:11:33.394125890 +0100 @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+/**
+ * @test TestThreadFailure
+ * @summary Test that OOME in a separate thread is recoverable
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ * @library /test/lib
+ * @run main TestThreadFailure
+ */
+
+import java.util.*;
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+public class TestThreadFailure {
+
+    static final int SIZE = 1024;
+    static final int COUNT = 16;
+
+    static class NastyThread extends Thread {
+        @Override
+        public void run() {
+            List<Object[]> root = new ArrayList<Object[]>();
+            while (true) {
+                root.add(new Object[SIZE]);
+            }
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        if (args.length > 0) {
+            for (int t = 0; t < COUNT; t++) {
+                Thread thread = new NastyThread();
+                thread.start();
+                thread.join();
+            }
+            System.out.println("All good");
+            return;
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-Xmx32m",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    TestThreadFailure.class.getName(),
+                    "test");
+
+            OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
+            analyzer.shouldHaveExitValue(0);
+            analyzer.shouldContain("java.lang.OutOfMemoryError");
+            analyzer.shouldContain("All good");
+        }
+    }
+}
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/test/hotspot/jtreg/gc/shenandoah/options/TestAlwaysPreTouch.java 2020-01-17 17:11:34.000125856 +0100
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +/* + * @test TestAlwaysPreTouch + * @summary Check that Shenandoah with AlwaysPreTouch does not fire asserts + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+AlwaysPreTouch -Xmx1g TestAlwaysPreTouch + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+AlwaysPreTouch -XX:ConcGCThreads=2 -Xmx1g TestAlwaysPreTouch + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+AlwaysPreTouch -XX:ParallelGCThreads=2 -Xmx1g TestAlwaysPreTouch + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+AlwaysPreTouch -Xms128m -Xmx1g TestAlwaysPreTouch + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+AlwaysPreTouch -Xms1g -Xmx1g TestAlwaysPreTouch + */ + +public class TestAlwaysPreTouch { + + public static void main(String[] args) throws Exception { + // checking the initialization before entering main() + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/options/TestArgumentRanges.java 2020-01-17 17:11:34.601125823 +0100 @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions.
+ * + */ + +/* + * @test TestArgumentRanges + * @summary Test that Shenandoah arguments are checked for ranges where applicable + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.management + * @run driver TestArgumentRanges + */ + +import jdk.test.lib.process.ProcessTools; +import jdk.test.lib.process.OutputAnalyzer; + +public class TestArgumentRanges { + public static void main(String[] args) throws Exception { + testRange("ShenandoahGarbageThreshold", 0, 100); + testRange("ShenandoahFreeThreshold", 0, 100); + testRange("ShenandoahAllocationThreshold", 0, 100); + testHeuristics(); + } + + private static void testHeuristics() throws Exception { + + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockDiagnosticVMOptions", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:ShenandoahGCHeuristics=aggressive", + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + } + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockDiagnosticVMOptions", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:ShenandoahGCHeuristics=static", + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + } + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockDiagnosticVMOptions", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:ShenandoahGCHeuristics=fluff", + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldMatch("Unknown -XX:ShenandoahGCHeuristics option"); + output.shouldHaveExitValue(1); + } + } + + private static void testRange(String option, int min, int max) throws Exception { + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockDiagnosticVMOptions", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:" + option + "=" + (max + 1), + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(1); + } + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockDiagnosticVMOptions", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:" + option + "=" + max, + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + } + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockDiagnosticVMOptions", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:" + option + "=" + (min - 1), + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(1); + } + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockDiagnosticVMOptions", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:" + option + "=" + min, + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + } + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/options/TestClassUnloadingArguments.java 2020-01-17 17:11:35.194125791 +0100 @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* + * @test TestClassUnloadingArguments + * @summary Test that class unloading arguments are sane + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @library /test/lib + * @run driver TestClassUnloadingArguments + */ + +import java.util.*; + +import jdk.test.lib.Asserts; +import jdk.test.lib.process.ProcessTools; +import jdk.test.lib.process.OutputAnalyzer; + +public class TestClassUnloadingArguments { + + public static void testWith(String msg, boolean cu, boolean cuConc, String... args) throws Exception { + String[] cmds = Arrays.copyOf(args, args.length + 2); + cmds[args.length] = "-XX:+PrintFlagsFinal"; + cmds[args.length + 1] = "-version"; + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(cmds); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + output.shouldContain("ClassUnloading"); + output.shouldContain("ClassUnloadingWithConcurrentMark"); + + Asserts.assertEQ(output.firstMatch("(.+?) ClassUnloading.+?= (.+?) (.+?)", 2), + Boolean.toString(cu), + msg + ", but got wrong ClassUnloading"); + Asserts.assertEQ(output.firstMatch("(.+?) ClassUnloadingWithConcurrentMark.+?= (.+?) 
(.+?)", 2), + Boolean.toString(cuConc), + msg + ", but got wrong ClassUnloadingWithConcurrentMark"); + } + + public static void main(String[] args) throws Exception { + testDefaultGC(); + testShenandoah(); + } + + public static void testDefaultGC() throws Exception { + testWith("Default GC should have class unloading enabled", + true, true); + + testWith("Default GC should disable everything", + false, false, + "-XX:-ClassUnloading"); + + testWith("Default GC should disable conc unload", + true, false, + "-XX:-ClassUnloadingWithConcurrentMark"); + + testWith("Default GC should not let conc unload to be enabled separately", + false, false, + "-XX:-ClassUnloading", + "-XX:+ClassUnloadingWithConcurrentMark"); + } + + public static void testShenandoah() throws Exception { + testWith("Shenandoah GC should have class unloading enabled", + true, false, + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC"); + + testWith("Shenandoah GC should disable everything", + false, false, + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:-ClassUnloading"); + + testWith("Shenandoah GC should enable conc unload", + true, true, + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:+ClassUnloadingWithConcurrentMark"); + + testWith("Shenandoah GC should not let conc unload to be enabled separately", + false, false, + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:-ClassUnloading", + "-XX:+ClassUnloadingWithConcurrentMark"); + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/options/TestCodeCacheRootStyles.java 2020-01-17 17:11:35.805125757 +0100 @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* @test TestCodeCacheRootStyles + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahCodeRootsStyle=0 TestCodeCacheRootStyles + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahCodeRootsStyle=1 TestCodeCacheRootStyles + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahCodeRootsStyle=2 TestCodeCacheRootStyles + */ + +public class TestCodeCacheRootStyles { + public static void main(String[] args) { + // Bug should crash before we get here. + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/options/TestCriticalControlThreadPriority.java 2020-01-17 17:11:36.413125723 +0100 @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2019, Red Hat, Inc. 
All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* + * @test TestCriticalControlThreadPriority + * @summary Check that ShenandoahCriticalControlThreadPriority works + * @bug 8217343 + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:-ShenandoahCriticalControlThreadPriority -Xmx1g TestCriticalControlThreadPriority + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ShenandoahCriticalControlThreadPriority -Xmx1g TestCriticalControlThreadPriority + */ + +public class TestCriticalControlThreadPriority { + + public static void main(String[] args) throws Exception { + // checking the initialization before entering main() + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/options/TestEnabled.java 2020-01-17 17:11:37.021125690 +0100 @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.lang.management.GarbageCollectorMXBean; +import java.lang.management.ManagementFactory; + +/* + * @test TestEnabled + * @key gc + * @requires vm.gc.Shenandoah & vm.gc == "null" & !vm.graal.enabled + * @run main/othervm -Dexpected=false -Xmx64m TestEnabled + * @run main/othervm -Dexpected=true -Xmx64m -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC TestEnabled + */ + +/* + * @test TestEnabledAlready + * @key gc + * @requires vm.gc.Shenandoah & vm.gc == "Shenandoah" & !vm.graal.enabled + * @run main/othervm -Dexpected=true -Xmx64m TestEnabled + */ +public class TestEnabled { + + public static void main(String... 
args) { + boolean expected = Boolean.getBoolean("expected"); + boolean actual = isEnabled(); + if (expected != actual) { + throw new IllegalStateException("Error: expected = " + expected + ", actual = " + actual); + } + } + + public static boolean isEnabled() { + for (GarbageCollectorMXBean bean : ManagementFactory.getGarbageCollectorMXBeans()) { + if (bean.getName().contains("Shenandoah")) { + return true; + } + } + return false; + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/options/TestExplicitGC.java 2020-01-17 17:11:37.630125656 +0100 @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* + * @test TestExplicitGC + * @summary Test that Shenandoah reacts to explicit GC flags appropriately + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.management + * @run driver TestExplicitGC + */ + +import jdk.test.lib.process.ProcessTools; +import jdk.test.lib.process.OutputAnalyzer; + +public class TestExplicitGC { + + enum Mode { + PRODUCT, + DIAGNOSTIC, + EXPERIMENTAL, + } + + public static void main(String[] args) throws Exception { + if (args.length > 0) { + System.out.println("Calling System.gc()"); + System.gc(); + return; + } + + String[] full = new String[] { + "Pause Full" + }; + + String[] concNormal = new String[] { + "Pause Init Mark", + "Pause Final Mark", + }; + + String[] concTraversal = new String[] { + "Pause Init Traversal", + "Pause Final Traversal", + }; + + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-Xlog:gc", + TestExplicitGC.class.getName(), + "test"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + for (String p : full) { + output.shouldNotContain(p); + } + for (String p : concNormal) { + output.shouldContain(p); + } + for (String p : concTraversal) { + output.shouldNotContain(p); + } + } + + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-Xlog:gc", + "-XX:+DisableExplicitGC", + TestExplicitGC.class.getName(), + "test"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + for (String p : full) { + output.shouldNotContain(p); + } + for (String p : concNormal) { + output.shouldNotContain(p); + } + for (String p : concTraversal) { + output.shouldNotContain(p); + } + } + + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + 
"-Xlog:gc", + "-XX:+ExplicitGCInvokesConcurrent", + TestExplicitGC.class.getName(), + "test"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + for (String p : full) { + output.shouldNotContain(p); + } + for (String p : concNormal) { + output.shouldContain(p); + } + for (String p : concTraversal) { + output.shouldNotContain(p); + } + } + + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-Xlog:gc", + "-XX:+ExplicitGCInvokesConcurrent", + "-XX:ShenandoahGCMode=traversal", + TestExplicitGC.class.getName(), + "test"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + for (String p : full) { + output.shouldNotContain(p); + } + for (String p : concNormal) { + output.shouldNotContain(p); + } + for (String p : concTraversal) { + output.shouldContain(p); + } + } + + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-Xlog:gc", + "-XX:-ExplicitGCInvokesConcurrent", + TestExplicitGC.class.getName(), + "test"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + for (String p : full) { + output.shouldContain(p); + } + for (String p : concNormal) { + output.shouldNotContain(p); + } + for (String p : concTraversal) { + output.shouldNotContain(p); + } + } + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/options/TestExplicitGCNoConcurrent.java 2020-01-17 17:11:38.235125623 +0100 @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +/* + * @test TestExplicitGCNoConcurrent + * @summary Test that Shenandoah reacts to explicit GC flags appropriately + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.management + * @run driver TestExplicitGCNoConcurrent + */ + +import jdk.test.lib.process.ProcessTools; +import jdk.test.lib.process.OutputAnalyzer; + +public class TestExplicitGCNoConcurrent { + + public static void main(String[] args) throws Exception { + if (args.length > 0) { + System.out.println("Calling System.gc()"); + System.gc(); + return; + } + + String[] concurrent = new String[] { + "Pause Init Mark", + "Pause Final Mark", + "Pause Init Update Refs", + "Pause Final Update Refs", + "Pause Init Traversal", + "Pause Final Traversal", + }; + + String[] opts = new String[] { + "", + "-XX:-ExplicitGCInvokesConcurrent", + "-XX:+ExplicitGCInvokesConcurrent" + }; + + for (String opt : opts) { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-Xlog:gc", + "-XX:+UnlockDiagnosticVMOptions", + opt, + "-XX:ShenandoahGCHeuristics=passive", + TestExplicitGCNoConcurrent.class.getName(), + "test"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + for (String p : concurrent) { + output.shouldNotContain(p); + } + } + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/options/TestHeuristicsUnlock.java 2020-01-17 17:11:38.843125589 +0100 @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +/* + * @test TestHeuristicsUnlock + * @summary Test that Shenandoah heuristics are unlocked properly + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.management + * @run driver TestHeuristicsUnlock + */ + +import jdk.test.lib.process.ProcessTools; +import jdk.test.lib.process.OutputAnalyzer; + +public class TestHeuristicsUnlock { + + enum Mode { + PRODUCT, + DIAGNOSTIC, + EXPERIMENTAL, + } + + public static void main(String[] args) throws Exception { + testWith("-XX:ShenandoahGCHeuristics=adaptive", Mode.PRODUCT); + testWith("-XX:ShenandoahGCHeuristics=static", Mode.PRODUCT); + testWith("-XX:ShenandoahGCHeuristics=compact", Mode.PRODUCT); + + testWith("-XX:ShenandoahGCMode=traversal", Mode.PRODUCT); + + testWith("-XX:ShenandoahGCHeuristics=aggressive", Mode.DIAGNOSTIC); + testWith("-XX:ShenandoahGCMode=passive", Mode.DIAGNOSTIC); + } + + private static void testWith(String h, Mode mode) throws Exception { + if (false) { // While ShenandoahGC is an experimental flag, this makes no sense to test + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:-UnlockDiagnosticVMOptions", + "-XX:-UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + h, + "-version" + ); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + switch (mode) { + case PRODUCT: + output.shouldHaveExitValue(0); + break; + case DIAGNOSTIC: + case EXPERIMENTAL: + output.shouldNotHaveExitValue(0); + break; + } + } + + if (false) { // While ShenandoahGC is an experimental flag, this makes no sense to test + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:-UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + h, + "-version" + ); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + switch (mode) { + case PRODUCT: + case DIAGNOSTIC: + output.shouldHaveExitValue(0); + break; + case EXPERIMENTAL: + output.shouldNotHaveExitValue(0); + break; + } + } + + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:-UnlockDiagnosticVMOptions", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + h, + "-version" + ); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + switch (mode) { + case PRODUCT: + case EXPERIMENTAL: + output.shouldHaveExitValue(0); + break; + case DIAGNOSTIC: + output.shouldNotHaveExitValue(0); + break; + } + } + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/options/TestHumongousMoves.java 2020-01-17 17:11:39.446125556 +0100 @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* + * @test TestHumongousMoves + * @summary Check that Shenandoah reacts to the humongous moves setting correctly + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahHumongousMoves + * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify + * TestHumongousMoves + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahHumongousMoves + * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify + * TestHumongousMoves + */ + +import java.util.Random; + +public class TestHumongousMoves { + + static final long TARGET_MB = Long.getLong("target", 10_000); // 10 Gb allocation + + static volatile Object sink; + + public static void main(String[] args) throws Exception { + final int min = 0; + final int max = 384 * 1024; + long count = TARGET_MB * 1024 * 1024 / (16 + 4 * (min + (max - min) / 2)); + + Random r = new Random(); + for (long c = 0; c < count; c++) { + sink = new int[min + r.nextInt(max - min)]; + } + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/options/TestHumongousThresholdArgs.java 2020-01-17 17:11:40.055125523 +0100 @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions.
+ * + */ + +/* + * @test TestHumongousThresholdArgs + * @summary Test that Shenandoah humongous threshold args are checked + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.management + * @run driver TestHumongousThresholdArgs + */ + +import jdk.test.lib.process.ProcessTools; +import jdk.test.lib.process.OutputAnalyzer; + +public class TestHumongousThresholdArgs { + public static void main(String[] args) throws Exception { + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + } + + int[] valid = new int[] {1, 10, 50, 90, 100}; + int[] invalid = new int[] {-100, -1, 0, 101, 1000}; + + for (int v : valid) { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:ShenandoahHumongousThreshold=" + v, + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + } + + for (int v : invalid) { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:ShenandoahHumongousThreshold=" + v, + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(1); + } + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/options/TestLoopMiningArguments.java 2020-01-17 17:11:40.662125489 +0100 @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* + * @test TestLoopMiningArguments + * @summary Test that loop mining arguments are sane + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @requires vm.flavor == "server" + * @library /test/lib + * @run driver TestLoopMiningArguments + */ + +import java.util.*; + +import jdk.test.lib.Asserts; +import jdk.test.lib.process.ProcessTools; +import jdk.test.lib.process.OutputAnalyzer; + +public class TestLoopMiningArguments { + + public static void testWith(String msg, boolean cls, int iters, String... 
args) throws Exception { + String[] cmds = Arrays.copyOf(args, args.length + 2); + cmds[args.length] = "-XX:+PrintFlagsFinal"; + cmds[args.length + 1] = "-version"; + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(cmds); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + output.shouldContain("UseCountedLoopSafepoints"); + output.shouldContain("LoopStripMiningIter"); + + Asserts.assertEQ(output.firstMatch("(.+?) UseCountedLoopSafepoints.+?= (.+?) (.+?)", 2), Boolean.toString(cls), msg + ", but got wrong CLS"); + Asserts.assertEQ(output.firstMatch("(.+?) LoopStripMiningIter.+?= (.+?) (.+?)", 2), String.valueOf(iters), msg + ", but got wrong LSM"); + } + + public static void main(String[] args) throws Exception { + testWith("Shenandoah should have CLS and LSM enabled", + true, 1000, + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC" + ); + + testWith("Shenandoah with +CLS should set LSM = 1", + true, 1, + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:+UseCountedLoopSafepoints" + ); + + testWith("Shenandoah GC with +CLS should not override LSM>1", + true, 10, + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:LoopStripMiningIter=10", + "-XX:+UseCountedLoopSafepoints" + ); + + testWith("Shenandoah GC with +CLS should not override LSM=1", + true, 1, + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:LoopStripMiningIter=1", + "-XX:+UseCountedLoopSafepoints" + ); + + testWith("Shenandoah GC with +CLS should override LSM=0 to 1", + true, 1, + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:LoopStripMiningIter=0", + "-XX:+UseCountedLoopSafepoints" + ); + + testWith("Shenandoah GC with -CLS should set LSM = 0", + false, 0, + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:-UseCountedLoopSafepoints" + ); + + testWith("Shenandoah GC with -CLS should override LSM to 0", + false, 0, + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:LoopStripMiningIter=10", + "-XX:-UseCountedLoopSafepoints" + ); + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/options/TestObjectAlignment.java 2020-01-17 17:11:41.268125456 +0100 @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +/* + * @test TestObjectAlignment + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC TestObjectAlignment + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx16m TestObjectAlignment + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx32m TestObjectAlignment + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx64m TestObjectAlignment + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx128m TestObjectAlignment + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx256m TestObjectAlignment + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx512m TestObjectAlignment + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g TestObjectAlignment + */ + +/* + * @test TestObjectAlignment + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled & (vm.bits == "64") + * + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 TestObjectAlignment + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -Xmx16m TestObjectAlignment + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -Xmx32m TestObjectAlignment + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -Xmx64m TestObjectAlignment + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -Xmx128m TestObjectAlignment + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -Xmx256m TestObjectAlignment + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -Xmx512m TestObjectAlignment + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -Xmx1g TestObjectAlignment + */ + +public class TestObjectAlignment { + + public static void main(String[] args) throws Exception { + // Testing the checking code on startup + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/options/TestPacing.java 2020-01-17 17:11:41.874125422 +0100 @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +/* + * @test TestPacing + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:-ShenandoahPacing -Xmx128m TestPacing + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ShenandoahPacing -Xmx128m TestPacing + */ + +public class TestPacing { + static final long TARGET_MB = Long.getLong("target", 1000); // 1 Gb allocation + + static volatile Object sink; + + public static void main(String[] args) throws Exception { + long count = TARGET_MB * 1024 * 1024 / 16; + for (long c = 0; c < count; c++) { + sink = new Object(); + } + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/options/TestParallelRegionStride.java 2020-01-17 17:11:42.471125390 +0100 @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* + * @test TestParallelRegionStride + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahParallelRegionStride=1 -Xmx128m TestParallelRegionStride + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahParallelRegionStride=10 -Xmx128m TestParallelRegionStride + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahParallelRegionStride=100 -Xmx128m TestParallelRegionStride + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahParallelRegionStride=1024 -Xmx128m TestParallelRegionStride + */ + +public class TestParallelRegionStride { + static final long TARGET_MB = Long.getLong("target", 1000); // 1 Gb allocation + + static volatile Object sink; + + public static void main(String[] args) throws Exception { + long count = TARGET_MB * 1024 * 1024 / 16; + for (long c = 0; c < count; c++) { + sink = new Object(); + } + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/options/TestRegionSizeArgs.java 2020-01-17 17:11:43.080125356 +0100 @@ -0,0 +1,238 @@ +/* + * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* + * @test TestRegionSizeArgs + * @summary Test that Shenandoah region size args are checked + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.management + * @run driver TestRegionSizeArgs + */ + +import jdk.test.lib.process.ProcessTools; +import jdk.test.lib.process.OutputAnalyzer; + +public class TestRegionSizeArgs { + public static void main(String[] args) throws Exception { + testInvalidRegionSizes(); + testMinRegionSize(); + testMaxRegionSize(); + } + + private static void testInvalidRegionSizes() throws Exception { + + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-Xms4m", + "-Xmx1g", + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + } + + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-Xms8m", + "-Xmx1g", + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + } + + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-Xms100m", + "-Xmx1g", + "-XX:ShenandoahHeapRegionSize=200m", + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldMatch("Invalid -XX:ShenandoahHeapRegionSize option"); + output.shouldHaveExitValue(1); + } + + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-Xms100m", + "-Xmx1g", + "-XX:ShenandoahHeapRegionSize=9m", + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + } + + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-Xms100m", + "-Xmx1g", + "-XX:ShenandoahHeapRegionSize=255K", + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldMatch("Invalid -XX:ShenandoahHeapRegionSize option"); + output.shouldHaveExitValue(1); + } + + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-Xms100m", + "-Xmx1g", + "-XX:ShenandoahHeapRegionSize=260K", + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + } + + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-Xms1g", + "-Xmx1g", + "-XX:ShenandoahHeapRegionSize=32M", + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + } + + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-Xms1g", + "-Xmx1g", + "-XX:ShenandoahHeapRegionSize=64M", + "-version"); + OutputAnalyzer 
output = new OutputAnalyzer(pb.start()); + output.shouldMatch("Invalid -XX:ShenandoahHeapRegionSize option"); + output.shouldHaveExitValue(1); + } + + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-Xms1g", + "-Xmx1g", + "-XX:ShenandoahHeapRegionSize=256K", + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + } + + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-Xms1g", + "-Xmx1g", + "-XX:ShenandoahHeapRegionSize=128K", + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldMatch("Invalid -XX:ShenandoahHeapRegionSize option"); + output.shouldHaveExitValue(1); + } + } + + private static void testMinRegionSize() throws Exception { + + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-Xms100m", + "-Xmx1g", + "-XX:ShenandoahMinRegionSize=255K", + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldMatch("Invalid -XX:ShenandoahMinRegionSize option"); + output.shouldHaveExitValue(1); + } + + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-Xms100m", + "-Xmx1g", + "-XX:ShenandoahMinRegionSize=1M", + "-XX:ShenandoahMaxRegionSize=260K", + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldMatch("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize"); + output.shouldHaveExitValue(1); + } + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-Xms100m", + "-Xmx1g", + "-XX:ShenandoahMinRegionSize=200m", + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldMatch("Invalid -XX:ShenandoahMinRegionSize option"); + output.shouldHaveExitValue(1); + } + + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-Xms100m", + "-Xmx1g", + "-XX:ShenandoahMinRegionSize=9m", + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + } + + } + + private static void testMaxRegionSize() throws Exception { + + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-Xms100m", + "-Xmx1g", + "-XX:ShenandoahMaxRegionSize=255K", + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldMatch("Invalid -XX:ShenandoahMaxRegionSize option"); + output.shouldHaveExitValue(1); + } + + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-Xms100m", + "-Xmx1g", + "-XX:ShenandoahMinRegionSize=1M", + "-XX:ShenandoahMaxRegionSize=260K", + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldMatch("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize"); + output.shouldHaveExitValue(1); + } + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/options/TestSafepointWorkers.java 2020-01-17 17:11:43.685125323 +0100 @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2019, Red Hat, Inc. All rights reserved. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* + * @test TestSafepointWorkers + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahParallelSafepointThreads=1 -Xmx128m TestSafepointWorkers + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahParallelSafepointThreads=2 -Xmx128m TestSafepointWorkers + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahParallelSafepointThreads=4 -Xmx128m TestSafepointWorkers + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahParallelSafepointThreads=8 -Xmx128m TestSafepointWorkers + */ + +public class TestSafepointWorkers { + static final long TARGET_MB = Long.getLong("target", 1000); // 1 Gb allocation + + static volatile Object sink; + + public static void main(String[] args) throws Exception { + long count = TARGET_MB * 1024 * 1024 / 16; + for (long c = 0; c < count; c++) { + sink = new Object(); + } + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/options/TestSelectiveBarrierFlags.java 2020-01-17 17:11:44.290125289 +0100 @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +/* @test TestSelectiveBarrierFlags + * @summary Test that selective barrier enabling works, by aggressively compiling HelloWorld with combinations + * of barrier flags + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @library /test/lib + * @run main/othervm TestSelectiveBarrierFlags -Xint + * @run main/othervm TestSelectiveBarrierFlags -Xbatch -XX:CompileThreshold=100 -XX:TieredStopAtLevel=1 + * @run main/othervm TestSelectiveBarrierFlags -Xbatch -XX:CompileThreshold=100 -XX:-TieredCompilation -XX:+IgnoreUnrecognizedVMOptions -XX:+ShenandoahVerifyOptoBarriers + */ + +import java.util.*; +import java.util.concurrent.*; + +import jdk.test.lib.process.ProcessTools; +import jdk.test.lib.process.OutputAnalyzer; + +public class TestSelectiveBarrierFlags { + + public static void main(String[] args) throws Exception { + String[][] opts = { + new String[] { "ShenandoahKeepAliveBarrier" }, + new String[] { "ShenandoahLoadRefBarrier" }, + new String[] { "ShenandoahSATBBarrier", "ShenandoahStoreValEnqueueBarrier" }, + new String[] { "ShenandoahCASBarrier" }, + new String[] { "ShenandoahCloneBarrier" }, + }; + + int size = 1; + for (String[] l : opts) { + size *= (l.length + 1); + } + + ExecutorService pool = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors()); + + for (int c = 0; c < size; c++) { + // Treat the combination counter as a mixed-radix number over the flag groups + int t = c; + + List<String> conf = new ArrayList<>(); + conf.addAll(Arrays.asList(args)); + conf.add("-Xmx128m"); + conf.add("-XX:+UnlockDiagnosticVMOptions"); + conf.add("-XX:+UnlockExperimentalVMOptions"); + conf.add("-XX:+UseShenandoahGC"); + conf.add("-XX:ShenandoahGCMode=passive"); + + for (String[] l : opts) { + // Make a choice which flag to select from the group. + // Zero means no flag is selected from the group. + int choice = t % (l.length + 1); + for (int e = 0; e < l.length; e++) { + conf.add("-XX:" + ((choice == (e + 1)) ? "+" : "-") + l[e]); + } + t = t / (l.length + 1); + } + + conf.add("TestSelectiveBarrierFlags$Test"); + + pool.submit(() -> { + try { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(conf.toArray(new String[0])); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + } catch (Exception e) { + e.printStackTrace(); + System.exit(1); + } + }); + } + + pool.shutdown(); + pool.awaitTermination(1, TimeUnit.HOURS); + } + + public static class Test { + public static void main(String... args) { + System.out.println("HelloWorld"); + } + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/options/TestSingleThreaded.java 2020-01-17 17:11:44.893125256 +0100 @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* @test TestSingleThreaded + * @summary Test Shenandoah with a single worker thread + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:ParallelGCThreads=1 -XX:ConcGCThreads=1 TestSingleThreaded + */ + +public class TestSingleThreaded { + + public static void main(String[] args) { + // A bug would crash the VM before we get here. + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/options/TestThreadCounts.java 2020-01-17 17:11:45.493125223 +0100 @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +/* + * @test TestThreadCounts + * @summary Test that Shenandoah GC thread counts are handled well + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.management + * @run driver TestThreadCounts + */ + +import jdk.test.lib.process.ProcessTools; +import jdk.test.lib.process.OutputAnalyzer; + +public class TestThreadCounts { + public static void main(String[] args) throws Exception { + for (int conc = 0; conc < 16; conc++) { + for (int par = 0; par < 16; par++) { + testWith(conc, par); + } + } + } + + private static void testWith(int conc, int par) throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockDiagnosticVMOptions", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:ConcGCThreads=" + conc, + "-XX:ParallelGCThreads=" + par, + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + + if (conc == 0) { + output.shouldContain("Shenandoah expects ConcGCThreads > 0"); + output.shouldHaveExitValue(1); + } else if (par == 0) { + output.shouldContain("Shenandoah expects ParallelGCThreads > 0"); + output.shouldHaveExitValue(1); + } else if (conc > par) { + output.shouldContain("Shenandoah expects ConcGCThreads <= ParallelGCThreads"); + output.shouldHaveExitValue(1); + } else { + output.shouldNotContain("Shenandoah expects ConcGCThreads <= ParallelGCThreads"); + output.shouldHaveExitValue(0); + } + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/options/TestThreadCountsOverride.java 2020-01-17 17:11:46.096125190 +0100 @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +/* + * @test TestThreadCountsOverride + * @summary Test that Shenandoah GC thread counts are overridable + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.management + * @run driver TestThreadCountsOverride + */ + +import jdk.test.lib.process.ProcessTools; +import jdk.test.lib.process.OutputAnalyzer; + +public class TestThreadCountsOverride { + public static void main(String[] args) throws Exception { + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:ParallelGCThreads=1", + "-XX:+PrintFlagsFinal", + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + + output.shouldMatch("ParallelGCThreads(.*)= 1 "); + output.shouldHaveExitValue(0); + } + + { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:ConcGCThreads=1", + "-XX:+PrintFlagsFinal", + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + + output.shouldMatch("ConcGCThreads(.*)= 1 "); + output.shouldHaveExitValue(0); + } + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/shenandoah/options/TestWrongBarrierDisable.java 2020-01-17 17:11:46.702125156 +0100 @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +/* @test TestWrongBarrierDisable + * @summary Test that disabling wrong barriers fails early + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @library /test/lib + * @run main/othervm TestWrongBarrierDisable + */ + +import java.util.*; + +import jdk.test.lib.process.ProcessTools; +import jdk.test.lib.process.OutputAnalyzer; + +public class TestWrongBarrierDisable { + + public static void main(String[] args) throws Exception { + String[] concurrent = { + "ShenandoahLoadRefBarrier", + "ShenandoahCASBarrier", + "ShenandoahCloneBarrier", + "ShenandoahSATBBarrier", + "ShenandoahKeepAliveBarrier", + }; + + String[] traversal = { + "ShenandoahLoadRefBarrier", + "ShenandoahCASBarrier", + "ShenandoahCloneBarrier", + }; + + shouldFailAll("-XX:ShenandoahGCHeuristics=adaptive", concurrent); + shouldFailAll("-XX:ShenandoahGCHeuristics=static", concurrent); + shouldFailAll("-XX:ShenandoahGCHeuristics=compact", concurrent); + shouldFailAll("-XX:ShenandoahGCHeuristics=aggressive", concurrent); + shouldFailAll("-XX:ShenandoahGCMode=traversal", traversal); + shouldPassAll("-XX:ShenandoahGCMode=passive", concurrent); + shouldPassAll("-XX:ShenandoahGCMode=passive", traversal); + } + + private static void shouldFailAll(String h, String[] barriers) throws Exception { + for (String b : barriers) { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + h, + "-XX:-" + b, + "-version" + ); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldNotHaveExitValue(0); + output.shouldContain("Heuristics needs "); + output.shouldContain("to work correctly"); + } + } + + private static void shouldPassAll(String h, String[] barriers) throws Exception { + for (String b : barriers) { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + h, + "-XX:-" + b, + "-version" + ); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + } + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/startup_warnings/TestShenandoah.java 2020-01-17 17:11:47.316125123 +0100 @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* +* @test TestShenandoah +* @key gc +* @requires vm.gc.Shenandoah & !vm.graal.enabled +* @bug 8006398 +* @summary Test that the Shenandoah collector does not print a warning message +* @library /test/lib +* @modules java.base/jdk.internal.misc +* java.management +*/ + +import jdk.test.lib.process.ProcessTools; +import jdk.test.lib.process.OutputAnalyzer; + +public class TestShenandoah { + + public static void main(String args[]) throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", "-XX:+UseShenandoahGC", "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldNotContain("deprecated"); + output.shouldNotContain("error"); + output.shouldHaveExitValue(0); + } + +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/stress/gcbasher/TestGCBasherWithShenandoah.java 2020-01-17 17:11:47.922125089 +0100 @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +import java.io.IOException; + +/* + * @test TestGCBasherWithShenandoah + * @key gc + * @key stress + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @requires vm.flavor == "server" & !vm.emulatedClient & !vm.graal.enabled + * @summary Stress the Shenandoah GC by trying to make old objects more likely to be garbage than young objects. + * + * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahVerify -XX:+ShenandoahDegeneratedGC + * TestGCBasherWithShenandoah 120000 + * + * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahVerify -XX:-ShenandoahDegeneratedGC + * TestGCBasherWithShenandoah 120000 + */ + +/* + * @test TestGCBasherWithShenandoah + * @key gc + * @key stress + * @library / + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @requires vm.flavor == "server" & !vm.emulatedClient & !vm.graal.enabled + * @summary Stress the Shenandoah GC by trying to make old objects more likely to be garbage than young objects. 
+ * + * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot + * TestGCBasherWithShenandoah 120000 + * + * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot + * TestGCBasherWithShenandoah 120000 + * + * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * TestGCBasherWithShenandoah 120000 + */ + +/* + * @test TestGCBasherWithShenandoah + * @key gc + * @key stress + * @library / + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @requires vm.flavor == "server" & !vm.emulatedClient & !vm.graal.enabled + * @summary Stress the Shenandoah GC by trying to make old objects more likely to be garbage than young objects. + * + * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * -XX:+ShenandoahVerify + * TestGCBasherWithShenandoah 120000 + * + * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * TestGCBasherWithShenandoah 120000 + * + * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact + * TestGCBasherWithShenandoah 120000 + */ + +/* + * @test TestGCBasherWithShenandoah + * @key gc + * @key stress + * @library / + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @requires vm.flavor == "server" & !vm.emulatedClient & !vm.graal.enabled + * @summary Stress the Shenandoah GC by trying to make old objects more likely to be garbage than young objects. + * + * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot + * TestGCBasherWithShenandoah 120000 + * + * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot + * TestGCBasherWithShenandoah 120000 + * + * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * TestGCBasherWithShenandoah 120000 + */ + +/* + * @test TestGCBasherWithShenandoah + * @key gc + * @key stress + * @library / + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @requires vm.flavor == "server" & !vm.emulatedClient & !vm.graal.enabled + * @summary Stress the Shenandoah GC by trying to make old objects more likely to be garbage than young objects. 
+ * + * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * -XX:+ShenandoahVerify + * TestGCBasherWithShenandoah 120000 + * + * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * TestGCBasherWithShenandoah 120000 + */ +public class TestGCBasherWithShenandoah { + public static void main(String[] args) throws IOException { + TestGCBasher.main(args); + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/stress/gclocker/TestGCLockerWithShenandoah.java 2020-01-17 17:11:48.533125056 +0100 @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* + * @test TestGCLockerWithShenandoah + * @key gc + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @summary Stress Shenandoah's JNI handling by calling GetPrimitiveArrayCritical while concurrently filling up old gen. + * + * @run main/native/othervm/timeout=200 -Xlog:gc*=info -Xms1500m -Xmx1500m -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UseShenandoahGC + * -XX:+ShenandoahVerify + * TestGCLockerWithShenandoah + * + * @run main/native/othervm/timeout=200 -Xlog:gc*=info -Xms1500m -Xmx1500m -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UseShenandoahGC + * TestGCLockerWithShenandoah + * + * @run main/native/othervm/timeout=200 -Xlog:gc*=info -Xms1500m -Xmx1500m -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot + * TestGCLockerWithShenandoah + * + * @run main/native/othervm/timeout=200 -Xlog:gc*=info -Xms1500m -Xmx1500m -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot + * TestGCLockerWithShenandoah + */ +public class TestGCLockerWithShenandoah { + public static void main(String[] args) { + String[] testArgs = {"2", "Shenandoah heap"}; + TestGCLocker.main(testArgs); + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/stress/gcold/TestGCOldWithShenandoah.java 2020-01-17 17:11:49.145125022 +0100 @@ -0,0 +1,125 @@ +/* +* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. 
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +* +* This code is free software; you can redistribute it and/or modify it +* under the terms of the GNU General Public License version 2 only, as +* published by the Free Software Foundation. +* +* This code is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +* version 2 for more details (a copy is included in the LICENSE file that +* accompanied this code). +* +* You should have received a copy of the GNU General Public License version +* 2 along with this work; if not, write to the Free Software Foundation, +* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +* +* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +* or visit www.oracle.com if you need additional information or have any +* questions. +*/ + +/* + * @test TestGCOldWithShenandoah + * @key gc + * @key stress + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @summary Stress the GC by trying to make old objects more likely to be garbage than young objects. + * + * @run main/othervm/timeout=600 -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahVerify + * TestGCOld 50 1 20 10 10000 + * + * @run main/othervm/timeout=600 -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify + * TestGCOld 50 1 20 10 10000 + * + * @run main/othervm/timeout=600 -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:+ShenandoahDegeneratedGC + * TestGCOld 50 1 20 10 10000 + * + * @run main/othervm/timeout=600 -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive + * -XX:-ShenandoahDegeneratedGC + * TestGCOld 50 1 20 10 10000 + */ + +/* + * @test TestGCOldWithShenandoah + * @key gc + * @key stress + * @library / + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @summary Stress the GC by trying to make old objects more likely to be garbage than young objects. 
+ * + * @run main/othervm -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot + * TestGCOld 50 1 20 10 10000 + * + * @run main/othervm -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot + * TestGCOld 50 1 20 10 10000 + * + * @run main/othervm -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive + * TestGCOld 50 1 20 10 10000 + * + * @run main/othervm/timeout=600 -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * -XX:+ShenandoahVerify + * TestGCOld 50 1 20 10 10000 + * + * @run main/othervm -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive + * TestGCOld 50 1 20 10 10000 + * + * @run main/othervm -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact + * TestGCOld 50 1 20 10 10000 + */ + +/* + * @test TestGCOldWithShenandoah + * @key gc + * @key stress + * @library / + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @summary Stress the GC by trying to make old objects more likely to be garbage than young objects. + * + * @run main/othervm -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahOOMDuringEvacALot + * TestGCOld 50 1 20 10 10000 + * + * @run main/othervm -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * -XX:+ShenandoahAllocFailureALot + * TestGCOld 50 1 20 10 10000 + * + * @run main/othervm -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive + * TestGCOld 50 1 20 10 10000 + * + * @run main/othervm/timeout=600 -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * -XX:+ShenandoahVerify + * TestGCOld 50 1 20 10 10000 + * + * @run main/othervm -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * TestGCOld 50 1 20 10 10000 + */ + +public class TestGCOldWithShenandoah { + + public static void main(String[] args) { + TestGCOld.main(args); + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/hotspot/jtreg/gc/stress/systemgc/TestSystemGCWithShenandoah.java 2020-01-17 17:11:49.754124988 +0100 @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/* + * @test TestSystemGCWithShenandoah + * @key gc + * @key stress + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @summary Stress the Shenandoah GC full GC by allocating objects of different lifetimes concurrently with System.gc(). + * + * @run main/othervm/timeout=300 -Xlog:gc*=info -Xmx512m -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UseShenandoahGC + * -XX:+ShenandoahVerify + * TestSystemGCWithShenandoah 270 + * + * @run main/othervm/timeout=300 -Xlog:gc*=info -Xmx512m -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UseShenandoahGC + * TestSystemGCWithShenandoah 270 + */ + +/* + * @test TestSystemGCWithShenandoah + * @key gc + * @key stress + * @library / + * @requires vm.gc.Shenandoah & !vm.graal.enabled + * @summary Stress the Shenandoah GC full GC by allocating objects of different lifetimes concurrently with System.gc(). + * + * @run main/othervm/timeout=300 -Xlog:gc*=info -Xmx512m -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal + * -XX:+ShenandoahVerify + * TestSystemGCWithShenandoah 270 + * + */ +public class TestSystemGCWithShenandoah { + public static void main(String[] args) throws Exception { + TestSystemGC.main(args); + } +} --- /dev/null 2020-01-17 11:46:19.065201212 +0100 +++ new/test/jdk/jdk/jfr/event/gc/detailed/TestShenandoahHeapRegionInformationEvent.java 2020-01-17 17:11:50.369124954 +0100 @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2019, Red Hat, Inc. All rights reserved. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+package jdk.jfr.event.gc.detailed;
+
+import java.nio.file.Paths;
+import java.util.List;
+
+import jdk.jfr.EventType;
+import jdk.jfr.FlightRecorder;
+import jdk.jfr.Recording;
+import jdk.jfr.consumer.RecordedEvent;
+import jdk.test.lib.Asserts;
+import jdk.test.lib.jfr.EventNames;
+import jdk.test.lib.jfr.Events;
+import jdk.test.lib.jfr.GCHelper;
+
+/**
+ * @test
+ * @bug 8221507
+ * @requires vm.hasJFR
+ * @requires vm.gc == "Shenandoah" | vm.gc == null
+ * @key jfr
+ * @library /test/lib /test/jdk
+ * @run main/othervm -Xmx32m -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGarbageThreshold=1 jdk.jfr.event.gc.detailed.TestShenandoahHeapRegionInformationEvent
+ */
+
+
+public class TestShenandoahHeapRegionInformationEvent {
+    private final static String EVENT_NAME = EventNames.ShenandoahHeapRegionInformation;
+    public static void main(String[] args) throws Exception {
+        try (Recording recording = new Recording()) {
+            // activate the event we are interested in and start recording
+            for (EventType t : FlightRecorder.getFlightRecorder().getEventTypes()) {
+                System.out.println(t.getName());
+            }
+            recording.enable(EVENT_NAME);
+            recording.start();
+            recording.stop();
+
+            // Verify recording
+            List<RecordedEvent> events = Events.fromRecording(recording);
+            Events.hasEvents(events);
+            for (RecordedEvent event : events) {
+                Events.assertField(event, "index").notEqual(-1);
+                GCHelper.assertIsValidShenandoahHeapRegionState(Events.assertField(event, "state").getValue());
+                Events.assertField(event, "used").atMost(1L*1024*1024);
+            }
+        }
+    }
+}
--- /dev/null 2020-01-17 11:46:19.065201212 +0100
+++ new/test/jdk/jdk/jfr/event/gc/detailed/TestShenandoahHeapRegionStateChangeEvent.java 2020-01-17 17:11:50.983124921 +0100
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package jdk.jfr.event.gc.detailed;
+
+import java.nio.file.Paths;
+import java.time.Duration;
+import java.util.List;
+
+import jdk.jfr.Recording;
+import jdk.jfr.consumer.RecordedEvent;
+import jdk.test.lib.Asserts;
+import jdk.test.lib.jfr.EventNames;
+import jdk.test.lib.jfr.Events;
+import jdk.test.lib.jfr.GCHelper;
+
+/**
+ * @test
+ * @bug 8221507
+ * @requires vm.hasJFR
+ * @requires vm.gc == "Shenandoah" | vm.gc == null
+ * @key jfr
+ * @library /test/lib /test/jdk
+ * @run main/othervm -Xmx32m -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGarbageThreshold=1 jdk.jfr.event.gc.detailed.TestShenandoahHeapRegionStateChangeEvent
+ */
+
+public class TestShenandoahHeapRegionStateChangeEvent {
+    private final static String EVENT_NAME = EventNames.ShenandoahHeapRegionStateChange;
+
+    public static void main(String[] args) throws Exception {
+        try (Recording recording = new Recording()) {
+            // activate the event we are interested in and start recording
+            recording.enable(EVENT_NAME).withThreshold(Duration.ofMillis(0));
+            recording.start();
+
+            byte[][] array = new byte[1024][];
+            for (int i = 0; i < array.length; i++) {
+                array[i] = new byte[20 * 1024];
+            }
+            recording.stop();
+
+            // Verify recording
+            List<RecordedEvent> events = Events.fromRecording(recording);
+            Asserts.assertFalse(events.isEmpty(), "No events found");
+
+            for (RecordedEvent event : events) {
+                Events.assertField(event, "index").notEqual(-1);
+                GCHelper.assertIsValidShenandoahHeapRegionState(Events.assertField(event, "from").getValue());
+                GCHelper.assertIsValidShenandoahHeapRegionState(Events.assertField(event, "to").getValue());
+                Events.assertField(event, "used").atMost(1L*1024*1024);
+            }
+        }
+    }
+}
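
For reference, the least obvious piece of test machinery above is the combination enumeration in TestSelectiveBarrierFlags: a single counter is decoded mixed-radix style, where each group of related barrier flags contributes (length + 1) choices and choice 0 means "no flag from this group", so every counter value in [0, product of radices) yields a distinct flag combination. The following is a minimal standalone sketch of just that decoding, assuming it mirrors the test's loop; the class and method names here are illustrative and are not part of the patch.

    import java.util.ArrayList;
    import java.util.List;

    public class BarrierFlagDecodeSketch {
        // Decode counter c into one -XX flag setting per group member.
        // Each group is a "digit" with radix (group.length + 1); digit
        // value 0 enables no flag from that group, value e+1 enables
        // exactly group[e] and disables the rest.
        static List<String> decode(int c, String[][] groups) {
            List<String> flags = new ArrayList<>();
            int t = c;
            for (String[] group : groups) {
                int radix = group.length + 1;
                int choice = t % radix;      // which member of this group, 0 = none
                for (int e = 0; e < group.length; e++) {
                    flags.add("-XX:" + (choice == e + 1 ? "+" : "-") + group[e]);
                }
                t /= radix;                  // shift to the next "digit"
            }
            return flags;
        }

        public static void main(String[] args) {
            String[][] groups = {
                { "ShenandoahCASBarrier" },
                { "ShenandoahSATBBarrier", "ShenandoahStoreValEnqueueBarrier" },
            };
            // radices 2 and 3 give 2 * 3 = 6 distinct combinations
            for (int c = 0; c < 6; c++) {
                System.out.println(c + " -> " + decode(c, groups));
            }
        }
    }

Grouping ShenandoahSATBBarrier with ShenandoahStoreValEnqueueBarrier means at most one of the two is ever enabled per combination, which keeps the enumeration linear in the product of group sizes rather than 2^(number of flags).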