--- old/make/bsd/makefiles/buildtree.make 2020-01-16 16:56:25.283633980 +0300 +++ new/make/bsd/makefiles/buildtree.make 2020-01-16 16:56:25.231635658 +0300 @@ -125,9 +125,13 @@ $(PLATFORM_DIR)/generated/dependencies \ $(PLATFORM_DIR)/generated/adfiles \ $(PLATFORM_DIR)/generated/jvmtifiles \ - $(PLATFORM_DIR)/generated/jfrfiles \ $(PLATFORM_DIR)/generated/dtracefiles +ifeq ($(ENABLE_JFR), true) +SIMPLE_DIRS += \ + $(PLATFORM_DIR)/generated/jfrfiles +endif + TARGETS = debug fastdebug optimized product SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS)) @@ -135,7 +139,10 @@ BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make # dtrace.make is used on BSD versions that implement Dtrace (like MacOS X) -BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make jfr.make sa.make dtrace.make +BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make dtrace.make +ifeq ($(ENABLE_JFR), true) +BUILDTREE_TARGETS += jfr.make +endif BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \ SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT) --- old/make/bsd/makefiles/rules.make 2020-01-16 16:56:25.483627526 +0300 +++ new/make/bsd/makefiles/rules.make 2020-01-16 16:56:25.435629075 +0300 @@ -126,8 +126,14 @@ RUN.JAR$(MAKE_VERBOSE) += >/dev/null # Settings for javac +ifeq ($(ENABLE_JFR), true) +# JFR source code is consciously left as close as possible to later versions of JDK. 
hence need at least lambda support BOOT_SOURCE_LANGUAGE_VERSION = 8 BOOT_TARGET_CLASS_VERSION = 8 +else +BOOT_SOURCE_LANGUAGE_VERSION = 7 +BOOT_TARGET_CLASS_VERSION = 7 +endif JAVAC_FLAGS = -g -encoding ascii BOOTSTRAP_JAVAC_FLAGS = $(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION) --- old/make/bsd/makefiles/top.make 2020-01-16 16:56:25.675621330 +0300 +++ new/make/bsd/makefiles/top.make 2020-01-16 16:56:25.631622750 +0300 @@ -79,8 +79,10 @@ default: vm_build_preliminaries the_vm @echo All done. +Jfr_Stuff_If_Required = $(if $(findstring true,$(ENABLE_JFR)),jfr_stuff,) + # This is an explicit dependency for the sake of parallel makes. -vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff jfr_stuff sa_stuff dtrace_stuff +vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff $(Jfr_Stuff_If_Required) sa_stuff dtrace_stuff @# We need a null action here, so implicit rules don't get consulted. $(Cached_plat): $(Plat_File) --- old/make/bsd/makefiles/vm.make 2020-01-16 16:56:25.871615005 +0300 +++ new/make/bsd/makefiles/vm.make 2020-01-16 16:56:25.827616425 +0300 @@ -178,7 +178,10 @@ SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path)) -CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/jfrfiles +CORE_PATHS+=$(GENERATED)/jvmtifiles +ifeq ($(ENABLE_JFR), true) +CORE_PATHS+=$(GENERATED)/jfrfiles +endif COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1) COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1 --- old/make/linux/makefiles/buildtree.make 2020-01-16 16:56:26.067608681 +0300 +++ new/make/linux/makefiles/buildtree.make 2020-01-16 16:56:26.019610229 +0300 @@ -122,8 +122,12 @@ SIMPLE_DIRS = \ $(PLATFORM_DIR)/generated/dependencies \ $(PLATFORM_DIR)/generated/adfiles \ - $(PLATFORM_DIR)/generated/jvmtifiles \ + $(PLATFORM_DIR)/generated/jvmtifiles + +ifeq ($(ENABLE_JFR), true) 
+SIMPLE_DIRS += \ $(PLATFORM_DIR)/generated/jfrfiles +endif TARGETS = debug fastdebug optimized product SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS)) @@ -131,7 +135,10 @@ # For dependencies and recursive makes. BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make -BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make jfr.make sa.make +BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make +ifeq ($(ENABLE_JFR), true) +BUILDTREE_TARGETS += jfr.make +endif BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \ SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT) --- old/make/linux/makefiles/rules.make 2020-01-16 16:56:26.263602355 +0300 +++ new/make/linux/makefiles/rules.make 2020-01-16 16:56:26.215603905 +0300 @@ -126,8 +126,14 @@ RUN.JAR$(MAKE_VERBOSE) += >/dev/null # Settings for javac +ifeq ($(ENABLE_JFR), true) +# JFR source code is consciously left as close as possible to later versions of JDK. hence need at least lambda support BOOT_SOURCE_LANGUAGE_VERSION = 8 BOOT_TARGET_CLASS_VERSION = 8 +else +BOOT_SOURCE_LANGUAGE_VERSION = 7 +BOOT_TARGET_CLASS_VERSION = 7 +endif JAVAC_FLAGS = -g -encoding ascii BOOTSTRAP_JAVAC_FLAGS = $(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION) --- old/make/linux/makefiles/top.make 2020-01-16 16:56:26.459596031 +0300 +++ new/make/linux/makefiles/top.make 2020-01-16 16:56:26.411597579 +0300 @@ -79,8 +79,10 @@ default: vm_build_preliminaries the_vm @echo All done. +Jfr_Stuff_If_Required = $(if $(findstring true,$(ENABLE_JFR)),jfr_stuff,) + # This is an explicit dependency for the sake of parallel makes. 
-vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff jfr_stuff sa_stuff +vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff $(Jfr_Stuff_If_Required) sa_stuff @# We need a null action here, so implicit rules don't get consulted. $(Cached_plat): $(Plat_File) --- old/make/linux/makefiles/vm.make 2020-01-16 16:56:26.655589706 +0300 +++ new/make/linux/makefiles/vm.make 2020-01-16 16:56:26.607591255 +0300 @@ -163,7 +163,10 @@ SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path)) -CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/jfrfiles +CORE_PATHS+=$(GENERATED)/jvmtifiles +ifeq ($(ENABLE_JFR), true) +CORE_PATHS+=$(GENERATED)/jfrfiles +endif COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1) COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1 --- old/make/solaris/makefiles/buildtree.make 2020-01-16 16:56:26.851583381 +0300 +++ new/make/solaris/makefiles/buildtree.make 2020-01-16 16:56:26.803584930 +0300 @@ -114,7 +114,11 @@ $(PLATFORM_DIR)/generated/dependencies \ $(PLATFORM_DIR)/generated/adfiles \ $(PLATFORM_DIR)/generated/jvmtifiles \ + +ifeq ($(ENABLE_JFR), true) +SIMPLE_DIRS += \ $(PLATFORM_DIR)/generated/jfrfiles +endif TARGETS = debug fastdebug optimized product SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS)) @@ -122,7 +126,10 @@ # For dependencies and recursive makes. 
BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make -BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make jfr.make sa.make +BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make +ifeq ($(ENABLE_JFR), true) +BUILDTREE_TARGETS += jfr.make +endif BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \ ARCH=$(ARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT) --- old/make/solaris/makefiles/rules.make 2020-01-16 16:56:27.043577185 +0300 +++ new/make/solaris/makefiles/rules.make 2020-01-16 16:56:26.999578605 +0300 @@ -118,8 +118,14 @@ RUN.JAR$(MAKE_VERBOSE) += >/dev/null # Settings for javac +ifeq ($(ENABLE_JFR), true) +# JFR source code is consciously left as close as possible to later versions of JDK. hence need at least lambda support BOOT_SOURCE_LANGUAGE_VERSION = 8 BOOT_TARGET_CLASS_VERSION = 8 +else +BOOT_SOURCE_LANGUAGE_VERSION = 7 +BOOT_TARGET_CLASS_VERSION = 7 +endif JAVAC_FLAGS = -g -encoding ascii BOOTSTRAP_JAVAC_FLAGS = $(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION) --- old/make/solaris/makefiles/top.make 2020-01-16 16:56:27.239570860 +0300 +++ new/make/solaris/makefiles/top.make 2020-01-16 16:56:27.191572409 +0300 @@ -72,8 +72,10 @@ default: vm_build_preliminaries the_vm @echo All done. +Jfr_Stuff_If_Required = $(if $(findstring true,$(ENABLE_JFR)),jfr_stuff,) + # This is an explicit dependency for the sake of parallel makes. -vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff jfr_stuff sa_stuff +vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff $(Jfr_Stuff_If_Required) sa_stuff @# We need a null action here, so implicit rules don't get consulted. 
$(Cached_plat): $(Plat_File) --- old/make/solaris/makefiles/vm.make 2020-01-16 16:56:27.443564278 +0300 +++ new/make/solaris/makefiles/vm.make 2020-01-16 16:56:27.399565697 +0300 @@ -177,7 +177,10 @@ SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path)) -CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/jfrfiles +CORE_PATHS+=$(GENERATED)/jvmtifiles +ifeq ($(ENABLE_JFR), true) +CORE_PATHS+=$(GENERATED)/jfrfiles +endif COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1) COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1 --- old/make/windows/makefiles/generated.make 2020-01-16 16:56:27.643557824 +0300 +++ new/make/windows/makefiles/generated.make 2020-01-16 16:56:27.591559502 +0300 @@ -30,9 +30,11 @@ JvmtiOutDir=jvmtifiles !include $(WorkSpace)/make/windows/makefiles/jvmti.make +!if "$(ENABLE_JFR)" == "true" # Pick up rules for building JFR JfrOutDir=jfrfiles !include $(WorkSpace)/make/windows/makefiles/jfr.make +!endif # Pick up rules for building SA !include $(WorkSpace)/make/windows/makefiles/sa.make --- old/make/windows/makefiles/rules.make 2020-01-16 16:56:27.835551628 +0300 +++ new/make/windows/makefiles/rules.make 2020-01-16 16:56:27.787553177 +0300 @@ -44,8 +44,14 @@ !endif # Settings for javac +!if "$(ENABLE_JFR)" == "true" +# JFR source code is consciously left as close as possible to later versions of JDK. 
hence need at least lambda support BOOT_SOURCE_LANGUAGE_VERSION=8 BOOT_TARGET_CLASS_VERSION=8 +!else +BOOT_SOURCE_LANGUAGE_VERSION=7 +BOOT_TARGET_CLASS_VERSION=7 +!endif JAVAC_FLAGS=-g -encoding ascii BOOTSTRAP_JAVAC_FLAGS=$(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION) --- old/src/share/vm/c1/c1_GraphBuilder.cpp 2020-01-16 16:56:28.039545045 +0300 +++ new/src/share/vm/c1/c1_GraphBuilder.cpp 2020-01-16 16:56:27.987546723 +0300 @@ -34,7 +34,9 @@ #include "ci/ciMemberName.hpp" #include "compiler/compileBroker.hpp" #include "interpreter/bytecode.hpp" +#if INCLUDE_JFR #include "jfr/jfrEvents.hpp" +#endif #include "runtime/sharedRuntime.hpp" #include "runtime/compilationPolicy.hpp" #include "utilities/bitMap.inline.hpp" @@ -4403,6 +4405,7 @@ } +#if INCLUDE_JFR static void post_inlining_event(EventCompilerInlining* event, int compile_id, const char* msg, @@ -4426,6 +4429,7 @@ event->set_callee(callee_struct); event->commit(); } +#endif void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool success) { CompileLog* log = compilation()->log(); @@ -4443,10 +4447,12 @@ } } +#if INCLUDE_JFR EventCompilerInlining event; if (event.should_commit()) { post_inlining_event(&event, compilation()->env()->task()->compile_id(), msg, success, bci(), method(), callee); } +#endif if (!PrintInlining && !compilation()->method()->has_option("PrintInlining")) { return; --- old/src/share/vm/c1/c1_LIRGenerator.hpp 2020-01-16 16:56:28.275537429 +0300 +++ new/src/share/vm/c1/c1_LIRGenerator.hpp 2020-01-16 16:56:28.227538978 +0300 @@ -28,7 +28,9 @@ #include "c1/c1_Instruction.hpp" #include "c1/c1_LIR.hpp" #include "ci/ciMethodData.hpp" +#if INCLUDE_JFR #include "jfr/support/jfrIntrinsics.hpp" +#endif #include "utilities/sizes.hpp" // The classes responsible for code emission and register allocation --- old/src/share/vm/c1/c1_Runtime1.cpp 2020-01-16 16:56:28.475530976 +0300 +++ new/src/share/vm/c1/c1_Runtime1.cpp 2020-01-16 
16:56:28.431532395 +0300 @@ -41,7 +41,9 @@ #include "gc_interface/collectedHeap.hpp" #include "interpreter/bytecode.hpp" #include "interpreter/interpreter.hpp" +#if INCLUDE_JFR #include "jfr/support/jfrIntrinsics.hpp" +#endif #include "memory/allocation.inline.hpp" #include "memory/barrierSet.hpp" #include "memory/oopFactory.hpp" --- old/src/share/vm/ci/ciEnv.cpp 2020-01-16 16:56:28.679524393 +0300 +++ new/src/share/vm/ci/ciEnv.cpp 2020-01-16 16:56:28.631525942 +0300 @@ -40,7 +40,9 @@ #include "compiler/compilerOracle.hpp" #include "gc_interface/collectedHeap.inline.hpp" #include "interpreter/linkResolver.hpp" +#if INCLUDE_JFR #include "jfr/jfrEvents.hpp" +#endif #include "memory/allocation.inline.hpp" #include "memory/oopFactory.hpp" #include "memory/universe.inline.hpp" --- old/src/share/vm/classfile/systemDictionary.cpp 2020-01-16 16:56:28.883517810 +0300 +++ new/src/share/vm/classfile/systemDictionary.cpp 2020-01-16 16:56:28.835519359 +0300 @@ -38,8 +38,10 @@ #include "compiler/compileBroker.hpp" #include "interpreter/bytecodeStream.hpp" #include "interpreter/interpreter.hpp" +#if INCLUDE_JFR #include "jfr/jfrEvents.hpp" #include "jfr/jni/jfrUpcalls.hpp" +#endif #include "memory/filemap.hpp" #include "memory/gcLocker.hpp" #include "memory/oopFactory.hpp" @@ -603,11 +605,11 @@ return (nh); } +#if INCLUDE_JFR // utility function for class load event static void post_class_load_event(EventClassLoad &event, instanceKlassHandle k, Handle initiating_loader) { -#if INCLUDE_JFR if (event.should_commit()) { event.set_loadedClass(k()); event.set_definingClassLoader(k->class_loader_data()); @@ -617,8 +619,8 @@ (ClassLoaderData*)NULL); event.commit(); } -#endif // INCLUDE_JFR } +#endif // INCLUDE_JFR Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle class_loader, @@ -627,7 +629,9 @@ assert(name != NULL && !FieldType::is_array(name) && !FieldType::is_obj(name), "invalid class name"); +#if INCLUDE_JFR EventClassLoad class_load_start_event; +#endif 
// UseNewReflection // Fix for 4474172; see evaluation for more details @@ -878,7 +882,9 @@ return NULL; } +#if INCLUDE_JFR post_class_load_event(class_load_start_event, k, class_loader); +#endif #ifdef ASSERT { @@ -1003,7 +1009,9 @@ TRAPS) { TempNewSymbol parsed_name = NULL; +#if INCLUDE_JFR EventClassLoad class_load_start_event; +#endif ClassLoaderData* loader_data; if (host_klass.not_null()) { @@ -1064,7 +1072,9 @@ JvmtiExport::post_class_load((JavaThread *) THREAD, k()); } +#if INCLUDE_JFR post_class_load_event(class_load_start_event, k, class_loader); +#endif } assert(host_klass.not_null() || cp_patches == NULL, "cp_patches only found with host_klass"); @@ -1434,6 +1444,7 @@ } } +#if INCLUDE_JFR static void post_class_define_event(InstanceKlass* k, const ClassLoaderData* def_cld) { EventClassDefine event; if (event.should_commit()) { @@ -1442,6 +1453,7 @@ event.commit(); } } +#endif void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) { @@ -1513,7 +1525,9 @@ } +#if INCLUDE_JFR post_class_define_event(k(), loader_data); +#endif } // Support parallel classloading --- old/src/share/vm/classfile/vmSymbols.hpp 2020-01-16 16:56:29.107510582 +0300 +++ new/src/share/vm/classfile/vmSymbols.hpp 2020-01-16 16:56:29.059512130 +0300 @@ -25,7 +25,9 @@ #ifndef SHARE_VM_CLASSFILE_VMSYMBOLS_HPP #define SHARE_VM_CLASSFILE_VMSYMBOLS_HPP +#if INCLUDE_JFR #include "jfr/support/jfrIntrinsics.hpp" +#endif #include "memory/iterator.hpp" #include "oops/symbol.hpp" #include "utilities/macros.hpp" @@ -608,7 +610,7 @@ template(classLoader_name, "classLoader") \ \ /* jfr signatures */ \ - JFR_TEMPLATES(template) \ + JFR_ONLY(JFR_TEMPLATES(template)) \ \ /*end*/ @@ -737,7 +739,7 @@ do_intrinsic(_nanoTime, java_lang_System, nanoTime_name, void_long_signature, F_S) \ do_name( nanoTime_name, "nanoTime") \ \ - JFR_INTRINSICS(do_intrinsic, do_class, do_name, do_signature, do_alias) \ + JFR_ONLY(JFR_INTRINSICS(do_intrinsic, do_class, do_name, do_signature, do_alias)) \ \ 
do_intrinsic(_arraycopy, java_lang_System, arraycopy_name, arraycopy_signature, F_S) \ do_name( arraycopy_name, "arraycopy") \ --- old/src/share/vm/code/codeCache.cpp 2020-01-16 16:56:29.315503870 +0300 +++ new/src/share/vm/code/codeCache.cpp 2020-01-16 16:56:29.271505290 +0300 @@ -32,7 +32,9 @@ #include "code/pcDesc.hpp" #include "compiler/compileBroker.hpp" #include "gc_implementation/shared/markSweep.hpp" +#if INCLUDE_JFR #include "jfr/jfrEvents.hpp" +#endif #include "memory/allocation.inline.hpp" #include "memory/gcLocker.hpp" #include "memory/iterator.hpp" @@ -784,6 +786,7 @@ void CodeCache::report_codemem_full() { _codemem_full_count++; +#if INCLUDE_JFR EventCodeCacheFull event; if (event.should_commit()) { event.set_codeBlobType((u1)CodeBlobType::All); @@ -797,6 +800,7 @@ event.set_fullCount(_codemem_full_count); event.commit(); } +#endif } //------------------------------------------------------------------------------------------------ --- old/src/share/vm/compiler/compileBroker.cpp 2020-01-16 16:56:29.523497158 +0300 +++ new/src/share/vm/compiler/compileBroker.cpp 2020-01-16 16:56:29.475498707 +0300 @@ -30,7 +30,9 @@ #include "compiler/compileLog.hpp" #include "compiler/compilerOracle.hpp" #include "interpreter/linkResolver.hpp" +#if INCLUDE_JFR #include "jfr/jfrEvents.hpp" +#endif #include "memory/allocation.inline.hpp" #include "oops/methodData.hpp" #include "oops/method.hpp" @@ -1913,6 +1915,7 @@ tty->print("%s", s.as_string()); } +#if INCLUDE_JFR static void post_compilation_event(EventCompilation* event, CompileTask* task) { assert(event != NULL, "invariant"); assert(event->should_commit(), "invariant"); @@ -1925,6 +1928,7 @@ event->set_inlinedBytes(task->num_inlined_bytecodes()); event->commit(); } +#endif // ------------------------------------------------------------------ // CompileBroker::invoke_compiler_on_method @@ -2003,7 +2007,9 @@ ciMethod* target = ci_env.get_method_from_handle(target_handle); TraceTime t1("compilation", &time); +#if 
INCLUDE_JFR EventCompilation event; +#endif AbstractCompiler *comp = compiler(task_level); if (comp == NULL) { @@ -2036,12 +2042,14 @@ task->print_compilation(tty, msg); } +#if INCLUDE_JFR EventCompilationFailure event; if (event.should_commit()) { event.set_compileId(compile_id); event.set_failureMessage(failure_reason); event.commit(); } +#endif } else { task->mark_success(); task->set_num_inlined_bytecodes(ci_env.num_inlined_bytecodes()); @@ -2054,9 +2062,11 @@ } // simulate crash during compilation assert(task->compile_id() != CICrashAt, "just as planned"); +#if INCLUDE_JFR if (event.should_commit()) { post_compilation_event(&event, task); } +#endif } pop_jni_handle_block(); --- old/src/share/vm/gc_implementation/g1/g1HeapRegionEventSender.cpp 2020-01-16 16:56:29.735490317 +0300 +++ new/src/share/vm/gc_implementation/g1/g1HeapRegionEventSender.cpp 2020-01-16 16:56:29.687491866 +0300 @@ -31,12 +31,14 @@ class DumpEventInfoClosure : public HeapRegionClosure { public: bool doHeapRegion(HeapRegion* r) { +#if INCLUDE_JFR EventG1HeapRegionInformation evt; evt.set_index(r->hrm_index()); // XXX TODO evt.set_type(r->get_trace_type()); evt.set_start((uintptr_t)r->bottom()); evt.set_used(r->used()); evt.commit(); +#endif return false; } }; --- old/src/share/vm/gc_implementation/g1/heapRegionTracer.cpp 2020-01-16 16:56:29.927484121 +0300 +++ new/src/share/vm/gc_implementation/g1/heapRegionTracer.cpp 2020-01-16 16:56:29.879485670 +0300 @@ -31,6 +31,7 @@ G1HeapRegionTraceType::Type to, uintptr_t start, size_t used) { +#if INCLUDE_JFR EventG1HeapRegionTypeChange e; if (e.should_commit()) { e.set_index(index); @@ -40,4 +41,5 @@ e.set_used(used); e.commit(); } +#endif } --- old/src/share/vm/gc_implementation/shared/ageTableTracer.cpp 2020-01-16 16:56:30.139477280 +0300 +++ new/src/share/vm/gc_implementation/shared/ageTableTracer.cpp 2020-01-16 16:56:30.075479345 +0300 @@ -29,6 +29,7 @@ #include "jfr/jfrEvents.hpp" void AgeTableTracer::send_tenuring_distribution_event(uint age, 
size_t size, GCTracer &tracer) { +#if INCLUDE_JFR EventTenuringDistribution e; if (e.should_commit()) { e.set_gcId(tracer.gc_id().id()); @@ -36,8 +37,13 @@ e.set_size(size); e.commit(); } +#endif } bool AgeTableTracer::is_tenuring_distribution_event_enabled() { +#if INCLUDE_JFR return EventTenuringDistribution::is_enabled(); +#else + return false; +#endif } --- old/src/share/vm/gc_implementation/shared/gcTraceSend.cpp 2020-01-16 16:56:30.335470956 +0300 +++ new/src/share/vm/gc_implementation/shared/gcTraceSend.cpp 2020-01-16 16:56:30.287472504 +0300 @@ -23,7 +23,9 @@ */ #include "precompiled.hpp" +#if INCLUDE_JFR #include "jfr/jfrEvents.hpp" +#endif #include "gc_implementation/shared/gcHeapSummary.hpp" #include "gc_implementation/shared/gcTimer.hpp" #include "gc_implementation/shared/gcTrace.hpp" @@ -40,6 +42,7 @@ typedef uintptr_t TraceAddress; void GCTracer::send_garbage_collection_event() const { +#if INCLUDE_JFR EventGarbageCollection event(UNTIMED); if (event.should_commit()) { event.set_gcId(_shared_gc_info.gc_id().id()); @@ -51,9 +54,11 @@ event.set_endtime(_shared_gc_info.end_timestamp()); event.commit(); } +#endif } void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const { +#if INCLUDE_JFR EventGCReferenceStatistics e; if (e.should_commit()) { e.set_gcId(_shared_gc_info.gc_id().id()); @@ -61,10 +66,12 @@ e.set_count(count); e.commit(); } +#endif } void GCTracer::send_metaspace_chunk_free_list_summary(GCWhen::Type when, Metaspace::MetadataType mdtype, const MetaspaceChunkFreeListSummary& summary) const { +#if INCLUDE_JFR EventMetaspaceChunkFreeListSummary e; if (e.should_commit()) { e.set_gcId(_shared_gc_info.gc_id().id()); @@ -85,9 +92,11 @@ e.commit(); } +#endif } void ParallelOldTracer::send_parallel_old_event() const { +#if INCLUDE_JFR EventParallelOldGarbageCollection e(UNTIMED); if (e.should_commit()) { e.set_gcId(_shared_gc_info.gc_id().id()); @@ -96,9 +105,11 @@ e.set_endtime(_shared_gc_info.end_timestamp()); e.commit(); } 
+#endif } void YoungGCTracer::send_young_gc_event() const { +#if INCLUDE_JFR EventYoungGarbageCollection e(UNTIMED); if (e.should_commit()) { e.set_gcId(_shared_gc_info.gc_id().id()); @@ -107,20 +118,29 @@ e.set_endtime(_shared_gc_info.end_timestamp()); e.commit(); } +#endif } bool YoungGCTracer::should_send_promotion_in_new_plab_event() const { +#if INCLUDE_JFR return EventPromoteObjectInNewPLAB::is_enabled(); +#else + return false; +#endif } bool YoungGCTracer::should_send_promotion_outside_plab_event() const { +#if INCLUDE_JFR return EventPromoteObjectOutsidePLAB::is_enabled(); +#else + return false; +#endif } void YoungGCTracer::send_promotion_in_new_plab_event(Klass* klass, size_t obj_size, uint age, bool tenured, size_t plab_size) const { - +#if INCLUDE_JFR EventPromoteObjectInNewPLAB event; if (event.should_commit()) { event.set_gcId(_shared_gc_info.gc_id().id()); @@ -131,11 +151,12 @@ event.set_plabSize(plab_size); event.commit(); } +#endif } void YoungGCTracer::send_promotion_outside_plab_event(Klass* klass, size_t obj_size, uint age, bool tenured) const { - +#if INCLUDE_JFR EventPromoteObjectOutsidePLAB event; if (event.should_commit()) { event.set_gcId(_shared_gc_info.gc_id().id()); @@ -145,9 +166,11 @@ event.set_tenuringAge(age); event.commit(); } +#endif } void OldGCTracer::send_old_gc_event() const { +#if INCLUDE_JFR EventOldGarbageCollection e(UNTIMED); if (e.should_commit()) { e.set_gcId(_shared_gc_info.gc_id().id()); @@ -155,8 +178,10 @@ e.set_endtime(_shared_gc_info.end_timestamp()); e.commit(); } +#endif } +#if INCLUDE_JFR static JfrStructCopyFailed to_struct(const CopyFailedInfo& cf_info) { JfrStructCopyFailed failed_info; failed_info.set_objectCount(cf_info.failed_count()); @@ -165,8 +190,10 @@ failed_info.set_totalSize(cf_info.total_size()); return failed_info; } +#endif void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const { +#if INCLUDE_JFR EventPromotionFailed e; if (e.should_commit()) { 
e.set_gcId(_shared_gc_info.gc_id().id()); @@ -174,19 +201,23 @@ e.set_thread(pf_info.thread()->thread_id()); e.commit(); } +#endif } // Common to CMS and G1 void OldGCTracer::send_concurrent_mode_failure_event() { +#if INCLUDE_JFR EventConcurrentModeFailure e; if (e.should_commit()) { e.set_gcId(_shared_gc_info.gc_id().id()); e.commit(); } +#endif } #if INCLUDE_ALL_GCS void G1NewTracer::send_g1_young_gc_event() { +#if INCLUDE_JFR EventG1GarbageCollection e(UNTIMED); if (e.should_commit()) { e.set_gcId(_shared_gc_info.gc_id().id()); @@ -195,9 +226,11 @@ e.set_endtime(_shared_gc_info.end_timestamp()); e.commit(); } +#endif } void G1MMUTracer::send_g1_mmu_event(double time_slice_ms, double gc_time_ms, double max_time_ms) { +#if INCLUDE_JFR EventG1MMU e; if (e.should_commit()) { e.set_gcId(GCId::peek().id()); @@ -206,9 +239,11 @@ e.set_pauseTarget((s8)max_time_ms); e.commit(); } +#endif } void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) { +#if INCLUDE_JFR EventEvacuationInformation e; if (e.should_commit()) { e.set_gcId(_shared_gc_info.gc_id().id()); @@ -222,15 +257,18 @@ e.set_regionsFreed(info->regions_freed()); e.commit(); } +#endif } void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const { +#if INCLUDE_JFR EventEvacuationFailed e; if (e.should_commit()) { e.set_gcId(_shared_gc_info.gc_id().id()); e.set_evacuationFailed(to_struct(ef_info)); e.commit(); } +#endif } // XXX @@ -311,6 +349,7 @@ #endif // INCLUDE_ALL_GCS +#if INCLUDE_JFR static JfrStructVirtualSpace to_struct(const VirtualSpaceSummary& summary) { JfrStructVirtualSpace space; space.set_start((TraceAddress)summary.start()); @@ -329,6 +368,7 @@ space.set_size(summary.size()); return space; } +#endif class GCHeapSummaryEventSender : public GCHeapSummaryVisitor { GCId _gc_id; @@ -337,6 +377,7 @@ GCHeapSummaryEventSender(GCId gc_id, GCWhen::Type when) : _gc_id(gc_id), _when(when) {} void visit(const GCHeapSummary* heap_summary) const { +#if INCLUDE_JFR const 
VirtualSpaceSummary& heap_space = heap_summary->heap(); EventGCHeapSummary e; @@ -347,9 +388,11 @@ e.set_heapUsed(heap_summary->used()); e.commit(); } +#endif } void visit(const G1HeapSummary* g1_heap_summary) const { +#if INCLUDE_JFR visit((GCHeapSummary*)g1_heap_summary); EventG1HeapSummary e; @@ -362,9 +405,11 @@ e.set_numberOfRegions(g1_heap_summary->numberOfRegions()); e.commit(); } +#endif } void visit(const PSHeapSummary* ps_heap_summary) const { +#if INCLUDE_JFR visit((GCHeapSummary*)ps_heap_summary); const VirtualSpaceSummary& old_summary = ps_heap_summary->old(); @@ -387,6 +432,7 @@ e.set_toSpace(to_struct(ps_heap_summary->to())); e.commit(); } +#endif } }; @@ -395,6 +441,7 @@ heap_summary.accept(&visitor); } +#if INCLUDE_JFR static JfrStructMetaspaceSizes to_struct(const MetaspaceSizes& sizes) { JfrStructMetaspaceSizes meta_sizes; @@ -404,8 +451,10 @@ return meta_sizes; } +#endif void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const { +#if INCLUDE_JFR EventMetaspaceSummary e; if (e.should_commit()) { e.set_gcId(_shared_gc_info.gc_id().id()); @@ -416,6 +465,7 @@ e.set_classSpace(to_struct(meta_space_summary.class_space())); e.commit(); } +#endif } class PhaseSender : public PhaseVisitor { @@ -425,6 +475,7 @@ template void send_phase(GCPhase* phase) { +#if INCLUDE_JFR T event(UNTIMED); if (event.should_commit()) { event.set_gcId(_gc_id.id()); @@ -433,6 +484,7 @@ event.set_endtime(phase->end()); event.commit(); } +#endif } void visit(GCPhase* pause) { ShouldNotReachHere(); } @@ -440,6 +492,7 @@ void visit(PausePhase* pause) { assert(PhasesStack::PHASE_LEVELS == 5, "Need more event types"); +#if INCLUDE_JFR switch (pause->level()) { case 0: send_phase(pause); break; case 1: send_phase(pause); break; @@ -447,6 +500,7 @@ case 3: send_phase(pause); break; default: /* Ignore sending this phase */ break; } +#endif } }; --- old/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp 2020-01-16 
16:56:30.543464244 +0300 +++ new/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp 2020-01-16 16:56:30.499465663 +0300 @@ -46,15 +46,20 @@ bool ObjectCountEventSender::_should_send_requestable_event = false; void ObjectCountEventSender::enable_requestable_event() { +#if INCLUDE_JFR _should_send_requestable_event = true; +#endif } void ObjectCountEventSender::disable_requestable_event() { +#if INCLUDE_JFR _should_send_requestable_event = false; +#endif } template void ObjectCountEventSender::send_event_if_enabled(Klass* klass, GCId gc_id, jlong count, julong size, const Ticks& timestamp) { +#if INCLUDE_JFR T event(UNTIMED); if (event.should_commit()) { event.set_gcId(gc_id.id()); @@ -64,15 +69,18 @@ event.set_endtime(timestamp); event.commit(); } +#endif } void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, const Ticks& timestamp) { +#if INCLUDE_JFR Klass* klass = entry->klass(); jlong count = entry->count(); julong total_size = entry->words() * BytesPerWord; send_event_if_enabled(klass, gc_id, count, total_size, timestamp); send_event_if_enabled(klass, gc_id, count, total_size, timestamp); +#endif } #endif // INCLUDE_SERVICES --- old/src/share/vm/gc_interface/allocTracer.cpp 2020-01-16 16:56:30.743457790 +0300 +++ new/src/share/vm/gc_interface/allocTracer.cpp 2020-01-16 16:56:30.695459339 +0300 @@ -28,22 +28,25 @@ #include "jfr/jfrEvents.hpp" #include "runtime/handles.hpp" #include "utilities/globalDefinitions.hpp" -#if INCLUDE_JFR #include "jfr/support/jfrAllocationTracer.hpp" -#endif void AllocTracer::send_allocation_outside_tlab_event(KlassHandle klass, HeapWord* obj, size_t alloc_size, Thread* thread) { JFR_ONLY(JfrAllocationTracer tracer(obj, alloc_size, thread);) + +#if INCLUDE_JFR EventObjectAllocationOutsideTLAB event; if (event.should_commit()) { event.set_objectClass(klass()); event.set_allocationSize(alloc_size); event.commit(); } +#endif } void AllocTracer::send_allocation_in_new_tlab_event(KlassHandle klass, 
HeapWord* obj, size_t tlab_size, size_t alloc_size, Thread* thread) { JFR_ONLY(JfrAllocationTracer tracer(obj, alloc_size, thread);) + +#if INCLUDE_JFR EventObjectAllocationInNewTLAB event; if (event.should_commit()) { event.set_objectClass(klass()); @@ -51,13 +54,16 @@ event.set_tlabSize(tlab_size); event.commit(); } +#endif } void AllocTracer::send_allocation_requiring_gc_event(size_t size, const GCId& gcId) { +#if INCLUDE_JFR EventAllocationRequiringGC event; if (event.should_commit()) { event.set_gcId(gcId.id()); event.set_size(size); event.commit(); } +#endif } --- old/src/share/vm/jfr/jfrEvents.hpp 2020-01-16 16:56:30.947451207 +0300 +++ new/src/share/vm/jfr/jfrEvents.hpp 2020-01-16 16:56:30.899452756 +0300 @@ -29,7 +29,12 @@ * * Include this header to access the machine generated event class. */ + +#if INCLUDE_JFR + #include "jfrfiles/jfrEventClasses.hpp" #include "jfrfiles/jfrEventIds.hpp" +#endif + #endif // SHARE_VM_JFR_JFREVENTS_HPP --- old/src/share/vm/memory/metaspaceTracer.cpp 2020-01-16 16:56:31.139445012 +0300 +++ new/src/share/vm/memory/metaspaceTracer.cpp 2020-01-16 16:56:31.091446560 +0300 @@ -31,6 +31,7 @@ void MetaspaceTracer::report_gc_threshold(size_t old_val, size_t new_val, MetaspaceGCThresholdUpdater::Type updater) const { +#if INCLUDE_JFR EventMetaspaceGCThreshold event; if (event.should_commit()) { event.set_oldValue(old_val); @@ -38,20 +39,25 @@ event.set_updater((u1)updater); event.commit(); } +#endif } void MetaspaceTracer::report_metaspace_allocation_failure(ClassLoaderData *cld, size_t word_size, MetaspaceObj::Type objtype, Metaspace::MetadataType mdtype) const { +#if INCLUDE_JFR send_allocation_failure_event(cld, word_size, objtype, mdtype); +#endif } void MetaspaceTracer::report_metadata_oom(ClassLoaderData *cld, size_t word_size, MetaspaceObj::Type objtype, Metaspace::MetadataType mdtype) const { +#if INCLUDE_JFR send_allocation_failure_event(cld, word_size, objtype, mdtype); +#endif } template @@ -59,6 +65,7 @@ size_t word_size, 
MetaspaceObj::Type objtype, Metaspace::MetadataType mdtype) const { +#if INCLUDE_JFR E event; if (event.should_commit()) { if (cld->is_anonymous()) { @@ -78,4 +85,5 @@ event.set_metaspaceObjectType((u1) objtype); event.commit(); } +#endif } --- old/src/share/vm/opto/bytecodeInfo.cpp 2020-01-16 16:56:31.343438429 +0300 +++ new/src/share/vm/opto/bytecodeInfo.cpp 2020-01-16 16:56:31.295439978 +0300 @@ -29,7 +29,9 @@ #include "compiler/compileBroker.hpp" #include "compiler/compileLog.hpp" #include "interpreter/linkResolver.hpp" +#if INCLUDE_JFR #include "jfr/jfrEvents.hpp" +#endif #include "oops/objArrayKlass.hpp" #include "opto/callGenerator.hpp" #include "opto/parse.hpp" @@ -480,6 +482,7 @@ return NULL; } +#if INCLUDE_JFR static void post_inlining_event(int compile_id,const char* msg, bool success, int bci, ciMethod* caller, ciMethod* callee) { assert(caller != NULL, "invariant"); assert(callee != NULL, "invariant"); @@ -498,6 +501,7 @@ event.commit(); } } +#endif //------------------------------print_inlining--------------------------------- void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, @@ -520,7 +524,9 @@ //tty->print(" bcs: %d+%d invoked: %d", top->count_inline_bcs(), callee_method->code_size(), callee_method->interpreter_invocation_count()); } } +#if INCLUDE_JFR post_inlining_event(C->compile_id(), inline_msg, success, caller_bci, caller_method, callee_method); +#endif } //------------------------------ok_to_inline----------------------------------- --- old/src/share/vm/opto/compile.cpp 2020-01-16 16:56:31.547431846 +0300 +++ new/src/share/vm/opto/compile.cpp 2020-01-16 16:56:31.495433524 +0300 @@ -32,7 +32,9 @@ #include "compiler/compileLog.hpp" #include "compiler/disassembler.hpp" #include "compiler/oopMap.hpp" +#if INCLUDE_JFR #include "jfr/jfrEvents.hpp" +#endif #include "opto/addnode.hpp" #include "opto/block.hpp" #include "opto/c2compiler.hpp" --- old/src/share/vm/opto/compile.hpp 2020-01-16 16:56:31.779424359 +0300 +++ 
new/src/share/vm/opto/compile.hpp 2020-01-16 16:56:31.735425780 +0300 @@ -31,7 +31,9 @@ #include "code/exceptionHandlerTable.hpp" #include "compiler/compilerOracle.hpp" #include "compiler/compileBroker.hpp" +#if INCLUDE_JFR #include "jfr/jfrEvents.hpp" +#endif #include "libadt/dict.hpp" #include "libadt/port.hpp" #include "libadt/vectset.hpp" @@ -633,6 +635,7 @@ } void print_method(CompilerPhaseType cpt, int level = 1) { +#if INCLUDE_JFR EventCompilerPhase event; if (event.should_commit()) { event.set_starttime(C->_latest_stage_start_counter); @@ -641,6 +644,7 @@ event.set_phaseLevel(level); event.commit(); } +#endif #ifndef PRODUCT @@ -650,6 +654,7 @@ } void end_method(int level = 1) { +#if INCLUDE_JFR EventCompilerPhase event; if (event.should_commit()) { event.set_starttime(C->_latest_stage_start_counter); @@ -658,6 +663,7 @@ event.set_phaseLevel(level); event.commit(); } +#endif #ifndef PRODUCT if (_printer) _printer->end_method(); #endif --- old/src/share/vm/opto/library_call.cpp 2020-01-16 16:56:31.995417390 +0300 +++ new/src/share/vm/opto/library_call.cpp 2020-01-16 16:56:31.943419068 +0300 @@ -27,7 +27,9 @@ #include "classfile/vmSymbols.hpp" #include "compiler/compileBroker.hpp" #include "compiler/compileLog.hpp" +#if INCLUDE_JFR #include "jfr/support/jfrIntrinsics.hpp" +#endif #include "oops/objArrayKlass.hpp" #include "opto/addnode.hpp" #include "opto/callGenerator.hpp" --- old/src/share/vm/prims/jni.cpp 2020-01-16 16:56:32.239409516 +0300 +++ new/src/share/vm/prims/jni.cpp 2020-01-16 16:56:32.191411065 +0300 @@ -32,8 +32,10 @@ #include "classfile/systemDictionary.hpp" #include "classfile/vmSymbols.hpp" #include "interpreter/linkResolver.hpp" +#if INCLUDE_JFR #include "jfr/jfrEvents.hpp" #include "jfr/support/jfrThreadId.hpp" +#endif #include "utilities/macros.hpp" #include "utilities/ostream.hpp" #if INCLUDE_ALL_GCS @@ -5020,6 +5022,7 @@ return &jni_NativeInterface; } +#if INCLUDE_JFR static void post_thread_start_event(const JavaThread* jt) { assert(jt 
!= NULL, "invariant"); EventThreadStart event; @@ -5028,6 +5031,7 @@ event.commit(); } } +#endif // Invocation API @@ -5250,7 +5254,9 @@ JvmtiExport::post_thread_start(thread); } +#if INCLUDE_JFR post_thread_start_event(thread); +#endif #ifndef PRODUCT #ifndef CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED @@ -5461,7 +5467,9 @@ JvmtiExport::post_thread_start(thread); } +#if INCLUDE_JFR post_thread_start_event(thread); +#endif *(JNIEnv**)penv = thread->jni_environment(); --- old/src/share/vm/prims/jvm.cpp 2020-01-16 16:56:32.487401513 +0300 +++ new/src/share/vm/prims/jvm.cpp 2020-01-16 16:56:32.439403063 +0300 @@ -37,7 +37,9 @@ #include "classfile/vmSymbols.hpp" #include "gc_interface/collectedHeap.inline.hpp" #include "interpreter/bytecode.hpp" +#if INCLUDE_JFR #include "jfr/jfrEvents.hpp" +#endif #include "memory/oopFactory.hpp" #include "memory/referenceType.hpp" #include "memory/universe.inline.hpp" @@ -445,11 +447,13 @@ JVM_ENTRY_NO_ENV(void, JVM_BeforeHalt()) JVMWrapper("JVM_BeforeHalt"); +#if INCLUDE_JFR EventShutdown event; if (event.should_commit()) { event.set_reason("Shutdown requested from Java"); event.commit(); } +#endif JVM_END @@ -3286,12 +3290,14 @@ } JVM_END +#if INCLUDE_JFR static void post_thread_sleep_event(EventThreadSleep* event, jlong millis) { assert(event != NULL, "invariant"); assert(event->should_commit(), "invariant"); event->set_time(millis); event->commit(); } +#endif JVM_ENTRY(void, JVM_Sleep(JNIEnv* env, jclass threadClass, jlong millis)) JVMWrapper("JVM_Sleep"); @@ -3315,7 +3321,9 @@ millis); #endif /* USDT2 */ +#if INCLUDE_JFR EventThreadSleep event; +#endif if (millis == 0) { // When ConvertSleepToYield is on, this matches the classic VM implementation of @@ -3337,9 +3345,11 @@ // An asynchronous exception (e.g., ThreadDeathException) could have been thrown on // us while we were sleeping. We do not overwrite those. 
if (!HAS_PENDING_EXCEPTION) { +#if INCLUDE_JFR if (event.should_commit()) { post_thread_sleep_event(&event, millis); } +#endif #ifndef USDT2 HS_DTRACE_PROBE1(hotspot, thread__sleep__end,1); #else /* USDT2 */ @@ -3353,9 +3363,11 @@ } thread->osthread()->set_state(old_state); } +#if INCLUDE_JFR if (event.should_commit()) { post_thread_sleep_event(&event, millis); } +#endif #ifndef USDT2 HS_DTRACE_PROBE1(hotspot, thread__sleep__end,0); #else /* USDT2 */ --- old/src/share/vm/prims/unsafe.cpp 2020-01-16 16:56:32.723393899 +0300 +++ new/src/share/vm/prims/unsafe.cpp 2020-01-16 16:56:32.679395318 +0300 @@ -28,7 +28,9 @@ #if INCLUDE_ALL_GCS #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" #endif // INCLUDE_ALL_GCS +#if INCLUDE_JFR #include "jfr/jfrEvents.hpp" +#endif #include "memory/allocation.inline.hpp" #include "prims/jni.h" #include "prims/jvm.h" @@ -1236,6 +1238,7 @@ #endif UNSAFE_END +#if INCLUDE_JFR static void post_thread_park_event(EventThreadPark* event, const oop obj, jlong timeout_nanos, jlong until_epoch_millis) { assert(event != NULL, "invariant"); assert(event->should_commit(), "invariant"); @@ -1245,10 +1248,13 @@ event->set_address((obj != NULL) ? 
(u8)cast_from_oop(obj) : 0); event->commit(); } +#endif UNSAFE_ENTRY(void, Unsafe_Park(JNIEnv *env, jobject unsafe, jboolean isAbsolute, jlong time)) UnsafeWrapper("Unsafe_Park"); +#if INCLUDE_JFR EventThreadPark event; +#endif #ifndef USDT2 HS_DTRACE_PROBE3(hotspot, thread__park__begin, thread->parker(), (int) isAbsolute, time); #else /* USDT2 */ @@ -1263,6 +1269,7 @@ HOTSPOT_THREAD_PARK_END( (uintptr_t) thread->parker()); #endif /* USDT2 */ +#if INCLUDE_JFR if (event.should_commit()) { const oop obj = thread->current_park_blocker(); if (time == 0) { @@ -1275,6 +1282,7 @@ } } } +#endif UNSAFE_END UNSAFE_ENTRY(void, Unsafe_Unpark(JNIEnv *env, jobject unsafe, jobject jthread)) --- old/src/share/vm/runtime/biasedLocking.cpp 2020-01-16 16:56:32.935387057 +0300 +++ new/src/share/vm/runtime/biasedLocking.cpp 2020-01-16 16:56:32.887388607 +0300 @@ -31,8 +31,10 @@ #include "runtime/vframe.hpp" #include "runtime/vmThread.hpp" #include "runtime/vm_operations.hpp" +#if INCLUDE_JFR #include "jfr/support/jfrThreadId.hpp" #include "jfr/jfrEvents.hpp" +#endif static bool _biased_locking_enabled = false; BiasedLockingCounters BiasedLocking::_counters; @@ -453,7 +455,7 @@ GrowableArray* _objs; JavaThread* _requesting_thread; BiasedLocking::Condition _status_code; - traceid _biased_locker_id; + JFR_ONLY(traceid _biased_locker_id;) public: VM_RevokeBias(Handle* obj, JavaThread* requesting_thread) @@ -461,14 +463,20 @@ , _objs(NULL) , _requesting_thread(requesting_thread) , _status_code(BiasedLocking::NOT_BIASED) - , _biased_locker_id(0) {} +#if INCLUDE_JFR + , _biased_locker_id(0) +#endif + {} VM_RevokeBias(GrowableArray* objs, JavaThread* requesting_thread) : _obj(NULL) , _objs(objs) , _requesting_thread(requesting_thread) , _status_code(BiasedLocking::NOT_BIASED) - , _biased_locker_id(0) {} +#if INCLUDE_JFR + , _biased_locker_id(0) +#endif + {} virtual VMOp_Type type() const { return VMOp_RevokeBias; } @@ -499,9 +507,11 @@ } JavaThread* biased_locker = NULL; _status_code = 
revoke_bias((*_obj)(), false, false, _requesting_thread, &biased_locker); +#if INCLUDE_JFR if (biased_locker != NULL) { _biased_locker_id = JFR_THREAD_ID(biased_locker); } +#endif clean_up_cached_monitor_info(); return; } else { @@ -516,9 +526,11 @@ return _status_code; } +#if INCLUDE_JFR traceid biased_locker() const { return _biased_locker_id; } +#endif }; @@ -628,19 +640,26 @@ if (TraceBiasedLocking) { tty->print_cr("Revoking bias by walking my own stack:"); } +#if INCLUDE_JFR EventBiasedLockSelfRevocation event; +#endif BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD, NULL); ((JavaThread*) THREAD)->set_cached_monitor_info(NULL); assert(cond == BIAS_REVOKED, "why not?"); +#if INCLUDE_JFR if (event.should_commit()) { event.set_lockClass(k); event.commit(); } +#endif return cond; } else { +#if INCLUDE_JFR EventBiasedLockRevocation event; +#endif VM_RevokeBias revoke(&obj, (JavaThread*) THREAD); VMThread::execute(&revoke); +#if INCLUDE_JFR if (event.should_commit() && (revoke.status_code() != NOT_BIASED)) { event.set_lockClass(k); // Subtract 1 to match the id of events committed inside the safepoint @@ -648,17 +667,21 @@ event.set_previousOwner(revoke.biased_locker()); event.commit(); } +#endif return revoke.status_code(); } } assert((heuristics == HR_BULK_REVOKE) || (heuristics == HR_BULK_REBIAS), "?"); +#if INCLUDE_JFR EventBiasedLockClassRevocation event; +#endif VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD, (heuristics == HR_BULK_REBIAS), attempt_rebias); VMThread::execute(&bulk_revoke); +#if INCLUDE_JFR if (event.should_commit()) { event.set_revokedClass(obj->klass()); event.set_disableBiasing((heuristics != HR_BULK_REBIAS)); @@ -666,6 +689,7 @@ event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1); event.commit(); } +#endif return bulk_revoke.status_code(); } --- old/src/share/vm/runtime/globals.cpp 2020-01-16 16:56:33.139380475 +0300 +++ new/src/share/vm/runtime/globals.cpp 2020-01-16 
16:56:33.091382024 +0300 @@ -23,7 +23,9 @@ */ #include "precompiled.hpp" +#if INCLUDE_JFR #include "jfr/jfrEvents.hpp" +#endif #include "memory/allocation.inline.hpp" #include "oops/oop.inline.hpp" #include "runtime/arguments.hpp" @@ -629,7 +631,9 @@ if (result == NULL) return false; if (!result->is_bool()) return false; bool old_value = result->get_bool(); +#if INCLUDE_JFR trace_flag_changed(name, old_value, *value, origin); +#endif result->set_bool(*value); *value = old_value; result->set_origin(origin); @@ -639,7 +643,9 @@ void CommandLineFlagsEx::boolAtPut(CommandLineFlagWithType flag, bool value, Flag::Flags origin) { Flag* faddr = address_of_flag(flag); guarantee(faddr != NULL && faddr->is_bool(), "wrong flag type"); +#if INCLUDE_JFR trace_flag_changed(faddr->_name, faddr->get_bool(), value, origin); +#endif faddr->set_bool(value); faddr->set_origin(origin); } @@ -657,7 +663,9 @@ if (result == NULL) return false; if (!result->is_intx()) return false; intx old_value = result->get_intx(); +#if INCLUDE_JFR trace_flag_changed(name, old_value, *value, origin); +#endif result->set_intx(*value); *value = old_value; result->set_origin(origin); @@ -667,7 +675,9 @@ void CommandLineFlagsEx::intxAtPut(CommandLineFlagWithType flag, intx value, Flag::Flags origin) { Flag* faddr = address_of_flag(flag); guarantee(faddr != NULL && faddr->is_intx(), "wrong flag type"); +#if INCLUDE_JFR trace_flag_changed(faddr->_name, faddr->get_intx(), value, origin); +#endif faddr->set_intx(value); faddr->set_origin(origin); } @@ -685,7 +695,9 @@ if (result == NULL) return false; if (!result->is_uintx()) return false; uintx old_value = result->get_uintx(); +#if INCLUDE_JFR trace_flag_changed(name, old_value, *value, origin); +#endif result->set_uintx(*value); *value = old_value; result->set_origin(origin); @@ -695,7 +707,9 @@ void CommandLineFlagsEx::uintxAtPut(CommandLineFlagWithType flag, uintx value, Flag::Flags origin) { Flag* faddr = address_of_flag(flag); guarantee(faddr != NULL && 
faddr->is_uintx(), "wrong flag type"); +#if INCLUDE_JFR trace_flag_changed(faddr->_name, faddr->get_uintx(), value, origin); +#endif faddr->set_uintx(value); faddr->set_origin(origin); } @@ -713,7 +727,9 @@ if (result == NULL) return false; if (!result->is_uint64_t()) return false; uint64_t old_value = result->get_uint64_t(); +#if INCLUDE_JFR trace_flag_changed(name, old_value, *value, origin); +#endif result->set_uint64_t(*value); *value = old_value; result->set_origin(origin); @@ -723,7 +739,9 @@ void CommandLineFlagsEx::uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, Flag::Flags origin) { Flag* faddr = address_of_flag(flag); guarantee(faddr != NULL && faddr->is_uint64_t(), "wrong flag type"); +#if INCLUDE_JFR trace_flag_changed(faddr->_name, faddr->get_uint64_t(), value, origin); +#endif faddr->set_uint64_t(value); faddr->set_origin(origin); } @@ -741,7 +759,9 @@ if (result == NULL) return false; if (!result->is_double()) return false; double old_value = result->get_double(); +#if INCLUDE_JFR trace_flag_changed(name, old_value, *value, origin); +#endif result->set_double(*value); *value = old_value; result->set_origin(origin); @@ -751,7 +771,9 @@ void CommandLineFlagsEx::doubleAtPut(CommandLineFlagWithType flag, double value, Flag::Flags origin) { Flag* faddr = address_of_flag(flag); guarantee(faddr != NULL && faddr->is_double(), "wrong flag type"); +#if INCLUDE_JFR trace_flag_changed(faddr->_name, faddr->get_double(), value, origin); +#endif faddr->set_double(value); faddr->set_origin(origin); } @@ -769,7 +791,9 @@ if (result == NULL) return false; if (!result->is_ccstr()) return false; ccstr old_value = result->get_ccstr(); +#if INCLUDE_JFR trace_flag_changed(name, old_value, *value, origin); +#endif char* new_value = NULL; if (*value != NULL) { new_value = NEW_C_HEAP_ARRAY(char, strlen(*value)+1, mtInternal); @@ -791,7 +815,9 @@ Flag* faddr = address_of_flag(flag); guarantee(faddr != NULL && faddr->is_ccstr(), "wrong flag type"); ccstr old_value = 
faddr->get_ccstr(); +#if INCLUDE_JFR trace_flag_changed(faddr->_name, old_value, value, origin); +#endif char* new_value = NEW_C_HEAP_ARRAY(char, strlen(value)+1, mtInternal); strcpy(new_value, value); faddr->set_ccstr(new_value); --- old/src/share/vm/runtime/java.cpp 2020-01-16 16:56:33.339374021 +0300 +++ new/src/share/vm/runtime/java.cpp 2020-01-16 16:56:33.291375570 +0300 @@ -30,8 +30,10 @@ #include "compiler/compileBroker.hpp" #include "compiler/compilerOracle.hpp" #include "interpreter/bytecodeHistogram.hpp" +#if INCLUDE_JFR #include "jfr/jfrEvents.hpp" #include "jfr/support/jfrThreadId.hpp" +#endif #include "memory/genCollectedHeap.hpp" #include "memory/oopFactory.hpp" #include "memory/universe.hpp" @@ -525,11 +527,13 @@ } +#if INCLUDE_JFR EventThreadEnd event; if (event.should_commit()) { event.set_thread(JFR_THREAD_ID(thread)); event.commit(); } +#endif JFR_ONLY(Jfr::on_vm_shutdown();) --- old/src/share/vm/runtime/objectMonitor.cpp 2020-01-16 16:56:33.539367568 +0300 +++ new/src/share/vm/runtime/objectMonitor.cpp 2020-01-16 16:56:33.491369117 +0300 @@ -24,8 +24,10 @@ #include "precompiled.hpp" #include "classfile/vmSymbols.hpp" +#if INCLUDE_JFR #include "jfr/jfrEvents.hpp" #include "jfr/support/jfrThreadId.hpp" +#endif #include "memory/resourceArea.hpp" #include "oops/markOop.hpp" #include "oops/oop.inline.hpp" @@ -380,11 +382,13 @@ Atomic::inc_ptr(&_count); JFR_ONLY(JfrConditionalFlushWithStacktrace flush(jt);) +#if INCLUDE_JFR EventJavaMonitorEnter event; if (event.should_commit()) { event.set_monitorClass(((oop)this->object())->klass()); event.set_address((uintptr_t)(this->object_addr())); } +#endif { // Change java thread status to indicate blocked on monitor enter. JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this); @@ -472,10 +476,12 @@ // just exited the monitor. 
} +#if INCLUDE_JFR if (event.should_commit()) { event.set_previousOwner((uintptr_t)_previous_owner_tid); event.commit(); } +#endif if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) { ObjectMonitor::_sync_ContendedLockAttempts->inc() ; @@ -1448,6 +1454,7 @@ return v ; } +#if INCLUDE_JFR // helper method for posting a monitor wait event static void post_monitor_wait_event(EventJavaMonitorWait* event, ObjectMonitor* monitor, @@ -1462,6 +1469,7 @@ event->set_timedOut(timedout); event->commit(); } +#endif // ----------------------------------------------------------------------------- // Wait/Notify/NotifyAll @@ -1478,7 +1486,9 @@ // Throw IMSX or IEX. CHECK_OWNER(); +#if INCLUDE_JFR EventJavaMonitorWait event; +#endif // check for a pending interrupt if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) { @@ -1496,9 +1506,11 @@ // consume an unpark() meant for the ParkEvent associated with // this ObjectMonitor. } +#if INCLUDE_JFR if (event.should_commit()) { post_monitor_wait_event(&event, this, 0, millis, false); } +#endif TEVENT (Wait - Throw IEX) ; THROW(vmSymbols::java_lang_InterruptedException()); return ; @@ -1640,9 +1652,11 @@ } } +#if INCLUDE_JFR if (event.should_commit()) { post_monitor_wait_event(&event, this, node._notifier_tid, millis, ret == OS_TIMEOUT); } +#endif OrderAccess::fence() ; @@ -1724,7 +1738,9 @@ } iterator->_notified = 1 ; Thread * Self = THREAD; +#if INCLUDE_JFR iterator->_notifier_tid = JFR_THREAD_ID(Self); +#endif ObjectWaiter * List = _EntryList ; if (List != NULL) { @@ -1850,7 +1866,9 @@ guarantee (iterator->_notified == 0, "invariant") ; iterator->_notified = 1 ; Thread * Self = THREAD; +#if INCLUDE_JFR iterator->_notifier_tid = JFR_THREAD_ID(Self); +#endif if (Policy != 4) { iterator->TState = ObjectWaiter::TS_ENTER ; } --- old/src/share/vm/runtime/objectMonitor.hpp 2020-01-16 16:56:33.755360598 +0300 +++ new/src/share/vm/runtime/objectMonitor.hpp 2020-01-16 16:56:33.707362147 +0300 @@ -42,7 +42,9 @@ 
ObjectWaiter * volatile _next; ObjectWaiter * volatile _prev; Thread* _thread; +#if INCLUDE_JFR jlong _notifier_tid; +#endif ParkEvent * _event; volatile int _notified ; volatile TStates TState ; --- old/src/share/vm/runtime/safepoint.cpp 2020-01-16 16:56:33.951354273 +0300 +++ new/src/share/vm/runtime/safepoint.cpp 2020-01-16 16:56:33.903355822 +0300 @@ -32,7 +32,9 @@ #include "code/scopeDesc.hpp" #include "gc_interface/collectedHeap.hpp" #include "interpreter/interpreter.hpp" +#if INCLUDE_JFR #include "jfr/jfrEvents.hpp" +#endif #include "memory/resourceArea.hpp" #include "memory/universe.inline.hpp" #include "oops/oop.inline.hpp" @@ -90,6 +92,7 @@ event->set_safepointId(SafepointSynchronize::safepoint_counter() + adjustment); } +#if INCLUDE_JFR static void post_safepoint_begin_event(EventSafepointBegin* event, int thread_count, int critical_thread_count) { @@ -150,6 +153,7 @@ event->commit(); } } +#endif // -------------------------------------------------------------------------------------------------- // Implementation of Safepoint begin/end @@ -165,7 +169,9 @@ // Roll all threads forward to a safepoint and suspend them all void SafepointSynchronize::begin() { +#if INCLUDE_JFR EventSafepointBegin begin_event; +#endif Thread* myThread = Thread::current(); assert(myThread->is_VM_thread(), "Only VM thread may execute a safepoint"); @@ -257,7 +263,9 @@ // between states, the safepointing code will wait for the thread to // block itself when it attempts transitions to a new state. 
// +#if INCLUDE_JFR EventSafepointStateSynchronization sync_event; +#endif int initial_running = 0; _state = _synchronizing; @@ -410,13 +418,17 @@ update_statistics_on_spin_end(); } +#if INCLUDE_JFR if (sync_event.should_commit()) { post_safepoint_synchronize_event(&sync_event, initial_running, _waiting_to_block, iterations); } +#endif // wait until all threads are stopped { +#if INCLUDE_JFR EventSafepointWaitBlocked wait_blocked_event; +#endif int initial_waiting_to_block = _waiting_to_block; while (_waiting_to_block > 0) { @@ -456,9 +468,11 @@ OrderAccess::fence(); +#if INCLUDE_JFR if (wait_blocked_event.should_commit()) { post_safepoint_wait_blocked_event(&wait_blocked_event, initial_waiting_to_block); } +#endif } #ifdef ASSERT @@ -483,11 +497,15 @@ // Call stuff that needs to be run when a safepoint is just about to be completed { +#if INCLUDE_JFR EventSafepointCleanup cleanup_event; +#endif do_cleanup_tasks(); +#if INCLUDE_JFR if (cleanup_event.should_commit()) { post_safepoint_cleanup_event(&cleanup_event); } +#endif } if (PrintSafepointStatistics) { @@ -495,9 +513,11 @@ update_statistics_on_cleanup_end(os::javaTimeNanos()); } +#if INCLUDE_JFR if (begin_event.should_commit()) { post_safepoint_begin_event(&begin_event, nof_threads, _current_jni_active_count); } +#endif } // Wake up all threads, so they are ready to resume execution after the safepoint @@ -506,7 +526,9 @@ assert(Threads_lock->owned_by_self(), "must hold Threads_lock"); assert((_safepoint_counter & 0x1) == 1, "must be odd"); +#if INCLUDE_JFR EventSafepointEnd event; +#endif _safepoint_counter ++; // memory fence isn't required here since an odd _safepoint_counter // value can do no harm and a fence is issued below anyway. @@ -592,9 +614,11 @@ // record this time so VMThread can keep track how much time has elasped // since last safepoint. 
_end_of_last_safepoint = os::javaTimeMillis(); +#if INCLUDE_JFR if (event.should_commit()) { post_safepoint_end_event(&event); } +#endif } bool SafepointSynchronize::is_cleanup_needed() { @@ -609,61 +633,85 @@ void SafepointSynchronize::do_cleanup_tasks() { { const char* name = "deflating idle monitors"; +#if INCLUDE_JFR EventSafepointCleanupTask event; +#endif TraceTime t1(name, TraceSafepointCleanupTime); ObjectSynchronizer::deflate_idle_monitors(); +#if INCLUDE_JFR if (event.should_commit()) { post_safepoint_cleanup_task_event(&event, name); } +#endif } { const char* name = "updating inline caches"; +#if INCLUDE_JFR EventSafepointCleanupTask event; +#endif TraceTime t2(name, TraceSafepointCleanupTime); InlineCacheBuffer::update_inline_caches(); +#if INCLUDE_JFR if (event.should_commit()) { post_safepoint_cleanup_task_event(&event, name); } +#endif } { const char* name = "compilation policy safepoint handler"; +#if INCLUDE_JFR EventSafepointCleanupTask event; +#endif TraceTime t3(name, TraceSafepointCleanupTime); CompilationPolicy::policy()->do_safepoint_work(); +#if INCLUDE_JFR if (event.should_commit()) { post_safepoint_cleanup_task_event(&event, name); } +#endif } { const char* name = "mark nmethods"; +#if INCLUDE_JFR EventSafepointCleanupTask event; +#endif TraceTime t4(name, TraceSafepointCleanupTime); NMethodSweeper::mark_active_nmethods(); +#if INCLUDE_JFR if (event.should_commit()) { post_safepoint_cleanup_task_event(&event, name); } +#endif } if (SymbolTable::needs_rehashing()) { const char* name = "rehashing symbol table"; +#if INCLUDE_JFR EventSafepointCleanupTask event; +#endif TraceTime t5(name, TraceSafepointCleanupTime); SymbolTable::rehash_table(); +#if INCLUDE_JFR if (event.should_commit()) { post_safepoint_cleanup_task_event(&event, name); } +#endif } if (StringTable::needs_rehashing()) { const char* name = "rehashing string table"; +#if INCLUDE_JFR EventSafepointCleanupTask event; +#endif TraceTime t6(name, TraceSafepointCleanupTime); 
StringTable::rehash_table(); +#if INCLUDE_JFR if (event.should_commit()) { post_safepoint_cleanup_task_event(&event, name); } +#endif } // rotate log files? --- old/src/share/vm/runtime/sweeper.cpp 2020-01-16 16:56:34.163347433 +0300 +++ new/src/share/vm/runtime/sweeper.cpp 2020-01-16 16:56:34.115348981 +0300 @@ -28,7 +28,9 @@ #include "code/icBuffer.hpp" #include "code/nmethod.hpp" #include "compiler/compileBroker.hpp" +#if INCLUDE_JFR #include "jfr/jfrEvents.hpp" +#endif #include "memory/resourceArea.hpp" #include "oops/method.hpp" #include "runtime/atomic.hpp" @@ -318,6 +320,7 @@ } } +#if INCLUDE_JFR static void post_sweep_event(EventSweepCodeCache* event, const Ticks& start, const Ticks& end, @@ -335,6 +338,7 @@ event->set_zombifiedCount(zombified); event->commit(); } +#endif void NMethodSweeper::sweep_code_cache() { ResourceMark rm; @@ -410,10 +414,12 @@ _total_flushed_size += freed_memory; _total_nof_methods_reclaimed += _flushed_count; +#if INCLUDE_JFR EventSweepCodeCache event(UNTIMED); if (event.should_commit()) { post_sweep_event(&event, sweep_start_counter, sweep_end_counter, (s4)_traversals, swept_count, _flushed_count, _zombified_count); } +#endif #ifdef ASSERT if(PrintMethodFlushing) { --- old/src/share/vm/runtime/synchronizer.cpp 2020-01-16 16:56:34.359341108 +0300 +++ new/src/share/vm/runtime/synchronizer.cpp 2020-01-16 16:56:34.311342657 +0300 @@ -24,7 +24,9 @@ #include "precompiled.hpp" #include "classfile/vmSymbols.hpp" +#if INCLUDE_JFR #include "jfr/jfrEvents.hpp" +#endif #include "memory/resourceArea.hpp" #include "oops/markOop.hpp" #include "oops/oop.inline.hpp" @@ -1179,6 +1181,7 @@ TEVENT (omFlush) ; } +#if INCLUDE_JFR static void post_monitor_inflate_event(EventJavaMonitorInflate* event, const oop obj) { assert(event != NULL, "invariant"); @@ -1189,6 +1192,7 @@ // event->set_cause((u1)cause); event->commit(); } +#endif // Fast path code shared by multiple functions ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) { @@ -1212,7 
+1216,9 @@ assert (Universe::verify_in_progress() || !SafepointSynchronize::is_at_safepoint(), "invariant") ; +#if INCLUDE_JFR EventJavaMonitorInflate event; +#endif for (;;) { const markOop mark = object->mark() ; @@ -1344,9 +1350,11 @@ object->klass()->external_name()); } } +#if INCLUDE_JFR if (event.should_commit()) { post_monitor_inflate_event(&event, object); } +#endif return m ; } @@ -1397,9 +1405,11 @@ object->klass()->external_name()); } } +#if INCLUDE_JFR if (event.should_commit()) { post_monitor_inflate_event(&event, object); } +#endif return m ; } } --- old/src/share/vm/runtime/thread.cpp 2020-01-16 16:56:34.567334396 +0300 +++ new/src/share/vm/runtime/thread.cpp 2020-01-16 16:56:34.519335945 +0300 @@ -32,7 +32,9 @@ #include "interpreter/interpreter.hpp" #include "interpreter/linkResolver.hpp" #include "interpreter/oopMapCache.hpp" +#if INCLUDE_JFR #include "jfr/jfrEvents.hpp" +#endif #include "jvmtifiles/jvmtiEnv.hpp" #include "memory/gcLocker.inline.hpp" #include "memory/metaspaceShared.hpp" @@ -3997,11 +3999,13 @@ Mutex::_as_suspend_equivalent_flag); } +#if INCLUDE_JFR EventShutdown e; if (e.should_commit()) { e.set_reason("No remaining non-daemon Java threads"); e.commit(); } +#endif // Hang forever on exit if we are reporting an error. 
if (ShowMessageBoxOnError && is_error_reported()) { --- old/src/share/vm/runtime/vmThread.cpp 2020-01-16 16:56:34.799326910 +0300 +++ new/src/share/vm/runtime/vmThread.cpp 2020-01-16 16:56:34.751328459 +0300 @@ -25,8 +25,10 @@ #include "precompiled.hpp" #include "compiler/compileBroker.hpp" #include "gc_interface/collectedHeap.hpp" +#if INCLUDE_JFR #include "jfr/jfrEvents.hpp" #include "jfr/support/jfrThreadId.hpp" +#endif #include "memory/resourceArea.hpp" #include "oops/method.hpp" #include "oops/oop.inline.hpp" @@ -360,6 +362,7 @@ st->cr(); } +#if INCLUDE_JFR static void post_vm_operation_event(EventExecuteVMOperation* event, VM_Operation* op) { assert(event != NULL, "invariant"); assert(event->should_commit(), "invariant"); @@ -376,6 +379,7 @@ event->set_safepointId(evaluate_at_safepoint ? SafepointSynchronize::safepoint_counter() : 0); event->commit(); } +#endif void VMThread::evaluate_operation(VM_Operation* op) { ResourceMark rm; @@ -391,11 +395,15 @@ op->evaluation_mode()); #endif /* USDT2 */ +#if INCLUDE_JFR EventExecuteVMOperation event; +#endif op->evaluate(); +#if INCLUDE_JFR if (event.should_commit()) { post_vm_operation_event(&event, op); } +#endif #ifndef USDT2 HS_DTRACE_PROBE3(hotspot, vmops__end, op->name(), strlen(op->name()), --- old/src/share/vm/utilities/vmError.cpp 2020-01-16 16:56:34.999320456 +0300 +++ new/src/share/vm/utilities/vmError.cpp 2020-01-16 16:56:34.951322005 +0300 @@ -26,7 +26,9 @@ #include "precompiled.hpp" #include "compiler/compileBroker.hpp" #include "gc_interface/collectedHeap.hpp" +#if INCLUDE_JFR #include "jfr/jfrEvents.hpp" +#endif #include "prims/whitebox.hpp" #include "runtime/arguments.hpp" #include "runtime/frame.inline.hpp" @@ -944,12 +946,13 @@ // are handled properly. reset_signal_handlers(); +#if INCLUDE_JFR EventShutdown e; if (e.should_commit()) { e.set_reason("VM Error"); e.commit(); } - +#endif JFR_ONLY(Jfr::on_vm_shutdown(true);) } else { // If UseOsErrorReporting we call this for each level of the call stack