--- old/make/Makefile 2019-02-15 19:02:03.392024646 +0300 +++ new/make/Makefile 2019-02-15 19:02:03.276028713 +0300 @@ -575,6 +575,11 @@ $(SED) 's/\(separated by \)[;:]/\1$(PATH_SEP)/g' $< > $@.temp $(MV) $@.temp $@ +# Java Flight Recorder +$(EXPORT_JRE_LIB_DIR)/jdk/jfr/internal/types/metadata.xml: $(HS_SRC_DIR)/share/vm/jfr/metadata/metadata.xml + mkdir -p $(dir $@) + cp $< $@ + # # Clean rules # --- old/make/bsd/makefiles/buildtree.make 2019-02-15 19:02:03.760011745 +0300 +++ new/make/bsd/makefiles/buildtree.make 2019-02-15 19:02:03.660015251 +0300 @@ -47,7 +47,7 @@ # flags.make - with macro settings # vm.make - to support making "$(MAKE) -v vm.make" in makefiles # adlc.make - -# trace.make - generate tracing event and type definitions +# jfr.make - generate jfr event and type definitions # jvmti.make - generate JVMTI bindings from the spec (JSR-163) # sa.make - generate SA jar file and natives # @@ -112,6 +112,10 @@ endif endif +ifeq ($(ENABLE_JFR),false) +ALWAYS_EXCLUDE_DIRS += -o -name jfr +endif + # Get things from the platform file. 
COMPILER = $(shell sed -n 's/^compiler[ ]*=[ ]*//p' $(PLATFORM_FILE)) @@ -120,7 +124,7 @@ $(PLATFORM_DIR)/generated/dependencies \ $(PLATFORM_DIR)/generated/adfiles \ $(PLATFORM_DIR)/generated/jvmtifiles \ - $(PLATFORM_DIR)/generated/tracefiles \ + $(PLATFORM_DIR)/generated/jfrfiles \ $(PLATFORM_DIR)/generated/dtracefiles TARGETS = debug fastdebug optimized product @@ -130,7 +134,7 @@ BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make # dtrace.make is used on BSD versions that implement Dtrace (like MacOS X) -BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make dtrace.make +BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make jfr.make sa.make dtrace.make BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \ SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT) @@ -200,6 +204,12 @@ DATA_MODE = $(DATA_MODE/$(BUILDARCH)) +ifeq ($(ENABLE_JFR), true) + INCLUDE_JFR = 1 +else + INCLUDE_JFR = 0 +endif + flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst @echo Creating $@ ... $(QUIETLY) ( \ @@ -275,6 +285,7 @@ echo && \ echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \ echo "SYSDEFS += \$$(HOTSPOT_EXTRA_SYSDEFS)"; \ + echo && echo "CFLAGS += -DINCLUDE_JFR=$(INCLUDE_JFR)"; \ echo; \ [ -n "$(SPEC)" ] && \ echo "include $(SPEC)"; \ @@ -343,7 +354,7 @@ echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ ) > $@ -trace.make: $(BUILDTREE_MAKE) +jfr.make: $(BUILDTREE_MAKE) @echo Creating $@ ... 
$(QUIETLY) ( \ $(BUILDTREE_COMMENT); \ --- old/make/bsd/makefiles/rules.make 2019-02-15 19:02:04.115999266 +0300 +++ new/make/bsd/makefiles/rules.make 2019-02-15 19:02:04.016002771 +0300 @@ -126,8 +126,8 @@ RUN.JAR$(MAKE_VERBOSE) += >/dev/null # Settings for javac -BOOT_SOURCE_LANGUAGE_VERSION = 6 -BOOT_TARGET_CLASS_VERSION = 6 +BOOT_SOURCE_LANGUAGE_VERSION = 8 +BOOT_TARGET_CLASS_VERSION = 8 JAVAC_FLAGS = -g -encoding ascii BOOTSTRAP_JAVAC_FLAGS = $(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION) --- old/make/bsd/makefiles/top.make 2019-02-15 19:02:04.479986506 +0300 +++ new/make/bsd/makefiles/top.make 2019-02-15 19:02:04.315992255 +0300 @@ -80,7 +80,7 @@ @echo All done. # This is an explicit dependency for the sake of parallel makes. -vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff trace_stuff sa_stuff dtrace_stuff +vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff jfr_stuff sa_stuff dtrace_stuff @# We need a null action here, so implicit rules don't get consulted. $(Cached_plat): $(Plat_File) @@ -94,9 +94,9 @@ jvmti_stuff: $(Cached_plat) $(adjust-mflags) @$(MAKE) -f jvmti.make $(MFLAGS-adjusted) -# generate trace files -trace_stuff: jvmti_stuff $(Cached_plat) $(adjust-mflags) - @$(MAKE) -f trace.make $(MFLAGS-adjusted) +# generate JFR files +jfr_stuff: $(Cached_plat) $(adjust-mflags) + @$(MAKE) -f jfr.make $(MFLAGS-adjusted) ifeq ($(OS_VENDOR), Darwin) # generate dtrace header files --- old/make/bsd/makefiles/vm.make 2019-02-15 19:02:04.747977112 +0300 +++ new/make/bsd/makefiles/vm.make 2019-02-15 19:02:04.619981599 +0300 @@ -52,7 +52,7 @@ # Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm # The adfiles directory contains ad_.[ch]pp. 
# The jvmtifiles directory contains jvmti*.[ch]pp -Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/tracefiles +Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/jfrfiles VPATH += $(Src_Dirs_V:%=%:) # set INCLUDES for C preprocessor. @@ -163,24 +163,21 @@ LIBJVM_DIZ = lib$(JVM).diz endif +ifeq ($(ENABLE_JFR),false) +EXCLUDE_JFR_PATHS:= -o -name jfr -prune +endif SPECIAL_PATHS:=adlc c1 gc_implementation opto shark libadt SOURCE_PATHS=\ $(shell find $(HS_COMMON_SRC)/share/vm/* -type d \! \ - \( -name DUMMY $(foreach dir,$(SPECIAL_PATHS),-o -name $(dir)) \)) + \( -name DUMMY $(foreach dir,$(SPECIAL_PATHS),-o -name $(dir)) $(EXCLUDE_JFR_PATHS) \)) SOURCE_PATHS+=$(HS_COMMON_SRC)/os/$(Platform_os_family)/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/os/posix/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path)) -CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles - -ifneq ($(INCLUDE_TRACE), false) -CORE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \ - find $(HS_ALT_SRC)/share/vm/jfr -type d; \ - fi) -endif +CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/jfrfiles COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1) COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1 --- old/make/defs.make 2019-02-15 19:02:05.151962952 +0300 +++ new/make/defs.make 2019-02-15 19:02:05.031967157 +0300 @@ -371,5 +371,9 @@ EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/$(JDK_INCLUDE_SUBDIR)/jni_md.h EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jmm.h +ifeq ($(ENABLE_JFR), true) +EXPORT_LIST += $(EXPORT_JRE_LIB_DIR)/jdk/jfr/internal/types/metadata.xml +endif + .PHONY: $(HS_ALT_MAKE)/defs.make --- old/make/linux/makefiles/buildtree.make 2019-02-15 19:02:05.491951034 +0300 +++ new/make/linux/makefiles/buildtree.make 2019-02-15 19:02:05.375955100 +0300 @@ -47,7 +47,7 @@ # flags.make - with macro settings # vm.make - to 
support making "$(MAKE) -v vm.make" in makefiles # adlc.make - -# trace.make - generate tracing event and type definitions +# jfr.make - generate jfr event and type definitions # jvmti.make - generate JVMTI bindings from the spec (JSR-163) # sa.make - generate SA jar file and natives # @@ -111,6 +111,10 @@ endif endif +ifeq ($(ENABLE_JFR),false) +ALWAYS_EXCLUDE_DIRS += -o -name jfr +endif + # Get things from the platform file. COMPILER = $(shell sed -n 's/^compiler[ ]*=[ ]*//p' $(PLATFORM_FILE)) @@ -118,7 +122,7 @@ $(PLATFORM_DIR)/generated/dependencies \ $(PLATFORM_DIR)/generated/adfiles \ $(PLATFORM_DIR)/generated/jvmtifiles \ - $(PLATFORM_DIR)/generated/tracefiles + $(PLATFORM_DIR)/generated/jfrfiles TARGETS = debug fastdebug optimized product SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS)) @@ -126,7 +130,7 @@ # For dependencies and recursive makes. BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make -BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make +BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make jfr.make sa.make BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \ SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT) @@ -199,6 +203,13 @@ DATA_MODE = $(DATA_MODE/$(BUILDARCH)) +ifeq ($(ENABLE_JFR), true) + INCLUDE_JFR = 1 +else + INCLUDE_JFR = 0 +endif + + flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst @echo Creating $@ ... 
$(QUIETLY) ( \ @@ -274,8 +285,7 @@ echo && \ echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \ echo "SYSDEFS += \$$(HOTSPOT_EXTRA_SYSDEFS)"; \ - [ -n "$(INCLUDE_TRACE)" ] && \ - echo && echo "INCLUDE_TRACE = $(INCLUDE_TRACE)"; \ + echo && echo "CFLAGS += -DINCLUDE_JFR=$(INCLUDE_JFR)"; \ echo; \ [ -n "$(SPEC)" ] && \ echo "include $(SPEC)"; \ @@ -344,7 +354,7 @@ echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ ) > $@ -trace.make: $(BUILDTREE_MAKE) +jfr.make: $(BUILDTREE_MAKE) @echo Creating $@ ... $(QUIETLY) ( \ $(BUILDTREE_COMMENT); \ --- old/make/linux/makefiles/rules.make 2019-02-15 19:02:05.867937856 +0300 +++ new/make/linux/makefiles/rules.make 2019-02-15 19:02:05.703943604 +0300 @@ -126,8 +126,8 @@ RUN.JAR$(MAKE_VERBOSE) += >/dev/null # Settings for javac -BOOT_SOURCE_LANGUAGE_VERSION = 6 -BOOT_TARGET_CLASS_VERSION = 6 +BOOT_SOURCE_LANGUAGE_VERSION = 8 +BOOT_TARGET_CLASS_VERSION = 8 JAVAC_FLAGS = -g -encoding ascii BOOTSTRAP_JAVAC_FLAGS = $(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION) --- old/make/linux/makefiles/top.make 2019-02-15 19:02:06.247924539 +0300 +++ new/make/linux/makefiles/top.make 2019-02-15 19:02:06.099929725 +0300 @@ -80,7 +80,7 @@ @echo All done. # This is an explicit dependency for the sake of parallel makes. -vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) trace_stuff jvmti_stuff sa_stuff +vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff jfr_stuff sa_stuff @# We need a null action here, so implicit rules don't get consulted. 
$(Cached_plat): $(Plat_File) @@ -94,9 +94,9 @@ jvmti_stuff: $(Cached_plat) $(adjust-mflags) @$(MAKE) -f jvmti.make $(MFLAGS-adjusted) -# generate trace files -trace_stuff: jvmti_stuff $(Cached_plat) $(adjust-mflags) - @$(MAKE) -f trace.make $(MFLAGS-adjusted) +# generate JFR stuff +jfr_stuff: $(Cached_plat) $(adjust-mflags) + @$(MAKE) -f jfr.make $(MFLAGS-adjusted) # generate SA jar files and native header sa_stuff: --- old/make/linux/makefiles/vm.make 2019-02-15 19:02:06.587912623 +0300 +++ new/make/linux/makefiles/vm.make 2019-02-15 19:02:06.483916267 +0300 @@ -54,7 +54,7 @@ # Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm # The adfiles directory contains ad_.[ch]pp. # The jvmtifiles directory contains jvmti*.[ch]pp -Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/tracefiles +Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/jfrfiles VPATH += $(Src_Dirs_V:%=%:) # set INCLUDES for C preprocessor. @@ -148,24 +148,21 @@ LIBJVM_DEBUGINFO = lib$(JVM).debuginfo LIBJVM_DIZ = lib$(JVM).diz +ifeq ($(ENABLE_JFR),false) +EXCLUDE_JFR_PATHS:= -o -name jfr -prune +endif SPECIAL_PATHS:=adlc c1 gc_implementation opto shark libadt SOURCE_PATHS=\ $(shell find $(HS_COMMON_SRC)/share/vm/* -type d \! 
\ - \( -name DUMMY $(foreach dir,$(SPECIAL_PATHS),-o -name $(dir)) \)) + \( -name DUMMY $(foreach dir,$(SPECIAL_PATHS),-o -name $(dir)) $(EXCLUDE_JFR_PATHS) \)) SOURCE_PATHS+=$(HS_COMMON_SRC)/os/$(Platform_os_family)/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/os/posix/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path)) -CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles - -ifneq ($(INCLUDE_TRACE), false) -CORE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \ - find $(HS_ALT_SRC)/share/vm/jfr -type d; \ - fi) -endif +CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/jfrfiles COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1) COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1 --- old/make/solaris/makefiles/buildtree.make 2019-02-15 19:02:06.923900848 +0300 +++ new/make/solaris/makefiles/buildtree.make 2019-02-15 19:02:06.803905053 +0300 @@ -47,7 +47,7 @@ # flags.make - with macro settings # vm.make - to support making "$(MAKE) -v vm.make" in makefiles # adlc.make - -# trace.make - generate tracing event and type definitions +# jfr.make - generate jfr event and type definitions # jvmti.make - generate JVMTI bindings from the spec (JSR-163) # sa.make - generate SA jar file and natives # @@ -102,6 +102,10 @@ endif endif +ifeq ($(ENABLE_JFR),false) +ALWAYS_EXCLUDE_DIRS += -o -name jfr +endif + # Get things from the platform file. COMPILER = $(shell sed -n 's/^compiler[ ]*=[ ]*//p' $(PLATFORM_FILE)) @@ -109,7 +113,7 @@ $(PLATFORM_DIR)/generated/dependencies \ $(PLATFORM_DIR)/generated/adfiles \ $(PLATFORM_DIR)/generated/jvmtifiles \ - $(PLATFORM_DIR)/generated/tracefiles + $(PLATFORM_DIR)/generated/jfrfiles TARGETS = debug fastdebug optimized product SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS)) @@ -117,7 +121,7 @@ # For dependencies and recursive makes. 
BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make -BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make +BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make jfr.make sa.make BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \ ARCH=$(ARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT) @@ -189,6 +193,12 @@ DATA_MODE = $(DATA_MODE/$(BUILDARCH)) +ifeq ($(ENABLE_JFR), true) + INCLUDE_JFR = 1 +else + INCLUDE_JFR = 0 +endif + flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst @echo Creating $@ ... $(QUIETLY) ( \ @@ -264,9 +274,10 @@ echo && \ echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \ echo "SYSDEFS += \$$(HOTSPOT_EXTRA_SYSDEFS)"; \ + echo && echo "CFLAGS += -DINCLUDE_JFR=$(INCLUDE_JFR)"; \ echo; \ - [ -n "$(INCLUDE_TRACE)" ] && \ - echo && echo "INCLUDE_TRACE = $(INCLUDE_TRACE)"; \ + [ -n "$(INCLUDE_JFR)" ] && \ + echo && echo "INCLUDE_JFR = $(INCLUDE_JFR)"; \ [ -n "$(SPEC)" ] && \ echo "include $(SPEC)"; \ echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(VARIANT).make"; \ @@ -334,7 +345,7 @@ echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ ) > $@ -trace.make: $(BUILDTREE_MAKE) +jfr.make: $(BUILDTREE_MAKE) @echo Creating $@ ... 
$(QUIETLY) ( \ $(BUILDTREE_COMMENT); \ --- old/make/solaris/makefiles/rules.make 2019-02-15 19:02:07.243889633 +0300 +++ new/make/solaris/makefiles/rules.make 2019-02-15 19:02:07.127893698 +0300 @@ -118,8 +118,8 @@ RUN.JAR$(MAKE_VERBOSE) += >/dev/null # Settings for javac -BOOT_SOURCE_LANGUAGE_VERSION = 6 -BOOT_TARGET_CLASS_VERSION = 6 +BOOT_SOURCE_LANGUAGE_VERSION = 8 +BOOT_TARGET_CLASS_VERSION = 8 JAVAC_FLAGS = -g -encoding ascii BOOTSTRAP_JAVAC_FLAGS = $(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION) --- old/make/solaris/makefiles/top.make 2019-02-15 19:02:07.563878419 +0300 +++ new/make/solaris/makefiles/top.make 2019-02-15 19:02:07.435882905 +0300 @@ -73,7 +73,7 @@ @echo All done. # This is an explicit dependency for the sake of parallel makes. -vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff trace_stuff sa_stuff +vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff jfr_stuff sa_stuff @# We need a null action here, so implicit rules don't get consulted. $(Cached_plat): $(Plat_File) @@ -87,9 +87,9 @@ jvmti_stuff: $(Cached_plat) $(adjust-mflags) @$(MAKE) -f jvmti.make $(MFLAGS-adjusted) -# generate trace files -trace_stuff: jvmti_stuff $(Cached_plat) $(adjust-mflags) - @$(MAKE) -f trace.make $(MFLAGS-adjusted) +# generate JFR files +jfr_stuff: $(Cached_plat) $(adjust-mflags) + @$(MAKE) -f jfr.make $(MFLAGS-adjusted) # generate SA jar files and native header sa_stuff: --- old/make/solaris/makefiles/vm.make 2019-02-15 19:02:07.903866505 +0300 +++ new/make/solaris/makefiles/vm.make 2019-02-15 19:02:07.783870710 +0300 @@ -48,7 +48,7 @@ # Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm # The adfiles directory contains ad_.[ch]pp. 
# The jvmtifiles directory contains jvmti*.[ch]pp -Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/tracefiles +Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/jfrfiles VPATH += $(Src_Dirs_V:%=%:) # set INCLUDES for C preprocessor @@ -143,6 +143,8 @@ LIBS += -lkstat +LIBS += -lrt + # By default, link the *.o into the library, not the executable. LINK_INTO$(LINK_INTO) = LIBJVM @@ -160,25 +162,21 @@ LIBJVM_DEBUGINFO = lib$(JVM).debuginfo LIBJVM_DIZ = lib$(JVM).diz - +ifeq ($(ENABLE_JFR),false) +EXCLUDE_JFR_PATHS:= -o -name jfr -prune +endif SPECIAL_PATHS:=adlc c1 dist gc_implementation opto shark libadt SOURCE_PATHS=\ $(shell find $(HS_COMMON_SRC)/share/vm/* -type d \! \ - \( -name DUMMY $(foreach dir,$(SPECIAL_PATHS),-o -name $(dir)) \)) + \( -name DUMMY $(foreach dir,$(SPECIAL_PATHS),-o -name $(dir)) $(EXCLUDE_JFR_PATHS) \)) SOURCE_PATHS+=$(HS_COMMON_SRC)/os/$(Platform_os_family)/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/os/posix/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path)) -CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles - -ifneq ($(INCLUDE_TRACE), false) -CORE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \ - find $(HS_ALT_SRC)/share/vm/jfr -type d; \ - fi) -endif +CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/jfrfiles COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1) COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1 --- old/make/windows/build.make 2019-02-15 19:02:08.219855433 +0300 +++ new/make/windows/build.make 2019-02-15 19:02:08.119858936 +0300 @@ -297,6 +297,7 @@ @ echo LD=$(LD) >> $@ @ echo MT=$(MT) >> $@ @ echo RC=$(RC) >> $@ + @ echo ENABLE_JFR=$(ENABLE_JFR) >> $@ @ sh $(WorkSpace)/make/windows/get_msc_ver.sh >> $@ @ if "$(ENABLE_FULL_DEBUG_SYMBOLS)" NEQ "" echo ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS) >> $@ @ if 
"$(ZIP_DEBUGINFO_FILES)" NEQ "" echo ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES) >> $@ --- old/make/windows/create_obj_files.sh 2019-02-15 19:02:08.567843239 +0300 +++ new/make/windows/create_obj_files.sh 2019-02-15 19:02:08.435847864 +0300 @@ -57,8 +57,8 @@ COMMONSRC=${WorkSpace}/${COMMONSRC_REL} ALTSRC=${WorkSpace}/${ALTSRC_REL} -BASE_PATHS="`if [ -d ${ALTSRC}/share/vm ]; then $FIND ${ALTSRC}/share/vm ! -name vm -prune -type d \! \( -name adlc -o -name c1 -o -name gc_implementation -o -name opto -o -name shark -o -name libadt \); fi`" -BASE_PATHS="${BASE_PATHS} ` $FIND ${COMMONSRC}/share/vm ! -name vm -prune -type d \! \( -name adlc -o -name c1 -o -name gc_implementation -o -name opto -o -name shark -o -name libadt \)`" +BASE_PATHS="`if [ -d ${ALTSRC}/share/vm ]; then $FIND ${ALTSRC}/share/vm ! -name vm -prune -type d \! \( -name adlc -o -name c1 -o -name gc_implementation -o -name opto -o -name shark -o -name libadt -o -name jfr \); fi`" +BASE_PATHS="${BASE_PATHS} ` $FIND ${COMMONSRC}/share/vm ! -name vm -prune -type d \! 
\( -name adlc -o -name c1 -o -name gc_implementation -o -name opto -o -name shark -o -name libadt -o -name jfr \)`" for sd in \ share/vm/gc_implementation/shared \ @@ -71,10 +71,10 @@ BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/${sd}" done -BASE_PATHS="${BASE_PATHS} ${GENERATED}/jvmtifiles ${GENERATED}/tracefiles" +BASE_PATHS="${BASE_PATHS} ${GENERATED}/jvmtifiles ${GENERATED}/jfrfiles" -if [ -d "${ALTSRC}/share/vm/jfr/buffers" ]; then - BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/buffers" +if [ "$ENABLE_JFR" = "true" ]; then +BASE_PATHS="${BASE_PATHS} `$FIND ${COMMONSRC}/share/vm/jfr -type d`" fi BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/share/vm/prims/wbtestmethods" --- old/make/windows/makefiles/compile.make 2019-02-15 19:02:08.855833147 +0300 +++ new/make/windows/makefiles/compile.make 2019-02-15 19:02:08.775835951 +0300 @@ -314,6 +314,13 @@ CXX_FLAGS = $(CXX_FLAGS) $(MP_FLAG) +!if "$(ENABLE_JFR)" == "true" +INCLUDE_JFR=1 +!else +INCLUDE_JFR=0 +!endif +CXX_FLAGS=$(CXX_FLAGS) /D INCLUDE_JFR=$(INCLUDE_JFR) + # If NO_OPTIMIZATIONS is defined in the environment, turn everything off !ifdef NO_OPTIMIZATIONS PRODUCT_OPT_OPTION = $(DEBUG_OPT_OPTION) @@ -357,4 +364,3 @@ !if "$(MFC_DEBUG)" == "true" RC_FLAGS = $(RC_FLAGS) /D "_DEBUG" !endif - --- old/make/windows/makefiles/defs.make 2019-02-15 19:02:09.163822356 +0300 +++ new/make/windows/makefiles/defs.make 2019-02-15 19:02:09.047826420 +0300 @@ -306,3 +306,6 @@ MAKE_ARGS += MT="$(subst /,\\,$(MT))" endif endif + +MAKE_ARGS += ENABLE_JFR=$(ENABLE_JFR) + --- old/make/windows/makefiles/generated.make 2019-02-15 19:02:09.535809322 +0300 +++ new/make/windows/makefiles/generated.make 2019-02-15 19:02:09.451812265 +0300 @@ -30,9 +30,9 @@ JvmtiOutDir=jvmtifiles !include $(WorkSpace)/make/windows/makefiles/jvmti.make -# Pick up rules for building trace -TraceOutDir=tracefiles -!include $(WorkSpace)/make/windows/makefiles/trace.make +# Pick up rules for building JFR +JfrOutDir=jfrfiles +!include 
$(WorkSpace)/make/windows/makefiles/jfr.make # Pick up rules for building SA !include $(WorkSpace)/make/windows/makefiles/sa.make @@ -40,9 +40,9 @@ AdlcOutDir=adfiles !if ("$(Variant)" == "compiler2") || ("$(Variant)" == "tiered") -default:: $(AdlcOutDir)/ad_$(Platform_arch_model).cpp $(AdlcOutDir)/dfa_$(Platform_arch_model).cpp $(JvmtiGeneratedFiles) $(TraceGeneratedFiles) buildobjfiles +default:: $(AdlcOutDir)/ad_$(Platform_arch_model).cpp $(AdlcOutDir)/dfa_$(Platform_arch_model).cpp $(JvmtiGeneratedFiles) $(JfrGeneratedFiles) buildobjfiles !else -default:: $(JvmtiGeneratedFiles) $(TraceGeneratedFiles) buildobjfiles +default:: $(JvmtiGeneratedFiles) $(JfrGeneratedFiles) buildobjfiles !endif buildobjfiles: --- old/make/windows/makefiles/rules.make 2019-02-15 19:02:09.847798391 +0300 +++ new/make/windows/makefiles/rules.make 2019-02-15 19:02:09.759801474 +0300 @@ -44,8 +44,8 @@ !endif # Settings for javac -BOOT_SOURCE_LANGUAGE_VERSION=6 -BOOT_TARGET_CLASS_VERSION=6 +BOOT_SOURCE_LANGUAGE_VERSION=8 +BOOT_TARGET_CLASS_VERSION=8 JAVAC_FLAGS=-g -encoding ascii BOOTSTRAP_JAVAC_FLAGS=$(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION) --- old/make/windows/makefiles/vm.make 2019-02-15 19:02:10.191786339 +0300 +++ new/make/windows/makefiles/vm.make 2019-02-15 19:02:10.047791384 +0300 @@ -140,7 +140,7 @@ VM_PATH=../generated VM_PATH=$(VM_PATH);../generated/adfiles VM_PATH=$(VM_PATH);../generated/jvmtifiles -VM_PATH=$(VM_PATH);../generated/tracefiles +VM_PATH=$(VM_PATH);../generated/jfrfiles VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/c1 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/compiler VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/code @@ -168,11 +168,6 @@ VM_PATH=$(VM_PATH);$(WorkSpace)/src/cpu/$(Platform_arch)/vm VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/opto -!if exists($(ALTSRC)\share\vm\jfr) -VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr -VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/buffers -!endif - VM_PATH={$(VM_PATH)} # Special 
case files not using precompiled header files. @@ -204,6 +199,12 @@ bytecodeInterpreterWithChecks.obj: ..\generated\jvmtifiles\bytecodeInterpreterWithChecks.cpp $(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c ..\generated\jvmtifiles\bytecodeInterpreterWithChecks.cpp +iphlp_interface.obj: $(WorkSpace)\src\os\windows\vm\iphlp_interface.cpp + $(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c $(WorkSpace)\src\os\windows\vm\iphlp_interface.cpp + +os_perf_windows.obj: $(WorkSpace)\src\os\windows\vm\os_perf_windows.cpp + $(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c $(WorkSpace)\src\os\windows\vm\os_perf_windows.cpp + # Default rules for the Virtual Machine {$(COMMONSRC)\share\vm\c1}.cpp.obj:: $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< @@ -379,13 +380,79 @@ {..\generated\jvmtifiles}.cpp.obj:: $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< -{..\generated\tracefiles}.cpp.obj:: +{..\generated\jfrfiles}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +{$(COMMONSRC)\share\vm\jfr}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +{$(COMMONSRC)\share\vm\jfr\dcmd}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +{$(COMMONSRC)\share\vm\jfr\instrumentation}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +{$(COMMONSRC)\share\vm\jfr\jni}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +{$(COMMONSRC)\share\vm\jfr\leakprofiler}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +{$(COMMONSRC)\share\vm\jfr\leakprofiler\chains}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +{$(COMMONSRC)\share\vm\jfr\leakprofiler\checkpoint}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +{$(COMMONSRC)\share\vm\jfr\leakprofiler\sampling}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +{$(COMMONSRC)\share\vm\jfr\leakprofiler\utilities}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +{$(COMMONSRC)\share\vm\jfr\metadata}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +{$(COMMONSRC)\share\vm\jfr\periodic}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) 
/c $< + +{$(COMMONSRC)\share\vm\jfr\periodic\sampling}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +{$(COMMONSRC)\share\vm\jfr\recorder}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +{$(COMMONSRC)\share\vm\jfr\recorder\checkpoint}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +{$(COMMONSRC)\share\vm\jfr\recorder\checkpoint\types}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +{$(COMMONSRC)\share\vm\jfr\recorder\checkpoint\types\traceid}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +{$(COMMONSRC)\share\vm\jfr\recorder\repository}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +{$(COMMONSRC)\share\vm\jfr\recorder\service}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +{$(COMMONSRC)\share\vm\jfr\recorder\stacktrace}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +{$(COMMONSRC)\share\vm\jfr\recorder\storage}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +{$(COMMONSRC)\share\vm\jfr\recorder\stringpool}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +{$(COMMONSRC)\share\vm\jfr\support}.cpp.obj:: $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< -{$(ALTSRC)\share\vm\jfr}.cpp.obj:: +{$(COMMONSRC)\share\vm\jfr\utilities}.cpp.obj:: $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< -{$(ALTSRC)\share\vm\jfr\buffers}.cpp.obj:: +{$(COMMONSRC)\share\vm\jfr\writers}.cpp.obj:: $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< default:: --- old/make/windows/projectfiles/common/Makefile 2019-02-15 19:02:10.567773167 +0300 +++ new/make/windows/projectfiles/common/Makefile 2019-02-15 19:02:10.435777792 +0300 @@ -61,8 +61,8 @@ !include $(HOTSPOTWORKSPACE)/make/windows/makefiles/jvmti.make # Pick up rules for building trace -TraceOutDir=$(HOTSPOTBUILDSPACE)\$(Variant)\generated\tracefiles -!include $(HOTSPOTWORKSPACE)/make/windows/makefiles/trace.make +JfrOutDir=$(HOTSPOTBUILDSPACE)\$(Variant)\generated\jfrfiles +!include $(HOTSPOTWORKSPACE)/make/windows/makefiles/jfr.make !if "$(Variant)" == "compiler2" # Pick up rules for 
building adlc --- old/src/cpu/ppc/vm/frame_ppc.cpp 2019-02-15 19:02:10.943759995 +0300 +++ new/src/cpu/ppc/vm/frame_ppc.cpp 2019-02-15 19:02:10.847763358 +0300 @@ -49,14 +49,134 @@ bool frame::safe_for_sender(JavaThread *thread) { bool safe = false; - address cursp = (address)sp(); - address curfp = (address)fp(); - if ((cursp != NULL && curfp != NULL && - (cursp <= thread->stack_base() && cursp >= thread->stack_base() - thread->stack_size())) && - (curfp <= thread->stack_base() && curfp >= thread->stack_base() - thread->stack_size())) { - safe = true; + address sp = (address)_sp; + address fp = (address)_fp; + address unextended_sp = (address)_unextended_sp; + + // Consider stack guards when trying to determine "safe" stack pointers + static size_t stack_guard_size = os::uses_stack_guard_pages() ? + thread->stack_red_zone_size() + thread->stack_yellow_zone_size() : 0; + size_t usable_stack_size = thread->stack_size() - stack_guard_size; + + // sp must be within the usable part of the stack (not in guards) + bool sp_safe = (sp < thread->stack_base()) && + (sp >= thread->stack_base() - usable_stack_size); + + + if (!sp_safe) { + return false; + } + + // Unextended sp must be within the stack and above or equal sp + bool unextended_sp_safe = (unextended_sp < thread->stack_base()) && (unextended_sp >= sp); + + if (!unextended_sp_safe) { + return false; + } + + // An fp must be within the stack and above (but not equal) sp. + bool fp_safe = (fp <= thread->stack_base()) && (fp > sp); + // an interpreter fp must be within the stack and above (but not equal) sp + bool fp_interp_safe = (fp <= thread->stack_base()) && (fp > sp) && + ((fp - sp) >= (ijava_state_size + top_ijava_frame_abi_size)); + + // We know sp/unextended_sp are safe, only fp is questionable here + + // If the current frame is known to the code cache then we can attempt to + // to construct the sender and do some validation of it. 
This goes a long way + // toward eliminating issues when we get in frame construction code + + if (_cb != NULL ){ + // Entry frame checks + if (is_entry_frame()) { + // An entry frame must have a valid fp. + return fp_safe && is_entry_frame_valid(thread); + } + + // Now check if the frame is complete and the test is + // reliable. Unfortunately we can only check frame completeness for + // runtime stubs and nmethods. Other generic buffer blobs are more + // problematic so we just assume they are OK. Adapter blobs never have a + // complete frame and are never OK + if (!_cb->is_frame_complete_at(_pc)) { + if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) { + return false; + } + } + + // Could just be some random pointer within the codeBlob. + if (!_cb->code_contains(_pc)) { + return false; + } + + if (is_interpreted_frame() && !fp_interp_safe) { + return false; + } + + abi_minframe* sender_abi = (abi_minframe*) fp; + intptr_t* sender_sp = (intptr_t*) fp; + address sender_pc = (address) sender_abi->lr;; + + // We must always be able to find a recognizable pc. + CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc); + if (sender_blob == NULL) { + return false; + } + + // Could be a zombie method + if (sender_blob->is_zombie() || sender_blob->is_unloaded()) { + return false; + } + + // It should be safe to construct the sender though it might not be valid. + + frame sender(sender_sp, sender_pc); + + // Do we have a valid fp? + address sender_fp = (address) sender.fp(); + + // sender_fp must be within the stack and above (but not + // equal) current frame's fp. + if (sender_fp > thread->stack_base() || sender_fp <= fp) { + return false; + } + + // If the potential sender is the interpreter then we can do some more checking. + if (Interpreter::contains(sender_pc)) { + return sender.is_interpreted_frame_valid(thread); + } + + // Could just be some random pointer within the codeBlob. 
+ if (!sender.cb()->code_contains(sender_pc)) { + return false; + } + + // We should never be able to see an adapter if the current frame is something from code cache. + if (sender_blob->is_adapter_blob()) { + return false; + } + + if (sender.is_entry_frame()) { + return sender.is_entry_frame_valid(thread); + } + + // Frame size is always greater than zero. If the sender frame size is zero or less, + // something is really weird and we better give up. + if (sender_blob->frame_size() <= 0) { + return false; + } + + return true; } - return safe; + + // Must be native-compiled frame. Since sender will try and use fp to find + // linkages it must be safe + + if (!fp_safe) { + return false; + } + + return true; } bool frame::is_interpreted_frame() const { --- old/src/os/aix/vm/os_aix.cpp 2019-02-15 19:02:11.315746964 +0300 +++ new/src/os/aix/vm/os_aix.cpp 2019-02-15 19:02:11.163752288 +0300 @@ -1555,6 +1555,11 @@ st->cr(); } +int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) { + // Not yet implemented. 
+ return 0; +} + void os::print_memory_info(outputStream* st) { st->print_cr("Memory:"); @@ -2793,6 +2798,10 @@ return ::read(fd, buf, nBytes); } +size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) { + return ::pread(fd, buf, nBytes, offset); +} + #define NANOSECS_PER_MILLISEC 1000000 int os::sleep(Thread* thread, jlong millis, bool interruptible) { @@ -4184,8 +4193,7 @@ /* Scan the directory */ bool result = true; - char buf[sizeof(struct dirent) + MAX_PATH]; - while (result && (ptr = ::readdir(dir)) != NULL) { + while (result && (ptr = readdir(dir)) != NULL) { if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) { result = false; } --- old/src/os/aix/vm/os_aix.inline.hpp 2019-02-15 19:02:11.823729168 +0300 +++ new/src/os/aix/vm/os_aix.inline.hpp 2019-02-15 19:02:11.675734353 +0300 @@ -92,19 +92,6 @@ inline const int os::default_file_open_flags() { return 0;} -inline DIR* os::opendir(const char* dirname) -{ - assert(dirname != NULL, "just checking"); - return ::opendir(dirname); -} - -inline int os::readdir_buf_size(const char *path) -{ - // according to aix sys/limits, NAME_MAX must be retrieved at runtime. */ - const long my_NAME_MAX = pathconf(path, _PC_NAME_MAX); - return my_NAME_MAX + sizeof(dirent) + 1; -} - inline jlong os::lseek(int fd, jlong offset, int whence) { return (jlong) ::lseek64(fd, offset, whence); } @@ -121,28 +108,6 @@ return ::ftruncate64(fd, length); } -inline struct dirent* os::readdir(DIR* dirp, dirent *dbuf) -{ - dirent* p; - int status; - assert(dirp != NULL, "just checking"); - - // NOTE: Linux readdir_r (on RH 6.2 and 7.2 at least) is NOT like the POSIX - // version. 
Here is the doc for this function: - // http://www.gnu.org/manual/glibc-2.2.3/html_node/libc_262.html - - if((status = ::readdir_r(dirp, dbuf, &p)) != 0) { - errno = status; - return NULL; - } else - return p; -} - -inline int os::closedir(DIR *dirp) { - assert(dirp != NULL, "argument is NULL"); - return ::closedir(dirp); -} - // macros for restartable system calls #define RESTARTABLE(_cmd, _result) do { \ --- old/src/os/aix/vm/perfMemory_aix.cpp 2019-02-15 19:02:12.223715158 +0300 +++ new/src/os/aix/vm/perfMemory_aix.cpp 2019-02-15 19:02:12.063720762 +0300 @@ -612,9 +612,8 @@ // to determine the user name for the process id. // struct dirent* dentry; - char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname), mtInternal); errno = 0; - while ((dentry = os::readdir(tmpdirp, (struct dirent *)tdbuf)) != NULL) { + while ((dentry = os::readdir(tmpdirp)) != NULL) { // check if the directory entry is a hsperfdata file if (strncmp(dentry->d_name, PERFDATA_NAME, strlen(PERFDATA_NAME)) != 0) { @@ -648,9 +647,8 @@ } struct dirent* udentry; - char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name), mtInternal); errno = 0; - while ((udentry = os::readdir(subdirp, (struct dirent *)udbuf)) != NULL) { + while ((udentry = os::readdir(subdirp)) != NULL) { if (filename_to_pid(udentry->d_name) == vmid) { struct stat statbuf; @@ -694,11 +692,9 @@ } } os::closedir(subdirp); - FREE_C_HEAP_ARRAY(char, udbuf, mtInternal); FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal); } os::closedir(tmpdirp); - FREE_C_HEAP_ARRAY(char, tdbuf, mtInternal); return(oldest_user); } @@ -774,10 +770,8 @@ // loop under these conditions is dependent upon the implementation of // opendir/readdir. 
struct dirent* entry; - char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal); - errno = 0; - while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) { + while ((entry = os::readdir(dirp)) != NULL) { pid_t pid = filename_to_pid(entry->d_name); @@ -815,8 +809,6 @@ // Close the directory and reset the current working directory. close_directory_secure_cwd(dirp, saved_cwd_fd); - - FREE_C_HEAP_ARRAY(char, dbuf, mtInternal); } // Make the user specific temporary directory. Returns true if --- old/src/os/bsd/vm/os_bsd.cpp 2019-02-15 19:02:12.567703108 +0300 +++ new/src/os/bsd/vm/os_bsd.cpp 2019-02-15 19:02:12.443707452 +0300 @@ -1690,6 +1690,53 @@ #endif } +int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) { +#ifdef RTLD_DI_LINKMAP + Dl_info dli; + void *handle; + Link_map *map; + Link_map *p; + + if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 || + dli.dli_fname == NULL) { + return 1; + } + handle = dlopen(dli.dli_fname, RTLD_LAZY); + if (handle == NULL) { + return 1; + } + dlinfo(handle, RTLD_DI_LINKMAP, &map); + if (map == NULL) { + dlclose(handle); + return 1; + } + + while (map->l_prev != NULL) + map = map->l_prev; + + while (map != NULL) { + // Value for top_address is returned as 0 since we don't have any information about module size + if (callback(map->l_name, (address)map->l_addr, (address)0, param)) { + dlclose(handle); + return 1; + } + map = map->l_next; + } + + dlclose(handle); +#elif defined(__APPLE__) + for (uint32_t i = 1; i < _dyld_image_count(); i++) { + // Value for top_address is returned as 0 since we don't have any information about module size + if (callback(_dyld_get_image_name(i), (address)_dyld_get_image_header(i), (address)0, param)) { + return 1; + } + } + return 0; +#else + return 1; +#endif +} + void os::print_os_info_brief(outputStream* st) { st->print("Bsd"); @@ -2562,6 +2609,10 @@ RESTARTABLE_RETURN_INT(::read(fd, buf, nBytes)); } +size_t 
os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) { + RESTARTABLE_RETURN_INT(::pread(fd, buf, nBytes, offset)); +} + // TODO-FIXME: reconcile Solaris' os::sleep with the bsd variation. // Solaris uses poll(), bsd uses park(). // Poll() is likely a better choice, assuming that Thread.interrupt() @@ -3957,8 +4008,7 @@ /* Scan the directory */ bool result = true; - char buf[sizeof(struct dirent) + MAX_PATH]; - while (result && (ptr = ::readdir(dir)) != NULL) { + while (result && (ptr = readdir(dir)) != NULL) { if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) { result = false; } --- old/src/os/bsd/vm/os_bsd.inline.hpp 2019-02-15 19:02:13.047686297 +0300 +++ new/src/os/bsd/vm/os_bsd.inline.hpp 2019-02-15 19:02:12.903691340 +0300 @@ -95,17 +95,6 @@ inline const int os::default_file_open_flags() { return 0;} -inline DIR* os::opendir(const char* dirname) -{ - assert(dirname != NULL, "just checking"); - return ::opendir(dirname); -} - -inline int os::readdir_buf_size(const char *path) -{ - return NAME_MAX + sizeof(dirent) + 1; -} - inline jlong os::lseek(int fd, jlong offset, int whence) { return (jlong) ::lseek(fd, offset, whence); } @@ -122,28 +111,6 @@ return ::ftruncate(fd, length); } -inline struct dirent* os::readdir(DIR* dirp, dirent *dbuf) -{ - dirent* p; - int status; - assert(dirp != NULL, "just checking"); - - // NOTE: Bsd readdir_r (on RH 6.2 and 7.2 at least) is NOT like the POSIX - // version. 
Here is the doc for this function: - // http://www.gnu.org/manual/glibc-2.2.3/html_node/libc_262.html - - if((status = ::readdir_r(dirp, dbuf, &p)) != 0) { - errno = status; - return NULL; - } else - return p; -} - -inline int os::closedir(DIR *dirp) { - assert(dirp != NULL, "argument is NULL"); - return ::closedir(dirp); -} - // macros for restartable system calls #define RESTARTABLE(_cmd, _result) do { \ --- old/src/os/bsd/vm/perfMemory_bsd.cpp 2019-02-15 19:02:13.399673969 +0300 +++ new/src/os/bsd/vm/perfMemory_bsd.cpp 2019-02-15 19:02:13.279678172 +0300 @@ -533,9 +533,8 @@ // to determine the user name for the process id. // struct dirent* dentry; - char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname), mtInternal); errno = 0; - while ((dentry = os::readdir(tmpdirp, (struct dirent *)tdbuf)) != NULL) { + while ((dentry = os::readdir(tmpdirp)) != NULL) { // check if the directory entry is a hsperfdata file if (strncmp(dentry->d_name, PERFDATA_NAME, strlen(PERFDATA_NAME)) != 0) { @@ -557,9 +556,8 @@ } struct dirent* udentry; - char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name), mtInternal); errno = 0; - while ((udentry = os::readdir(subdirp, (struct dirent *)udbuf)) != NULL) { + while ((udentry = os::readdir(subdirp)) != NULL) { if (filename_to_pid(udentry->d_name) == vmid) { struct stat statbuf; @@ -603,11 +601,9 @@ } } os::closedir(subdirp); - FREE_C_HEAP_ARRAY(char, udbuf, mtInternal); FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal); } os::closedir(tmpdirp); - FREE_C_HEAP_ARRAY(char, tdbuf, mtInternal); return(oldest_user); } @@ -686,10 +682,8 @@ // opendir/readdir. 
// struct dirent* entry; - char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal); - errno = 0; - while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) { + while ((entry = os::readdir(dirp)) != NULL) { pid_t pid = filename_to_pid(entry->d_name); @@ -728,8 +722,6 @@ // close the directory and reset the current working directory close_directory_secure_cwd(dirp, saved_cwd_fd); - - FREE_C_HEAP_ARRAY(char, dbuf, mtInternal); } // make the user specific temporary directory. Returns true if --- old/src/os/linux/vm/os_linux.cpp 2019-02-15 19:02:13.811659539 +0300 +++ new/src/os/linux/vm/os_linux.cpp 2019-02-15 19:02:13.647665283 +0300 @@ -2141,6 +2141,41 @@ } } +int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) { + FILE *procmapsFile = NULL; + + // Open the procfs maps file for the current process + if ((procmapsFile = fopen("/proc/self/maps", "r")) != NULL) { + // Allocate PATH_MAX for file name plus a reasonable size for other fields. + char line[PATH_MAX + 100]; + + // Read line by line from 'file' + while (fgets(line, sizeof(line), procmapsFile) != NULL) { + u8 base, top, offset, inode; + char permissions[5]; + char device[6]; + char name[PATH_MAX + 1]; + + // Parse fields from line + sscanf(line, UINT64_FORMAT_X "-" UINT64_FORMAT_X " %4s " UINT64_FORMAT_X " %5s " INT64_FORMAT " %s", + &base, &top, permissions, &offset, device, &inode, name); + + // Filter by device id '00:00' so that we only get file system mapped files. 
+ if (strcmp(device, "00:00") != 0) { + + // Call callback with the fields of interest + if(callback(name, (address)base, (address)top, param)) { + // Oops abort, callback aborted + fclose(procmapsFile); + return 1; + } + } + } + fclose(procmapsFile); + } + return 0; +} + void os::print_os_info_brief(outputStream* st) { os::Linux::print_distro_info(st); @@ -4025,6 +4060,10 @@ return ::read(fd, buf, nBytes); } +size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) { + return ::pread(fd, buf, nBytes, offset); +} + // TODO-FIXME: reconcile Solaris' os::sleep with the linux variation. // Solaris uses poll(), linux uses park(). // Poll() is likely a better choice, assuming that Thread.interrupt() @@ -5485,8 +5524,7 @@ /* Scan the directory */ bool result = true; - char buf[sizeof(struct dirent) + MAX_PATH]; - while (result && (ptr = ::readdir(dir)) != NULL) { + while (result && (ptr = readdir(dir)) != NULL) { if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) { result = false; } --- old/src/os/linux/vm/os_linux.inline.hpp 2019-02-15 19:02:14.403638807 +0300 +++ new/src/os/linux/vm/os_linux.inline.hpp 2019-02-15 19:02:14.251644130 +0300 @@ -87,17 +87,6 @@ inline const int os::default_file_open_flags() { return 0;} -inline DIR* os::opendir(const char* dirname) -{ - assert(dirname != NULL, "just checking"); - return ::opendir(dirname); -} - -inline int os::readdir_buf_size(const char *path) -{ - return NAME_MAX + sizeof(dirent) + 1; -} - inline jlong os::lseek(int fd, jlong offset, int whence) { return (jlong) ::lseek64(fd, offset, whence); } @@ -114,28 +103,6 @@ return ::ftruncate64(fd, length); } -inline struct dirent* os::readdir(DIR* dirp, dirent *dbuf) -{ - dirent* p; - int status; - assert(dirp != NULL, "just checking"); - - // NOTE: Linux readdir_r (on RH 6.2 and 7.2 at least) is NOT like the POSIX - // version. 
Here is the doc for this function: - // http://www.gnu.org/manual/glibc-2.2.3/html_node/libc_262.html - - if((status = ::readdir_r(dirp, dbuf, &p)) != 0) { - errno = status; - return NULL; - } else - return p; -} - -inline int os::closedir(DIR *dirp) { - assert(dirp != NULL, "argument is NULL"); - return ::closedir(dirp); -} - // macros for restartable system calls #define RESTARTABLE(_cmd, _result) do { \ --- old/src/os/linux/vm/perfMemory_linux.cpp 2019-02-15 19:02:14.779625640 +0300 +++ new/src/os/linux/vm/perfMemory_linux.cpp 2019-02-15 19:02:14.623631104 +0300 @@ -34,6 +34,7 @@ #include "utilities/exceptions.hpp" // put OS-includes here +#include # include # include # include @@ -533,9 +534,8 @@ // to determine the user name for the process id. // struct dirent* dentry; - char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname), mtInternal); errno = 0; - while ((dentry = os::readdir(tmpdirp, (struct dirent *)tdbuf)) != NULL) { + while ((dentry = os::readdir(tmpdirp)) != NULL) { // check if the directory entry is a hsperfdata file if (strncmp(dentry->d_name, PERFDATA_NAME, strlen(PERFDATA_NAME)) != 0) { @@ -569,9 +569,8 @@ } struct dirent* udentry; - char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name), mtInternal); errno = 0; - while ((udentry = os::readdir(subdirp, (struct dirent *)udbuf)) != NULL) { + while ((udentry = os::readdir(subdirp)) != NULL) { if (filename_to_pid(udentry->d_name) == vmid) { struct stat statbuf; @@ -615,11 +614,9 @@ } } os::closedir(subdirp); - FREE_C_HEAP_ARRAY(char, udbuf, mtInternal); FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal); } os::closedir(tmpdirp); - FREE_C_HEAP_ARRAY(char, tdbuf, mtInternal); return(oldest_user); } @@ -698,10 +695,8 @@ // opendir/readdir. 
// struct dirent* entry; - char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal); - errno = 0; - while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) { + while ((entry = os::readdir(dirp)) != NULL) { pid_t pid = filename_to_pid(entry->d_name); @@ -738,8 +733,6 @@ // close the directory and reset the current working directory close_directory_secure_cwd(dirp, saved_cwd_fd); - - FREE_C_HEAP_ARRAY(char, dbuf, mtInternal); } // make the user specific temporary directory. Returns true if --- old/src/os/posix/vm/os_posix.cpp 2019-02-15 19:02:15.195611072 +0300 +++ new/src/os/posix/vm/os_posix.cpp 2019-02-15 19:02:15.035616675 +0300 @@ -302,6 +302,21 @@ return ::fdopen(fd, mode); } +DIR* os::opendir(const char* dirname) { + assert(dirname != NULL, "just checking"); + return ::opendir(dirname); +} + +struct dirent* os::readdir(DIR* dirp) { + assert(dirp != NULL, "just checking"); + return ::readdir(dirp); +} + +int os::closedir(DIR *dirp) { + assert(dirp != NULL, "just checking"); + return ::closedir(dirp); +} + // Builds a platform dependent Agent_OnLoad_ function name // which is used to find statically linked in agents. // Parameters: --- old/src/os/solaris/vm/os_solaris.cpp 2019-02-15 19:02:15.567598047 +0300 +++ new/src/os/solaris/vm/os_solaris.cpp 2019-02-15 19:02:15.443602389 +0300 @@ -1809,6 +1809,43 @@ dlclose(handle); } +int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) { + Dl_info dli; + // Sanity check? 
+ if (dladdr(CAST_FROM_FN_PTR(void *, os::get_loaded_modules_info), &dli) == 0 || + dli.dli_fname == NULL) { + return 1; + } + + void * handle = dlopen(dli.dli_fname, RTLD_LAZY); + if (handle == NULL) { + return 1; + } + + Link_map *map; + dlinfo(handle, RTLD_DI_LINKMAP, &map); + if (map == NULL) { + dlclose(handle); + return 1; + } + + while (map->l_prev != NULL) { + map = map->l_prev; + } + + while (map != NULL) { + // Iterate through all map entries and call callback with fields of interest + if(callback(map->l_name, (address)map->l_addr, (address)0, param)) { + dlclose(handle); + return 1; + } + map = map->l_next; + } + + dlclose(handle); + return 0; +} + // Loads .dll/.so and // in case of error it checks if .dll/.so was built for the // same architecture as Hotspot is running on @@ -3251,6 +3288,15 @@ INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted); } +size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) { + size_t res; + JavaThread* thread = (JavaThread*)Thread::current(); + assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm"); + ThreadBlockInVM tbiv(thread); + RESTARTABLE(::pread(fd, buf, (size_t) nBytes, offset), res); + return res; +} + size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) { INTERRUPTIBLE_RETURN_INT(::read(fd, buf, nBytes), os::Solaris::clear_interrupted); } @@ -5148,9 +5194,7 @@ /* Scan the directory */ bool result = true; - char buf[sizeof(struct dirent) + MAX_PATH]; - struct dirent *dbuf = (struct dirent *) buf; - while (result && (ptr = readdir(dir, dbuf)) != NULL) { + while (result && (ptr = readdir(dirf)) != NULL) { if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) { result = false; } --- old/src/os/solaris/vm/os_solaris.inline.hpp 2019-02-15 19:02:16.075580259 +0300 +++ new/src/os/solaris/vm/os_solaris.inline.hpp 2019-02-15 19:02:15.959584320 +0300 @@ -71,37 +71,6 @@ } inline void os::dll_unload(void *lib) { ::dlclose(lib); 
} -inline DIR* os::opendir(const char* dirname) { - assert(dirname != NULL, "just checking"); - return ::opendir(dirname); -} - -inline int os::readdir_buf_size(const char *path) { - int size = pathconf(path, _PC_NAME_MAX); - return (size < 0 ? MAXPATHLEN : size) + sizeof(dirent) + 1; -} - -inline struct dirent* os::readdir(DIR* dirp, dirent* dbuf) { - assert(dirp != NULL, "just checking"); -#if defined(_LP64) || defined(_GNU_SOURCE) || _FILE_OFFSET_BITS==64 - dirent* p; - int status; - - if((status = ::readdir_r(dirp, dbuf, &p)) != 0) { - errno = status; - return NULL; - } else - return p; -#else // defined(_LP64) || defined(_GNU_SOURCE) || _FILE_OFFSET_BITS==64 - return ::readdir_r(dirp, dbuf); -#endif // defined(_LP64) || defined(_GNU_SOURCE) || _FILE_OFFSET_BITS==64 -} - -inline int os::closedir(DIR *dirp) { - assert(dirp != NULL, "argument is NULL"); - return ::closedir(dirp); -} - ////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// --- old/src/os/solaris/vm/perfMemory_solaris.cpp 2019-02-15 19:02:16.439567514 +0300 +++ new/src/os/solaris/vm/perfMemory_solaris.cpp 2019-02-15 19:02:16.279573116 +0300 @@ -524,9 +524,8 @@ // to determine the user name for the process id. 
// struct dirent* dentry; - char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname), mtInternal); errno = 0; - while ((dentry = os::readdir(tmpdirp, (struct dirent *)tdbuf)) != NULL) { + while ((dentry = os::readdir(tmpdirp)) != NULL) { // check if the directory entry is a hsperfdata file if (strncmp(dentry->d_name, PERFDATA_NAME, strlen(PERFDATA_NAME)) != 0) { @@ -560,9 +559,8 @@ } struct dirent* udentry; - char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name), mtInternal); errno = 0; - while ((udentry = os::readdir(subdirp, (struct dirent *)udbuf)) != NULL) { + while ((udentry = os::readdir(subdirp)) != NULL) { if (filename_to_pid(udentry->d_name) == vmid) { struct stat statbuf; @@ -606,11 +604,9 @@ } } os::closedir(subdirp); - FREE_C_HEAP_ARRAY(char, udbuf, mtInternal); FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal); } os::closedir(tmpdirp); - FREE_C_HEAP_ARRAY(char, tdbuf, mtInternal); return(oldest_user); } @@ -737,10 +733,8 @@ // opendir/readdir. // struct dirent* entry; - char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal); - errno = 0; - while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) { + while ((entry = os::readdir(dirp)) != NULL) { pid_t pid = filename_to_pid(entry->d_name); @@ -779,8 +773,6 @@ // close the directory and reset the current working directory close_directory_secure_cwd(dirp, saved_cwd_fd); - - FREE_C_HEAP_ARRAY(char, dbuf, mtInternal); } // make the user specific temporary directory. 
Returns true if --- old/src/os/windows/vm/os_windows.cpp 2019-02-15 19:02:16.843553369 +0300 +++ new/src/os/windows/vm/os_windows.cpp 2019-02-15 19:02:16.691558691 +0300 @@ -1151,25 +1151,22 @@ return dirp; } -/* parameter dbuf unused on Windows */ - -struct dirent * -os::readdir(DIR *dirp, dirent *dbuf) +struct dirent *os::readdir(DIR *dirp) { assert(dirp != NULL, "just checking"); // hotspot change if (dirp->handle == INVALID_HANDLE_VALUE) { - return 0; + return NULL; } strcpy(dirp->dirent.d_name, dirp->find_data.cFileName); - if (!FindNextFile(dirp->handle, &dirp->find_data)) { - if (GetLastError() == ERROR_INVALID_HANDLE) { - errno = EBADF; - return 0; - } - FindClose(dirp->handle); - dirp->handle = INVALID_HANDLE_VALUE; + if (!FindNextFile(dirp->handle, &dirp->find_data)) { + if (GetLastError() == ERROR_INVALID_HANDLE) { + errno = EBADF; + return NULL; + } + FindClose(dirp->handle); + dirp->handle = INVALID_HANDLE_VALUE; } return &dirp->dirent; @@ -1650,6 +1647,50 @@ enumerate_modules(pid, _print_module, (void *)st); } +int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) { + HANDLE hProcess; + +# define MAX_NUM_MODULES 128 + HMODULE modules[MAX_NUM_MODULES]; + static char filename[MAX_PATH]; + int result = 0; + + int pid = os::current_process_id(); + hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, + FALSE, pid); + if (hProcess == NULL) return 0; + + DWORD size_needed; + if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) { + CloseHandle(hProcess); + return 0; + } + + // number of modules that are currently loaded + int num_modules = size_needed / sizeof(HMODULE); + + for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) { + // Get Full pathname: + if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) { + filename[0] = '\0'; + } + + MODULEINFO modinfo; + if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) { + modinfo.lpBaseOfDll = 
NULL; + modinfo.SizeOfImage = 0; + } + + // Invoke callback function + result = callback(filename, (address)modinfo.lpBaseOfDll, + (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param); + if (result) break; + } + + CloseHandle(hProcess); + return result; +} + void os::print_os_info_brief(outputStream* st) { os::print_os_info(st); } @@ -4327,6 +4368,22 @@ return (jlong) ::_lseeki64(fd, offset, whence); } +size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) { + OVERLAPPED ov; + DWORD nread; + BOOL result; + + ZeroMemory(&ov, sizeof(ov)); + ov.Offset = (DWORD)offset; + ov.OffsetHigh = (DWORD)(offset >> 32); + + HANDLE h = (HANDLE)::_get_osfhandle(fd); + + result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov); + + return result ? nread : 0; +} + // This method is a slightly reworked copy of JDK's sysNativePath // from src/windows/hpi/src/path_md.c --- old/src/os/windows/vm/os_windows.inline.hpp 2019-02-15 19:02:17.379534604 +0300 +++ new/src/os/windows/vm/os_windows.inline.hpp 2019-02-15 19:02:17.287537824 +0300 @@ -65,14 +65,6 @@ return true; } -inline int os::readdir_buf_size(const char *path) -{ - /* As Windows doesn't use the directory entry buffer passed to - os::readdir() this can be as short as possible */ - - return 1; -} - // Bang the shadow pages if they need to be touched to be mapped. inline void os::bang_stack_shadow_pages() { // Write to each page of our new frame to force OS mapping. --- old/src/os/windows/vm/perfMemory_windows.cpp 2019-02-15 19:02:17.799519900 +0300 +++ new/src/os/windows/vm/perfMemory_windows.cpp 2019-02-15 19:02:17.667524521 +0300 @@ -316,9 +316,8 @@ // to determine the user name for the process id. 
// struct dirent* dentry; - char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname), mtInternal); errno = 0; - while ((dentry = os::readdir(tmpdirp, (struct dirent *)tdbuf)) != NULL) { + while ((dentry = os::readdir(tmpdirp)) != NULL) { // check if the directory entry is a hsperfdata file if (strncmp(dentry->d_name, PERFDATA_NAME, strlen(PERFDATA_NAME)) != 0) { @@ -351,9 +350,8 @@ } struct dirent* udentry; - char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name), mtInternal); errno = 0; - while ((udentry = os::readdir(subdirp, (struct dirent *)udbuf)) != NULL) { + while ((udentry = os::readdir(subdirp)) != NULL) { if (filename_to_pid(udentry->d_name) == vmid) { struct stat statbuf; @@ -405,11 +403,9 @@ } } os::closedir(subdirp); - FREE_C_HEAP_ARRAY(char, udbuf, mtInternal); FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal); } os::closedir(tmpdirp); - FREE_C_HEAP_ARRAY(char, tdbuf, mtInternal); return(latest_user); } @@ -639,9 +635,8 @@ // opendir/readdir. // struct dirent* entry; - char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal); errno = 0; - while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) { + while ((entry = os::readdir(dirp)) != NULL) { int pid = filename_to_pid(entry->d_name); @@ -682,7 +677,6 @@ errno = 0; } os::closedir(dirp); - FREE_C_HEAP_ARRAY(char, dbuf, mtInternal); } // create a file mapping object with the requested name, and size --- old/src/os_cpu/linux_ppc/vm/thread_linux_ppc.cpp 2019-02-15 19:02:18.139507997 +0300 +++ new/src/os_cpu/linux_ppc/vm/thread_linux_ppc.cpp 2019-02-15 19:02:18.027511918 +0300 @@ -24,13 +24,64 @@ */ #include "precompiled.hpp" -#include "runtime/frame.hpp" +#include "runtime/frame.inline.hpp" #include "runtime/thread.hpp" +bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) { + assert(this->is_Java_thread(), "must be JavaThread"); + + // If we have a last_Java_frame, then we should use it even if + // 
isInJava == true. It should be more reliable than ucontext info. + if (has_last_Java_frame() && frame_anchor()->walkable()) { + *fr_addr = pd_last_frame(); + return true; + } + + // At this point, we don't have a last_Java_frame, so + // we try to glean some information out of the ucontext + // if we were running Java code when SIGPROF came in. + if (isInJava) { + ucontext_t* uc = (ucontext_t*) ucontext; + frame ret_frame((intptr_t*)uc->uc_mcontext.regs->gpr[1/*REG_SP*/], + (address)uc->uc_mcontext.regs->nip); + + if (ret_frame.pc() == NULL) { + // ucontext wasn't useful + return false; + } + + if (ret_frame.is_interpreted_frame()) { + frame::ijava_state* istate = ret_frame.get_ijava_state(); + if (!((Method*)(istate->method))->is_metaspace_object()) { + return false; + } + uint64_t reg_bcp = uc->uc_mcontext.regs->gpr[14/*R14_bcp*/]; + uint64_t istate_bcp = istate->bcp; + uint64_t code_start = (uint64_t)(((Method*)(istate->method))->code_base()); + uint64_t code_end = (uint64_t)(((Method*)istate->method)->code_base() + ((Method*)istate->method)->code_size()); + if (istate_bcp >= code_start && istate_bcp < code_end) { + // we have a valid bcp, don't touch it, do nothing + } else if (reg_bcp >= code_start && reg_bcp < code_end) { + istate->bcp = reg_bcp; + } else { + return false; + } + } + if (!ret_frame.safe_for_sender(this)) { + // nothing else to try if the frame isn't good + return false; + } + *fr_addr = ret_frame; + return true; + } + // nothing else to try + return false; +} + // Forte Analyzer AsyncGetCallTrace profiling support is not implemented on Linux/PPC. 
bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava) { - Unimplemented(); - return false; + assert(this->is_Java_thread(), "must be JavaThread"); + return pd_get_top_frame_for_profiling(fr_addr, ucontext, isInJava); } void JavaThread::cache_global_variables() { } --- old/src/os_cpu/linux_ppc/vm/thread_linux_ppc.hpp 2019-02-15 19:02:18.475496235 +0300 +++ new/src/os_cpu/linux_ppc/vm/thread_linux_ppc.hpp 2019-02-15 19:02:18.339500996 +0300 @@ -62,6 +62,8 @@ bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava); + bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava); + protected: // -Xprof support --- old/src/os_cpu/solaris_x86/vm/os_solaris_x86.inline.hpp 2019-02-15 19:02:18.791485173 +0300 +++ new/src/os_cpu/solaris_x86/vm/os_solaris_x86.inline.hpp 2019-02-15 19:02:18.659489793 +0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,8 @@ #include "runtime/os.hpp" +extern "C" jlong _raw_rdtsc(); // In .il file + inline jlong os::rdtsc() { return _raw_rdtsc(); } #endif // OS_CPU_SOLARIS_X86_VM_OS_SOLARIS_X86_INLINE_HPP --- old/src/share/vm/c1/c1_GraphBuilder.cpp 2019-02-15 19:02:19.103474251 +0300 +++ new/src/share/vm/c1/c1_GraphBuilder.cpp 2019-02-15 19:02:18.987478312 +0300 @@ -34,6 +34,7 @@ #include "ci/ciMemberName.hpp" #include "compiler/compileBroker.hpp" #include "interpreter/bytecode.hpp" +#include "jfr/jfrEvents.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/compilationPolicy.hpp" #include "utilities/bitMap.inline.hpp" @@ -3460,10 +3461,16 @@ if (!InlineArrayCopy) return false; break; -#ifdef TRACE_HAVE_INTRINSICS - case vmIntrinsics::_classID: - case vmIntrinsics::_threadID: - preserves_state = true; +#ifdef JFR_HAVE_INTRINSICS +#if defined(_LP64) || !defined(TRACE_ID_CLASS_SHIFT) + case vmIntrinsics::_getClassId: + preserves_state = false; + cantrap = false; + break; +#endif + + case vmIntrinsics::_getEventWriter: + preserves_state = false; cantrap = true; break; @@ -4396,6 +4403,30 @@ } +static void post_inlining_event(EventCompilerInlining* event, + int compile_id, + const char* msg, + bool success, + int bci, + ciMethod* caller, + ciMethod* callee) { + assert(caller != NULL, "invariant"); + assert(callee != NULL, "invariant"); + assert(event != NULL, "invariant"); + assert(event->should_commit(), "invariant"); + JfrStructCalleeMethod callee_struct; + callee_struct.set_type(callee->holder()->name()->as_utf8()); + callee_struct.set_name(callee->name()->as_utf8()); + callee_struct.set_descriptor(callee->signature()->as_symbol()->as_utf8()); + event->set_compileId(compile_id); + event->set_message(msg); + event->set_succeeded(success); + event->set_bci(bci); + event->set_caller(caller->get_Method()); + event->set_callee(callee_struct); + event->commit(); +} + void 
GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool success) { CompileLog* log = compilation()->log(); if (log != NULL) { @@ -4412,6 +4443,11 @@ } } + EventCompilerInlining event; + if (event.should_commit()) { + post_inlining_event(&event, compilation()->env()->task()->compile_id(), msg, success, bci(), method(), callee); + } + if (!PrintInlining && !compilation()->method()->has_option("PrintInlining")) { return; } --- old/src/share/vm/c1/c1_LIRGenerator.cpp 2019-02-15 19:02:19.471461370 +0300 +++ new/src/share/vm/c1/c1_LIRGenerator.cpp 2019-02-15 19:02:19.371464870 +0300 @@ -3055,6 +3055,51 @@ __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type())); } +#ifdef JFR_HAVE_INTRINSICS +void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) { + CodeEmitInfo* info = state_for(x); + CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check + + assert(info != NULL, "must have info"); + LIRItem arg(x->argument_at(0), this); + + arg.load_item(); + LIR_Opr klass = new_register(T_METADATA); + __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), T_ADDRESS), klass, info); + LIR_Opr id = new_register(T_LONG); + ByteSize offset = KLASS_TRACE_ID_OFFSET; + LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG); + + __ move(trace_id_addr, id); + __ logical_or(id, LIR_OprFact::longConst(0x01l), id); + __ store(id, trace_id_addr); + +#ifdef TRACE_ID_META_BITS + __ logical_and(id, LIR_OprFact::longConst(~TRACE_ID_META_BITS), id); +#endif +#ifdef TRACE_ID_SHIFT + __ unsigned_shift_right(id, TRACE_ID_SHIFT, id); +#endif + + __ move(id, rlock_result(x)); +} + +void LIRGenerator::do_getEventWriter(Intrinsic* x) { + LabelObj* L_end = new LabelObj(); + + LIR_Address* jobj_addr = new LIR_Address(getThreadPointer(), + in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR), + T_OBJECT); + LIR_Opr result = rlock_result(x); + __ move_wide(jobj_addr, result); + __ cmp(lir_cond_equal, 
result, LIR_OprFact::oopConst(NULL)); + __ branch(lir_cond_equal, T_OBJECT, L_end->label()); + __ move_wide(new LIR_Address(result, T_OBJECT), result); + + __ branch_destination(L_end->label()); +} +#endif + void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) { assert(x->number_of_arguments() == expected_arguments, "wrong type"); LIR_Opr reg = result_register_for(x->type()); @@ -3111,11 +3156,15 @@ break; } -#ifdef TRACE_HAVE_INTRINSICS - case vmIntrinsics::_threadID: do_ThreadIDIntrinsic(x); break; - case vmIntrinsics::_classID: do_ClassIDIntrinsic(x); break; +#ifdef JFR_HAVE_INTRINSICS + case vmIntrinsics::_getClassId: + do_ClassIDIntrinsic(x); + break; + case vmIntrinsics::_getEventWriter: + do_getEventWriter(x); + break; case vmIntrinsics::_counterTime: - do_RuntimeCall(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), 0, x); + do_RuntimeCall(CAST_FROM_FN_PTR(address, JFR_TIME_FUNCTION), 0, x); break; #endif --- old/src/share/vm/c1/c1_LIRGenerator.hpp 2019-02-15 19:02:19.863447649 +0300 +++ new/src/share/vm/c1/c1_LIRGenerator.hpp 2019-02-15 19:02:19.755451430 +0300 @@ -28,6 +28,7 @@ #include "c1/c1_Instruction.hpp" #include "c1/c1_LIR.hpp" #include "ci/ciMethodData.hpp" +#include "jfr/support/jfrIntrinsics.hpp" #include "utilities/sizes.hpp" // The classes responsible for code emission and register allocation @@ -436,9 +437,9 @@ void do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux); void do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x); -#ifdef TRACE_HAVE_INTRINSICS - void do_ThreadIDIntrinsic(Intrinsic* x); +#ifdef JFR_HAVE_INTRINSICS void do_ClassIDIntrinsic(Intrinsic* x); + void do_getEventWriter(Intrinsic* x); #endif ciKlass* profile_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k, Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k, --- old/src/share/vm/c1/c1_Runtime1.cpp 2019-02-15 19:02:20.075440228 +0300 +++ 
new/src/share/vm/c1/c1_Runtime1.cpp 2019-02-15 19:02:19.987443309 +0300 @@ -41,6 +41,7 @@ #include "gc_interface/collectedHeap.hpp" #include "interpreter/bytecode.hpp" #include "interpreter/interpreter.hpp" +#include "jfr/support/jfrIntrinsics.hpp" #include "memory/allocation.inline.hpp" #include "memory/barrierSet.hpp" #include "memory/oopFactory.hpp" @@ -296,8 +297,8 @@ FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit); FUNCTION_CASE(entry, is_instance_of); FUNCTION_CASE(entry, trace_block_entry); -#ifdef TRACE_HAVE_INTRINSICS - FUNCTION_CASE(entry, TRACE_TIME_METHOD); +#ifdef JFR_HAVE_INTRINSICS + FUNCTION_CASE(entry, JFR_TIME_FUNCTION); #endif FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32()); --- old/src/share/vm/ci/ciEnv.cpp 2019-02-15 19:02:20.319431689 +0300 +++ new/src/share/vm/ci/ciEnv.cpp 2019-02-15 19:02:20.227434908 +0300 @@ -40,6 +40,7 @@ #include "compiler/compilerOracle.hpp" #include "gc_interface/collectedHeap.inline.hpp" #include "interpreter/linkResolver.hpp" +#include "jfr/jfrEvents.hpp" #include "memory/allocation.inline.hpp" #include "memory/oopFactory.hpp" #include "memory/universe.inline.hpp" --- old/src/share/vm/ci/ciMethod.hpp 2019-02-15 19:02:20.535424130 +0300 +++ new/src/share/vm/ci/ciMethod.hpp 2019-02-15 19:02:20.443427349 +0300 @@ -93,12 +93,6 @@ ciMethod(methodHandle h_m, ciInstanceKlass* holder); ciMethod(ciInstanceKlass* holder, ciSymbol* name, ciSymbol* signature, ciInstanceKlass* accessor); - Method* get_Method() const { - Method* m = (Method*)_metadata; - assert(m != NULL, "illegal use of unloaded method"); - return m; - } - oop loader() const { return _holder->loader(); } const char* type_string() { return "ciMethod"; } @@ -156,6 +150,11 @@ } } + Method* get_Method() const { + Method* m = (Method*)_metadata; + assert(m != NULL, "illegal use of unloaded method"); + return m; + } // Method code and related information. 
address code() { if (_code == NULL) load_code(); return _code; } --- old/src/share/vm/classfile/classFileParser.cpp 2019-02-15 19:02:20.751416570 +0300 +++ new/src/share/vm/classfile/classFileParser.cpp 2019-02-15 19:02:20.651420070 +0300 @@ -3884,14 +3884,14 @@ access_flags.set_flags(flags); // This class and superclass - u2 this_class_index = cfs->get_u2_fast(); + _this_class_index = cfs->get_u2_fast(); check_property( - valid_cp_range(this_class_index, cp_size) && - cp->tag_at(this_class_index).is_unresolved_klass(), + valid_cp_range(_this_class_index, cp_size) && + cp->tag_at(_this_class_index).is_unresolved_klass(), "Invalid this class index %u in constant pool in class file %s", - this_class_index, CHECK_(nullHandle)); + _this_class_index, CHECK_(nullHandle)); - Symbol* class_name = cp->unresolved_klass_at(this_class_index); + Symbol* class_name = cp->unresolved_klass_at(_this_class_index); assert(class_name != NULL, "class_name can't be null"); // It's important to set parsed_name *before* resolving the super class. @@ -4122,9 +4122,9 @@ // that changes, then InstanceKlass::idnum_can_increment() // has to be changed accordingly. 
this_klass->set_initial_method_idnum(methods->length()); - this_klass->set_name(cp->klass_name_at(this_class_index)); + this_klass->set_name(cp->klass_name_at(_this_class_index)); if (is_anonymous()) // I am well known to myself - cp->klass_at_put(this_class_index, this_klass()); // eagerly resolve + cp->klass_at_put(_this_class_index, this_klass()); // eagerly resolve this_klass->set_minor_version(minor_version); this_klass->set_major_version(major_version); @@ -4265,6 +4265,8 @@ preserve_this_klass = this_klass(); } + JFR_ONLY(INIT_ID(preserve_this_klass);) + // Create new handle outside HandleMark (might be needed for // Extended Class Redefinition) instanceKlassHandle this_klass (THREAD, preserve_this_klass); @@ -5300,3 +5302,25 @@ } return NULL; } + +#if INCLUDE_JFR + +// Caller responsible for ResourceMark +// clone stream with rewound position +ClassFileStream* ClassFileParser::clone_stream() const { + assert(_stream != NULL, "invariant"); + + return _stream->clone(); +} + +void ClassFileParser::set_klass_to_deallocate(InstanceKlass* klass) { +#ifdef ASSERT + if (klass != NULL) { + assert(NULL == _klass, "leaking?"); + } +#endif + + _klass = klass; +} + +#endif // INCLUDE_JFR --- old/src/share/vm/classfile/classFileParser.hpp 2019-02-15 19:02:21.175401730 +0300 +++ new/src/share/vm/classfile/classFileParser.hpp 2019-02-15 19:02:21.083404950 +0300 @@ -47,6 +47,7 @@ bool _relax_verify; u2 _major_version; u2 _minor_version; + u2 _this_class_index; Symbol* _class_name; ClassLoaderData* _loader_data; KlassHandle _host_klass; @@ -491,6 +492,13 @@ static void check_super_interface_access(instanceKlassHandle this_klass, TRAPS); static void check_final_method_override(instanceKlassHandle this_klass, TRAPS); static void check_illegal_static_method(instanceKlassHandle this_klass, TRAPS); + + u2 this_class_index() const { return _this_class_index; } + +#if INCLUDE_JFR + ClassFileStream* clone_stream() const; + void set_klass_to_deallocate(InstanceKlass* klass); +#endif 
// INCLUDE_JFR }; #endif // SHARE_VM_CLASSFILE_CLASSFILEPARSER_HPP --- old/src/share/vm/classfile/classFileStream.cpp 2019-02-15 19:02:21.439392491 +0300 +++ new/src/share/vm/classfile/classFileStream.cpp 2019-02-15 19:02:21.331396271 +0300 @@ -30,12 +30,12 @@ THROW_MSG(vmSymbols::java_lang_ClassFormatError(), "Truncated class file"); } -ClassFileStream::ClassFileStream(u1* buffer, int length, const char* source) { +ClassFileStream::ClassFileStream(u1* buffer, int length, const char* source, bool need_verify) { _buffer_start = buffer; _buffer_end = buffer + length; _current = buffer; _source = source; - _need_verify = false; + _need_verify = need_verify; } u1 ClassFileStream::get_u1(TRAPS) { @@ -100,3 +100,31 @@ } _current += length * 4; } + +#if INCLUDE_JFR + +u1* ClassFileStream::clone_buffer() const { + u1* const new_buffer_start = NEW_RESOURCE_ARRAY(u1, length()); + memcpy(new_buffer_start, _buffer_start, length()); + return new_buffer_start; +} + +const char* const ClassFileStream::clone_source() const { + const char* const src = source(); + char* source_copy = NULL; + if (src != NULL) { + size_t source_len = strlen(src); + source_copy = NEW_RESOURCE_ARRAY(char, source_len + 1); + strncpy(source_copy, src, source_len + 1); + } + return source_copy; +} + +ClassFileStream* ClassFileStream::clone() const { + u1* const new_buffer_start = clone_buffer(); + return new ClassFileStream(new_buffer_start, + length(), + clone_source(), + need_verify()); +} +#endif // INCLUDE_JFR --- old/src/share/vm/classfile/classFileStream.hpp 2019-02-15 19:02:21.687383812 +0300 +++ new/src/share/vm/classfile/classFileStream.hpp 2019-02-15 19:02:21.595387031 +0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -57,15 +57,25 @@ bool _need_verify; // True if verification is on for the class file void truncated_file_error(TRAPS); + +#if INCLUDE_JFR + u1* clone_buffer() const; + const char* const clone_source() const; +#endif + public: // Constructor - ClassFileStream(u1* buffer, int length, const char* source); + ClassFileStream(u1* buffer, int length, const char* source, bool need_verify = false); // Buffer access u1* buffer() const { return _buffer_start; } int length() const { return _buffer_end - _buffer_start; } u1* current() const { return _current; } void set_current(u1* pos) { _current = pos; } + // for relative positioning + juint current_offset() const { + return (juint)(_current - _buffer_start); + } const char* source() const { return _source; } void set_verify(bool flag) { _need_verify = flag; } @@ -140,6 +150,12 @@ // Tells whether eos is reached bool at_eos() const { return _current == _buffer_end; } + +#if INCLUDE_JFR + ClassFileStream* clone() const; + + bool need_verify() const { return _need_verify; } +#endif }; #endif // SHARE_VM_CLASSFILE_CLASSFILESTREAM_HPP --- old/src/share/vm/classfile/classLoader.cpp 2019-02-15 19:02:21.935375133 +0300 +++ new/src/share/vm/classfile/classLoader.cpp 2019-02-15 19:02:21.839378492 +0300 @@ -1134,6 +1134,15 @@ } return h; } + +#if INCLUDE_JFR + { + InstanceKlass* ik = result(); + ON_KLASS_CREATION(ik, parser, THREAD); + result = instanceKlassHandle(ik); + } +#endif + h = context.record_result(classpath_index, e, result, THREAD); } else { if (DumpSharedSpaces) { --- old/src/share/vm/classfile/classLoaderData.cpp 2019-02-15 19:02:22.239364494 +0300 +++ new/src/share/vm/classfile/classLoaderData.cpp 2019-02-15 19:02:22.131368274 +0300 @@ -64,8 +64,10 @@ #include "utilities/growableArray.hpp" #include "utilities/macros.hpp" #include "utilities/ostream.hpp" -#if INCLUDE_TRACE -#include "trace/tracing.hpp" +#include "utilities/ticks.hpp" +#if 
INCLUDE_JFR +#include "jfr/jfr.hpp" +#include "jfr/jfrEvents.hpp" #endif ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL; @@ -81,7 +83,8 @@ _claimed(0), _jmethod_ids(NULL), _handles(), _deallocate_list(NULL), _next(NULL), _dependencies(dependencies), _metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true)) { - // empty + + JFR_ONLY(INIT_ID(this);) } void ClassLoaderData::init_dependencies(TRAPS) { @@ -646,6 +649,16 @@ } } +void ClassLoaderDataGraph::cld_unloading_do(CLDClosure* cl) { + assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!"); + // Only walk the head until any clds not purged from prior unloading + // (CMS doesn't purge right away). + for (ClassLoaderData* cld = _unloading; cld != _saved_unloading; cld = cld->next()) { + assert(cld->is_unloading(), "invariant"); + cl->do_cld(cld); + } +} + void ClassLoaderDataGraph::roots_cld_do(CLDClosure* strong, CLDClosure* weak) { for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->_next) { CLDClosure* closure = cld->keep_alive() ? strong : weak; @@ -740,6 +753,28 @@ } #endif // PRODUCT +#if INCLUDE_JFR +static Ticks class_unload_time; +static void post_class_unload_event(Klass* const k) { + assert(k != NULL, "invariant"); + EventClassUnload event(UNTIMED); + event.set_endtime(class_unload_time); + event.set_unloadedClass(k); + event.set_definingClassLoader(k->class_loader_data()); + event.commit(); +} + +static void post_class_unload_events() { + assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!"); + if (Jfr::is_enabled()) { + if (EventClassUnload::is_enabled()) { + class_unload_time = Ticks::now(); + ClassLoaderDataGraph::classes_unloading_do(&post_class_unload_event); + } + Jfr::on_unloading_classes(); + } +} +#endif // INCLUDE_JFR // Move class loader data from main list to the unloaded list for unloading // and deallocation later. 
@@ -781,7 +816,7 @@ } if (seen_dead_loader) { - post_class_unload_events(); + JFR_ONLY(post_class_unload_events();) } return seen_dead_loader; @@ -820,20 +855,6 @@ Metaspace::purge(); } -void ClassLoaderDataGraph::post_class_unload_events(void) { -#if INCLUDE_TRACE - assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!"); - if (Tracing::enabled()) { - if (Tracing::is_event_enabled(TraceClassUnloadEvent)) { - assert(_unloading != NULL, "need class loader data unload list!"); - _class_unload_time = Ticks::now(); - classes_unloading_do(&class_unload_event); - } - Tracing::on_unloading_classes(); - } -#endif -} - void ClassLoaderDataGraph::free_deallocate_lists() { for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) { // We need to keep this data until InstanceKlass::purge_previous_version has been @@ -969,21 +990,3 @@ class_loader()->print_value_on(out); } } - -#if INCLUDE_TRACE - -Ticks ClassLoaderDataGraph::_class_unload_time; - -void ClassLoaderDataGraph::class_unload_event(Klass* const k) { - - // post class unload event - EventClassUnload event(UNTIMED); - event.set_endtime(_class_unload_time); - event.set_unloadedClass(k); - oop defining_class_loader = k->class_loader(); - event.set_definingClassLoader(defining_class_loader != NULL ? 
- defining_class_loader->klass() : (Klass*)NULL); - event.commit(); -} - -#endif // INCLUDE_TRACE --- old/src/share/vm/classfile/classLoaderData.hpp 2019-02-15 19:02:22.515354837 +0300 +++ new/src/share/vm/classfile/classLoaderData.hpp 2019-02-15 19:02:22.411358476 +0300 @@ -32,8 +32,8 @@ #include "runtime/mutex.hpp" #include "utilities/growableArray.hpp" #include "utilities/macros.hpp" -#if INCLUDE_TRACE -#include "utilities/ticks.hpp" +#if INCLUDE_JFR +#include "jfr/support/jfrTraceIdExtension.hpp" #endif // @@ -70,7 +70,6 @@ static bool _should_purge; static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS); - static void post_class_unload_events(void); static void clean_metaspaces(); public: static ClassLoaderData* find_or_create(Handle class_loader, TRAPS); @@ -82,6 +81,7 @@ static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim); // cld do static void cld_do(CLDClosure* cl); + static void cld_unloading_do(CLDClosure* cl); static void roots_cld_do(CLDClosure* strong, CLDClosure* weak); static void keep_alive_cld_do(CLDClosure* cl); static void always_strong_cld_do(CLDClosure* cl); @@ -116,12 +116,6 @@ #ifndef PRODUCT static bool contains_loader_data(ClassLoaderData* loader_data); #endif - -#if INCLUDE_TRACE - private: - static Ticks _class_unload_time; - static void class_unload_event(Klass* const k); -#endif }; // ClassLoaderData class @@ -213,6 +207,8 @@ static Metaspace* _ro_metaspace; static Metaspace* _rw_metaspace; + JFR_ONLY(DEFINE_TRACE_ID_FIELD;) + void set_next(ClassLoaderData* next) { _next = next; } ClassLoaderData* next() const { return _next; } @@ -223,11 +219,6 @@ Mutex* metaspace_lock() const { return _metaspace_lock; } - // GC interface. - void clear_claimed() { _claimed = 0; } - bool claimed() const { return _claimed == 1; } - bool claim(); - void unload(); bool keep_alive() const { return _keep_alive; } void classes_do(void f(Klass*)); @@ -242,6 +233,11 @@ public: + // GC interface. 
+ void clear_claimed() { _claimed = 0; } + bool claimed() const { return _claimed == 1; } + bool claim(); + bool is_alive(BoolObjectClosure* is_alive_closure) const; // Accessors @@ -325,6 +321,8 @@ Metaspace* ro_metaspace(); Metaspace* rw_metaspace(); void initialize_shared_metaspaces(); + + JFR_ONLY(DEFINE_TRACE_ID_METHODS;) }; // An iterator that distributes Klasses to parallel worker threads. --- old/src/share/vm/classfile/systemDictionary.cpp 2019-02-15 19:02:22.799344898 +0300 +++ new/src/share/vm/classfile/systemDictionary.cpp 2019-02-15 19:02:22.703348258 +0300 @@ -38,6 +38,7 @@ #include "compiler/compileBroker.hpp" #include "interpreter/bytecodeStream.hpp" #include "interpreter/interpreter.hpp" +#include "jfr/jfrEvents.hpp" #include "memory/filemap.hpp" #include "memory/gcLocker.hpp" #include "memory/oopFactory.hpp" @@ -64,9 +65,6 @@ #include "services/threadService.hpp" #include "utilities/macros.hpp" #include "utilities/ticks.hpp" -#if INCLUDE_TRACE -#include "trace/tracing.hpp" -#endif Dictionary* SystemDictionary::_dictionary = NULL; PlaceholderTable* SystemDictionary::_placeholders = NULL; @@ -598,6 +596,22 @@ return (nh); } +// utility function for class load event +static void post_class_load_event(EventClassLoad &event, + instanceKlassHandle k, + Handle initiating_loader) { +#if INCLUDE_JFR + if (event.should_commit()) { + event.set_loadedClass(k()); + event.set_definingClassLoader(k->class_loader_data()); + oop class_loader = initiating_loader.is_null() ? (oop)NULL : initiating_loader(); + event.set_initiatingClassLoader(class_loader != NULL ? 
+ ClassLoaderData::class_loader_data_or_null(class_loader) : + (ClassLoaderData*)NULL); + event.commit(); + } +#endif // INCLUDE_JFR +} Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle class_loader, @@ -606,7 +620,7 @@ assert(name != NULL && !FieldType::is_array(name) && !FieldType::is_obj(name), "invalid class name"); - Ticks class_load_start_time = Ticks::now(); + EventClassLoad class_load_start_event; // UseNewReflection // Fix for 4474172; see evaluation for more details @@ -848,7 +862,7 @@ return NULL; } - post_class_load_event(class_load_start_time, k, class_loader); + post_class_load_event(class_load_start_event, k, class_loader); #ifdef ASSERT { @@ -973,7 +987,7 @@ TRAPS) { TempNewSymbol parsed_name = NULL; - Ticks class_load_start_time = Ticks::now(); + EventClassLoad class_load_start_event; ClassLoaderData* loader_data; if (host_klass.not_null()) { @@ -1034,7 +1048,7 @@ JvmtiExport::post_class_load((JavaThread *) THREAD, k()); } - post_class_load_event(class_load_start_time, k, class_loader); + post_class_load_event(class_load_start_event, k, class_loader); } assert(host_klass.not_null() || cp_patches == NULL, "cp_patches only found with host_klass"); @@ -1076,12 +1090,13 @@ // // Note: "name" is updated. 
- instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name, - loader_data, - protection_domain, - parsed_name, - verify, - THREAD); + ClassFileParser parser(st); + instanceKlassHandle k = parser.parseClassFile(class_name, + loader_data, + protection_domain, + parsed_name, + verify, + THREAD); const char* pkg = "java/"; size_t pkglen = strlen(pkg); @@ -1116,6 +1131,14 @@ assert(is_internal_format(parsed_name), "external class name format used internally"); +#if INCLUDE_JFR + { + InstanceKlass* ik = k(); + ON_KLASS_CREATION(ik, parser, THREAD); + k = instanceKlassHandle(ik); + } +#endif + // Add class just loaded // If a class loader supports parallel classloading handle parallel define requests // find_or_define_instance_class may return a different InstanceKlass @@ -1376,6 +1399,15 @@ } } +static void post_class_define_event(InstanceKlass* k, const ClassLoaderData* def_cld) { + EventClassDefine event; + if (event.should_commit()) { + event.set_definedClass(k); + event.set_definingClassLoader(def_cld); + event.commit(); + } +} + void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) { ClassLoaderData* loader_data = k->class_loader_data(); @@ -1446,6 +1478,7 @@ } + post_class_define_event(k(), loader_data); } // Support parallel classloading @@ -2678,26 +2711,6 @@ constraints()->verify(dictionary(), placeholders()); } -// utility function for class load event -void SystemDictionary::post_class_load_event(const Ticks& start_time, - instanceKlassHandle k, - Handle initiating_loader) { -#if INCLUDE_TRACE - EventClassLoad event(UNTIMED); - if (event.should_commit()) { - event.set_starttime(start_time); - event.set_loadedClass(k()); - oop defining_class_loader = k->class_loader(); - event.set_definingClassLoader(defining_class_loader != NULL ? - defining_class_loader->klass() : (Klass*)NULL); - oop class_loader = initiating_loader.is_null() ? (oop)NULL : initiating_loader(); - event.set_initiatingClassLoader(class_loader != NULL ? 
- class_loader->klass() : (Klass*)NULL); - event.commit(); - } -#endif // INCLUDE_TRACE -} - #ifndef PRODUCT // statistics code --- old/src/share/vm/classfile/systemDictionary.hpp 2019-02-15 19:02:23.143332862 +0300 +++ new/src/share/vm/classfile/systemDictionary.hpp 2019-02-15 19:02:23.047336220 +0300 @@ -77,7 +77,6 @@ template class HashtableBucket; class ResolutionErrorTable; class SymbolPropertyTable; -class Ticks; // Certain classes are preloaded, such as java.lang.Object and java.lang.String. // They are all "well-known", in the sense that no class loader is allowed @@ -654,9 +653,6 @@ // Setup link to hierarchy static void add_to_hierarchy(instanceKlassHandle k, TRAPS); - // event based tracing - static void post_class_load_event(const Ticks& start_time, instanceKlassHandle k, - Handle initiating_loader); // We pass in the hashtable index so we can calculate it outside of // the SystemDictionary_lock. --- old/src/share/vm/classfile/vmSymbols.cpp 2019-02-15 19:02:23.395324044 +0300 +++ new/src/share/vm/classfile/vmSymbols.cpp 2019-02-15 19:02:23.291327684 +0300 @@ -328,7 +328,7 @@ bool vmIntrinsics::should_be_pinned(vmIntrinsics::ID id) { assert(id != vmIntrinsics::_none, "must be a VM intrinsic"); switch(id) { -#ifdef TRACE_HAVE_INTRINSICS +#ifdef JFR_HAVE_INTRINSICS case vmIntrinsics::_counterTime: #endif case vmIntrinsics::_currentTimeMillis: --- old/src/share/vm/classfile/vmSymbols.hpp 2019-02-15 19:02:23.619316207 +0300 +++ new/src/share/vm/classfile/vmSymbols.hpp 2019-02-15 19:02:23.519319706 +0300 @@ -25,9 +25,10 @@ #ifndef SHARE_VM_CLASSFILE_VMSYMBOLS_HPP #define SHARE_VM_CLASSFILE_VMSYMBOLS_HPP -#include "oops/symbol.hpp" +#include "jfr/support/jfrIntrinsics.hpp" #include "memory/iterator.hpp" -#include "trace/traceMacros.hpp" +#include "oops/symbol.hpp" +#include "utilities/macros.hpp" // The class vmSymbols is a name space for fast lookup of // symbols commonly used in the VM. 
@@ -606,8 +607,8 @@ template(classRedefinedCount_name, "classRedefinedCount") \ template(classLoader_name, "classLoader") \ \ - /* trace signatures */ \ - TRACE_TEMPLATES(template) \ + /* jfr signatures */ \ + JFR_TEMPLATES(template) \ \ /*end*/ @@ -736,7 +737,7 @@ do_intrinsic(_nanoTime, java_lang_System, nanoTime_name, void_long_signature, F_S) \ do_name( nanoTime_name, "nanoTime") \ \ - TRACE_INTRINSICS(do_intrinsic, do_class, do_name, do_signature, do_alias) \ + JFR_INTRINSICS(do_intrinsic, do_class, do_name, do_signature, do_alias) \ \ do_intrinsic(_arraycopy, java_lang_System, arraycopy_name, arraycopy_signature, F_S) \ do_name( arraycopy_name, "arraycopy") \ --- old/src/share/vm/code/codeCache.cpp 2019-02-15 19:02:23.899306409 +0300 +++ new/src/share/vm/code/codeCache.cpp 2019-02-15 19:02:23.807309629 +0300 @@ -32,6 +32,7 @@ #include "code/pcDesc.hpp" #include "compiler/compileBroker.hpp" #include "gc_implementation/shared/markSweep.hpp" +#include "jfr/jfrEvents.hpp" #include "memory/allocation.inline.hpp" #include "memory/gcLocker.hpp" #include "memory/iterator.hpp" @@ -46,9 +47,9 @@ #include "runtime/java.hpp" #include "runtime/mutexLocker.hpp" #include "services/memoryService.hpp" -#include "trace/tracing.hpp" #include "utilities/xmlstream.hpp" + // Helper class for printing in CodeCache class CodeBlob_sizes { --- old/src/share/vm/compiler/compileBroker.cpp 2019-02-15 19:02:24.167297033 +0300 +++ new/src/share/vm/compiler/compileBroker.cpp 2019-02-15 19:02:24.063300671 +0300 @@ -30,6 +30,7 @@ #include "compiler/compileLog.hpp" #include "compiler/compilerOracle.hpp" #include "interpreter/linkResolver.hpp" +#include "jfr/jfrEvents.hpp" #include "memory/allocation.inline.hpp" #include "oops/methodData.hpp" #include "oops/method.hpp" @@ -43,7 +44,6 @@ #include "runtime/os.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/sweeper.hpp" -#include "trace/tracing.hpp" #include "utilities/dtrace.hpp" #include "utilities/events.hpp" #ifdef COMPILER1 @@ 
-1911,6 +1911,19 @@ tty->print("%s", s.as_string()); } +static void post_compilation_event(EventCompilation* event, CompileTask* task) { + assert(event != NULL, "invariant"); + assert(event->should_commit(), "invariant"); + event->set_method(task->method()); + event->set_compileId(task->compile_id()); + event->set_compileLevel(task->comp_level()); + event->set_succeded(task->is_success()); + event->set_isOsr(task->osr_bci() != CompileBroker::standard_entry_bci); + event->set_codeSize((task->code() == NULL) ? 0 : task->code()->total_size()); + event->set_inlinedBytes(task->num_inlined_bytecodes()); + event->commit(); +} + // ------------------------------------------------------------------ // CompileBroker::invoke_compiler_on_method // @@ -2008,8 +2021,9 @@ compilable = ci_env.compilable(); if (ci_env.failing()) { - task->set_failure_reason(ci_env.failure_reason()); + const char *failure_reason = ci_env.failure_reason(); const char* retry_message = ci_env.retry_message(); + task->set_failure_reason(failure_reason); if (_compilation_log != NULL) { _compilation_log->log_failure(thread, task, ci_env.failure_reason(), retry_message); } @@ -2019,6 +2033,13 @@ err_msg_res("COMPILE SKIPPED: %s", ci_env.failure_reason()); task->print_compilation(tty, msg); } + + EventCompilationFailure event; + if (event.should_commit()) { + event.set_compileId(compile_id); + event.set_failureMessage(failure_reason); + event.commit(); + } } else { task->mark_success(); task->set_num_inlined_bytecodes(ci_env.num_inlined_bytecodes()); @@ -2032,14 +2053,7 @@ // simulate crash during compilation assert(task->compile_id() != CICrashAt, "just as planned"); if (event.should_commit()) { - event.set_method(target->get_Method()); - event.set_compileID(compile_id); - event.set_compileLevel(task->comp_level()); - event.set_succeded(task->is_success()); - event.set_isOsr(is_osr); - event.set_codeSize((task->code() == NULL) ? 
0 : task->code()->total_size()); - event.set_inlinedBytes(task->num_inlined_bytecodes()); - event.commit(); + post_compilation_event(&event, task); } } pop_jni_handle_block(); --- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp 2019-02-15 19:02:24.499285418 +0300 +++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp 2019-02-15 19:02:24.375289756 +0300 @@ -4088,7 +4088,7 @@ g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); #endif // YOUNG_LIST_VERBOSE - g1_policy()->record_collection_pause_start(sample_start_time_sec); + g1_policy()->record_collection_pause_start(sample_start_time_sec, *_gc_tracer_stw); double scan_wait_start = os::elapsedTime(); // We have to wait until the CM threads finish scanning the --- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp 2019-02-15 19:02:24.891271703 +0300 +++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp 2019-02-15 19:02:24.799274921 +0300 @@ -75,7 +75,6 @@ class G1OldTracer; class EvacuationFailedInfo; class nmethod; -class Ticks; typedef OverflowTaskQueue RefToScanQueue; typedef GenericTaskQueueSet RefToScanQueueSet; --- old/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp 2019-02-15 19:02:25.147262747 +0300 +++ new/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp 2019-02-15 19:02:25.055265965 +0300 @@ -839,11 +839,11 @@ _stop_world_start = os::elapsedTime(); } -void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) { +void G1CollectorPolicy::record_collection_pause_start(double start_time_sec, GCTracer &tracer) { // We only need to do this here as the policy will only be applied // to the GC we're about to start. so, no point is calculating this // every time we calculate / recalculate the target young length. 
- update_survivors_policy(); + update_survivors_policy(tracer); assert(_g1->used() == _g1->recalculate_used(), err_msg("sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT, @@ -1453,7 +1453,7 @@ } // Calculates survivor space parameters. -void G1CollectorPolicy::update_survivors_policy() { +void G1CollectorPolicy::update_survivors_policy(GCTracer &tracer) { double max_survivor_regions_d = (double) _young_list_target_length / (double) SurvivorRatio; // We use ceiling so that if max_survivor_regions_d is > 0.0 (but @@ -1461,7 +1461,7 @@ _max_survivor_regions = (uint) ceil(max_survivor_regions_d); _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold( - HeapRegion::GrainWords * _max_survivor_regions); + HeapRegion::GrainWords * _max_survivor_regions, tracer); } bool G1CollectorPolicy::force_initial_mark_if_outside_cycle( --- old/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp 2019-02-15 19:02:25.423253091 +0300 +++ new/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp 2019-02-15 19:02:25.335256170 +0300 @@ -672,7 +672,7 @@ bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0); // Record the start and end of an evacuation pause. - void record_collection_pause_start(double start_time_sec); + void record_collection_pause_start(double start_time_sec, GCTracer &tracer); void record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info); // Record the start and end of a full collection. @@ -920,7 +920,7 @@ void update_max_gc_locker_expansion(); // Calculates survivor space parameters. 
- void update_survivors_policy(); + void update_survivors_policy(GCTracer &tracer); virtual void post_heap_initialize(); }; --- old/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp 2019-02-15 19:02:25.675244276 +0300 +++ new/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp 2019-02-15 19:02:25.555248473 +0300 @@ -583,13 +583,12 @@ G1GCParPhaseTimesTracker::G1GCParPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCParPhases phase, uint worker_id) : _phase_times(phase_times), _phase(phase), _worker_id(worker_id) { if (_phase_times != NULL) { - _start_time = os::elapsedTime(); + _start_time = Ticks::now(); } } G1GCParPhaseTimesTracker::~G1GCParPhaseTimesTracker() { if (_phase_times != NULL) { - _phase_times->record_time_secs(_phase, _worker_id, os::elapsedTime() - _start_time); + _phase_times->record_time_secs(_phase, _worker_id, (Ticks::now() - _start_time).seconds()); } } - --- old/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp 2019-02-15 19:02:25.927235460 +0300 +++ new/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp 2019-02-15 19:02:25.823239098 +0300 @@ -278,7 +278,7 @@ }; class G1GCParPhaseTimesTracker : public StackObj { - double _start_time; + Ticks _start_time; G1GCPhaseTimes::GCParPhases _phase; G1GCPhaseTimes* _phase_times; uint _worker_id; --- old/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp 2019-02-15 19:02:26.155227484 +0300 +++ new/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp 2019-02-15 19:02:26.059230842 +0300 @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "gc_implementation/shared/gcTrace.hpp" #include "gc_implementation/g1/g1MMUTracker.hpp" #include "runtime/mutexLocker.hpp" #include "utilities/ostream.hpp" @@ -105,6 +106,9 @@ ++_no_entries; } _array[_head_index] = G1MMUTrackerQueueElem(start, end); + + double slice_time = calculate_gc_time(end); + G1MMUTracer::report_mmu(_time_slice, slice_time, _max_gc_time); } // basically the _internal call does not remove expired entries --- 
old/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp 2019-02-15 19:02:26.383219509 +0300 +++ new/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp 2019-02-15 19:02:26.291222726 +0300 @@ -52,6 +52,9 @@ #include "runtime/vmThread.hpp" #include "utilities/copy.hpp" #include "utilities/events.hpp" +#if INCLUDE_JFR +#include "jfr/jfr.hpp" +#endif // INCLUDE_JFR class HeapRegion; @@ -267,6 +270,7 @@ // Now adjust pointers in remaining weak roots. (All of which should // have been cleared if they pointed to non-surviving objects.) JNIHandles::weak_oops_do(&always_true, &GenMarkSweep::adjust_pointer_closure); + JFR_ONLY(Jfr::weak_oops_do(&always_true, &GenMarkSweep::adjust_pointer_closure)); if (G1StringDedup::is_enabled()) { G1StringDedup::oops_do(&GenMarkSweep::adjust_pointer_closure); --- old/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp 2019-02-15 19:02:26.695208595 +0300 +++ new/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp 2019-02-15 19:02:26.579212653 +0300 @@ -187,6 +187,21 @@ } } +void G1ParScanThreadState::report_promotion_event(InCSetState const dest_state, + oop const old, size_t word_sz, uint age, + HeapWord * const obj_ptr, + AllocationContext_t context) const { + ParGCAllocBuffer* alloc_buf = _g1_par_allocator->alloc_buffer(dest_state, context); + if (alloc_buf->contains(obj_ptr)) { + _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz, age, + dest_state.value() == InCSetState::Old, + alloc_buf->word_sz()); + } else { + _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz, age, + dest_state.value() == InCSetState::Old); + } +} + InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop const m, uint& age) { if (state.is_young()) { age = !m->has_displaced_mark_helper() ? 
m->age() @@ -225,6 +240,10 @@ return _g1h->handle_evacuation_failure_par(this, old); } } + if (_g1h->_gc_tracer_stw->should_report_promotion_events()) { + // The events are checked individually as part of the actual commit + report_promotion_event(dest_state, old, word_sz, age, obj_ptr, context); + } } assert(obj_ptr != NULL, "when we get here, allocation should have succeeded"); --- old/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp 2019-02-15 19:02:26.939200060 +0300 +++ new/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp 2019-02-15 19:02:26.831203838 +0300 @@ -212,6 +212,10 @@ size_t word_sz, AllocationContext_t const context); + void report_promotion_event(InCSetState const dest_state, + oop const old, size_t word_sz, uint age, + HeapWord * const obj_ptr, AllocationContext_t context) const; + inline InCSetState next_state(InCSetState const state, markOop const m, uint& age); public: --- old/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp 2019-02-15 19:02:27.227189986 +0300 +++ new/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp 2019-02-15 19:02:27.131193344 +0300 @@ -1048,7 +1048,7 @@ assert(to()->is_empty(), "to space should be empty now"); - adjust_desired_tenuring_threshold(); + adjust_desired_tenuring_threshold(gc_tracer); } else { handle_promotion_failed(gch, thread_state_set, gc_tracer); } --- old/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp 2019-02-15 19:02:27.599176975 +0300 +++ new/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp 2019-02-15 19:02:27.459181871 +0300 @@ -53,6 +53,9 @@ #include "services/memoryService.hpp" #include "utilities/events.hpp" #include "utilities/stack.inline.hpp" +#if INCLUDE_JFR +#include "jfr/jfr.hpp" +#endif // INCLUDE_JFR PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC @@ -631,6 +634,7 @@ // have been cleared if they pointed to non-surviving objects.) 
// Global (weak) JNI handles JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure()); + JFR_ONLY(Jfr::weak_oops_do(&always_true, adjust_pointer_closure())); CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations); CodeCache::blobs_do(&adjust_from_blobs); --- old/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp 2019-02-15 19:02:27.951164663 +0300 +++ new/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp 2019-02-15 19:02:27.823169140 +0300 @@ -58,6 +58,9 @@ #include "services/memTracker.hpp" #include "utilities/events.hpp" #include "utilities/stack.inline.hpp" +#if INCLUDE_JFR +#include "jfr/jfr.hpp" +#endif // INCLUDE_JFR #include @@ -2471,6 +2474,7 @@ // have been cleared if they pointed to non-surviving objects.) // Global (weak) JNI handles JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure()); + JFR_ONLY(Jfr::weak_oops_do(&always_true, adjust_pointer_closure())); CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations); CodeCache::blobs_do(&adjust_from_blobs); --- old/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp 2019-02-15 19:02:28.407148715 +0300 +++ new/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp 2019-02-15 19:02:28.239154591 +0300 @@ -148,6 +148,10 @@ claimed_stack_depth()->push(p); } + inline void promotion_trace_event(oop new_obj, oop old_obj, size_t obj_size, + uint age, bool tenured, + const PSPromotionLAB* lab); + protected: static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; } public: --- old/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp 2019-02-15 19:02:28.747136824 +0300 +++ new/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp 2019-02-15 19:02:28.631140881 +0300 @@ -64,6 +64,33 @@ claim_or_forward_internal_depth(p); } +inline void 
PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj, + size_t obj_size, + uint age, bool tenured, + const PSPromotionLAB* lab) { + // Skip if memory allocation failed + if (new_obj != NULL) { + const ParallelScavengeTracer* gc_tracer = PSScavenge::gc_tracer(); + + if (lab != NULL) { + // Promotion of object through newly allocated PLAB + if (gc_tracer->should_report_promotion_in_new_plab_event()) { + size_t obj_bytes = obj_size * HeapWordSize; + size_t lab_size = lab->capacity(); + gc_tracer->report_promotion_in_new_plab_event(old_obj->klass(), obj_bytes, + age, tenured, lab_size); + } + } else { + // Promotion of object directly to heap + if (gc_tracer->should_report_promotion_outside_plab_event()) { + size_t obj_bytes = obj_size * HeapWordSize; + gc_tracer->report_promotion_outside_plab_event(old_obj->klass(), obj_bytes, + age, tenured); + } + } + } +} + // // This method is pretty bulky. It would be nice to split it up // into smaller submethods, but we need to be careful not to hurt @@ -85,11 +112,11 @@ bool new_obj_is_tenured = false; size_t new_obj_size = o->size(); - if (!promote_immediately) { - // Find the objects age, MT safe. - uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ? - test_mark->displaced_mark_helper()->age() : test_mark->age(); + // Find the objects age, MT safe. + uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ? 
+ test_mark->displaced_mark_helper()->age() : test_mark->age(); + if (!promote_immediately) { // Try allocating obj in to-space (unless too old) if (age < PSScavenge::tenuring_threshold()) { new_obj = (oop) _young_lab.allocate(new_obj_size); @@ -98,6 +125,7 @@ if (new_obj_size > (YoungPLABSize / 2)) { // Allocate this object directly new_obj = (oop)young_space()->cas_allocate(new_obj_size); + promotion_trace_event(new_obj, o, new_obj_size, age, false, NULL); } else { // Flush and fill _young_lab.flush(); @@ -107,6 +135,7 @@ _young_lab.initialize(MemRegion(lab_base, YoungPLABSize)); // Try the young lab allocation again. new_obj = (oop) _young_lab.allocate(new_obj_size); + promotion_trace_event(new_obj, o, new_obj_size, age, false, &_young_lab); } else { _young_gen_is_full = true; } @@ -132,6 +161,7 @@ if (new_obj_size > (OldPLABSize / 2)) { // Allocate this object directly new_obj = (oop)old_gen()->cas_allocate(new_obj_size); + promotion_trace_event(new_obj, o, new_obj_size, age, true, NULL); } else { // Flush and fill _old_lab.flush(); @@ -148,6 +178,7 @@ _old_lab.initialize(MemRegion(lab_base, OldPLABSize)); // Try the old lab allocation again. 
new_obj = (oop) _old_lab.allocate(new_obj_size); + promotion_trace_event(new_obj, o, new_obj_size, age, true, &_old_lab); } } } --- old/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp 2019-02-15 19:02:29.115123955 +0300 +++ new/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp 2019-02-15 19:02:28.947129830 +0300 @@ -92,6 +92,7 @@ // Private accessors static CardTableExtension* const card_table() { assert(_card_table != NULL, "Sanity"); return _card_table; } + static const ParallelScavengeTracer* gc_tracer() { return &_gc_tracer; } public: // Accessors --- old/src/share/vm/gc_implementation/shared/ageTable.cpp 2019-02-15 19:02:29.447112345 +0300 +++ new/src/share/vm/gc_implementation/shared/ageTable.cpp 2019-02-15 19:02:29.315116960 +0300 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "gc_implementation/shared/ageTable.hpp" +#include "gc_implementation/shared/ageTableTracer.hpp" #include "gc_implementation/shared/gcPolicyCounters.hpp" #include "memory/collectorPolicy.hpp" #include "memory/resourceArea.hpp" @@ -78,7 +79,7 @@ } } -uint ageTable::compute_tenuring_threshold(size_t survivor_capacity) { +uint ageTable::compute_tenuring_threshold(size_t survivor_capacity, GCTracer &tracer) { size_t desired_survivor_size = (size_t)((((double) survivor_capacity)*TargetSurvivorRatio)/100); size_t total = 0; uint age = 1; @@ -92,7 +93,7 @@ } uint result = age < MaxTenuringThreshold ? 
age : MaxTenuringThreshold; - if (PrintTenuringDistribution || UsePerfData) { + if (PrintTenuringDistribution || UsePerfData || AgeTableTracer::is_tenuring_distribution_event_enabled()) { if (PrintTenuringDistribution) { gclog_or_tty->cr(); @@ -110,6 +111,7 @@ age, sizes[age]*oopSize, total*oopSize); } } + AgeTableTracer::send_tenuring_distribution_event(age, wordSize * oopSize, tracer); if (UsePerfData) { _perf_sizes[age]->set_value(sizes[age]*oopSize); } --- old/src/share/vm/gc_implementation/shared/ageTable.hpp 2019-02-15 19:02:29.759101434 +0300 +++ new/src/share/vm/gc_implementation/shared/ageTable.hpp 2019-02-15 19:02:29.611106609 +0300 @@ -27,6 +27,7 @@ #include "oops/markOop.hpp" #include "oops/oop.hpp" +#include "gc_implementation/shared/gcTrace.hpp" #include "runtime/perfData.hpp" /* Copyright (c) 1992-2009 Oracle and/or its affiliates, and Stanford University. @@ -69,7 +70,7 @@ void merge_par(ageTable* subTable); // calculate new tenuring threshold based on age information - uint compute_tenuring_threshold(size_t survivor_capacity); + uint compute_tenuring_threshold(size_t survivor_capacity, GCTracer &tracer); private: PerfVariable* _perf_sizes[table_size]; --- old/src/share/vm/gc_implementation/shared/gcTimer.cpp 2019-02-15 19:02:30.167087166 +0300 +++ new/src/share/vm/gc_implementation/shared/gcTimer.cpp 2019-02-15 19:02:30.003092901 +0300 @@ -25,7 +25,7 @@ #include "precompiled.hpp" #include "gc_implementation/shared/gcTimer.hpp" #include "utilities/growableArray.hpp" -#include "utilities/ticks.inline.hpp" +#include "utilities/ticks.hpp" // the "time" parameter for most functions // has a default value set by Ticks::now() @@ -349,7 +349,7 @@ GCTimer gc_timer; gc_timer.register_gc_start(1); - assert(gc_timer.gc_start() == 1, "Incorrect"); + assert(gc_timer.gc_start() == Ticks(1), "Incorrect"); } static void gc_end() { @@ -357,7 +357,7 @@ gc_timer.register_gc_start(1); gc_timer.register_gc_end(2); - assert(gc_timer.gc_end() == 2, "Incorrect"); + 
assert(gc_timer.gc_end() == Ticks(2), "Incorrect"); } }; --- old/src/share/vm/gc_implementation/shared/gcTrace.cpp 2019-02-15 19:02:30.459076956 +0300 +++ new/src/share/vm/gc_implementation/shared/gcTrace.cpp 2019-02-15 19:02:30.351080732 +0300 @@ -33,7 +33,7 @@ #include "memory/referenceProcessorStats.hpp" #include "runtime/os.hpp" #include "utilities/globalDefinitions.hpp" -#include "utilities/ticks.inline.hpp" +#include "utilities/ticks.hpp" #if INCLUDE_ALL_GCS #include "gc_implementation/g1/evacuationInfo.hpp" @@ -172,6 +172,30 @@ _tenuring_threshold = tenuring_threshold; } +bool YoungGCTracer::should_report_promotion_events() const { + return should_report_promotion_in_new_plab_event() || + should_report_promotion_outside_plab_event(); +} + +bool YoungGCTracer::should_report_promotion_in_new_plab_event() const { + return should_send_promotion_in_new_plab_event(); +} + +bool YoungGCTracer::should_report_promotion_outside_plab_event() const { + return should_send_promotion_outside_plab_event(); +} + +void YoungGCTracer::report_promotion_in_new_plab_event(Klass* klass, size_t obj_size, + uint age, bool tenured, + size_t plab_size) const { + send_promotion_in_new_plab_event(klass, obj_size, age, tenured, plab_size); +} + +void YoungGCTracer::report_promotion_outside_plab_event(Klass* klass, size_t obj_size, + uint age, bool tenured) const { + send_promotion_outside_plab_event(klass, obj_size, age, tenured); +} + void OldGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) { assert_set_gc_id(); @@ -199,6 +223,12 @@ } #if INCLUDE_ALL_GCS +void G1MMUTracer::report_mmu(double time_slice_sec, double gc_time_sec, double max_time_sec) { + send_g1_mmu_event(time_slice_sec * MILLIUNITS, + gc_time_sec * MILLIUNITS, + max_time_sec * MILLIUNITS); +} + void G1NewTracer::report_yc_type(G1YCType type) { assert_set_gc_id(); --- old/src/share/vm/gc_implementation/shared/gcTrace.hpp 2019-02-15 19:02:30.755066606 +0300 +++ 
new/src/share/vm/gc_implementation/shared/gcTrace.hpp 2019-02-15 19:02:30.635070802 +0300 @@ -156,10 +156,39 @@ public: void report_promotion_failed(const PromotionFailedInfo& pf_info); void report_tenuring_threshold(const uint tenuring_threshold); + /* + * Methods for reporting Promotion in new or outside PLAB Events. + * + * The object age is always required as it is not certain that the mark word + * of the oop can be trusted at this stage. + * + * obj_size is the size of the promoted object in bytes. + * + * tenured should be true if the object has been promoted to the old + * space during this GC, if the object is copied to survivor space + * from young space or survivor space (aging) tenured should be false. + * + * plab_size is the size of the newly allocated PLAB in bytes. + */ + bool should_report_promotion_events() const; + bool should_report_promotion_in_new_plab_event() const; + bool should_report_promotion_outside_plab_event() const; + void report_promotion_in_new_plab_event(Klass* klass, size_t obj_size, + uint age, bool tenured, + size_t plab_size) const; + void report_promotion_outside_plab_event(Klass* klass, size_t obj_size, + uint age, bool tenured) const; private: void send_young_gc_event() const; void send_promotion_failed_event(const PromotionFailedInfo& pf_info) const; + bool should_send_promotion_in_new_plab_event() const; + bool should_send_promotion_outside_plab_event() const; + void send_promotion_in_new_plab_event(Klass* klass, size_t obj_size, + uint age, bool tenured, + size_t plab_size) const; + void send_promotion_outside_plab_event(Klass* klass, size_t obj_size, + uint age, bool tenured) const; }; class OldGCTracer : public GCTracer { @@ -210,6 +239,13 @@ }; #if INCLUDE_ALL_GCS +class G1MMUTracer : public AllStatic { + static void send_g1_mmu_event(double time_slice_ms, double gc_time_ms, double max_time_ms); + + public: + static void report_mmu(double time_slice_sec, double gc_time_sec, double max_time_sec); +}; + class G1NewTracer 
: public YoungGCTracer { G1YoungGCInfo _g1_young_gc_info; --- old/src/share/vm/gc_implementation/shared/gcTraceSend.cpp 2019-02-15 19:02:31.019057375 +0300 +++ new/src/share/vm/gc_implementation/shared/gcTraceSend.cpp 2019-02-15 19:02:30.919060871 +0300 @@ -23,14 +23,13 @@ */ #include "precompiled.hpp" +#include "jfr/jfrEvents.hpp" #include "gc_implementation/shared/gcHeapSummary.hpp" #include "gc_implementation/shared/gcTimer.hpp" #include "gc_implementation/shared/gcTrace.hpp" #include "gc_implementation/shared/gcWhen.hpp" #include "gc_implementation/shared/copyFailedInfo.hpp" #include "runtime/os.hpp" -#include "trace/tracing.hpp" -#include "trace/traceBackend.hpp" #if INCLUDE_ALL_GCS #include "gc_implementation/g1/evacuationInfo.hpp" #include "gc_implementation/g1/g1YCTypes.hpp" @@ -41,7 +40,7 @@ typedef uintptr_t TraceAddress; void GCTracer::send_garbage_collection_event() const { - EventGCGarbageCollection event(UNTIMED); + EventGarbageCollection event(UNTIMED); if (event.should_commit()) { event.set_gcId(_shared_gc_info.gc_id().id()); event.set_name(_shared_gc_info.name()); @@ -89,7 +88,7 @@ } void ParallelOldTracer::send_parallel_old_event() const { - EventGCParallelOld e(UNTIMED); + EventParallelOldGarbageCollection e(UNTIMED); if (e.should_commit()) { e.set_gcId(_shared_gc_info.gc_id().id()); e.set_densePrefix((TraceAddress)_parallel_old_gc_info.dense_prefix()); @@ -100,7 +99,7 @@ } void YoungGCTracer::send_young_gc_event() const { - EventGCYoungGarbageCollection e(UNTIMED); + EventYoungGarbageCollection e(UNTIMED); if (e.should_commit()) { e.set_gcId(_shared_gc_info.gc_id().id()); e.set_tenuringThreshold(_tenuring_threshold); @@ -110,8 +109,46 @@ } } +bool YoungGCTracer::should_send_promotion_in_new_plab_event() const { + return EventPromoteObjectInNewPLAB::is_enabled(); +} + +bool YoungGCTracer::should_send_promotion_outside_plab_event() const { + return EventPromoteObjectOutsidePLAB::is_enabled(); +} + +void 
YoungGCTracer::send_promotion_in_new_plab_event(Klass* klass, size_t obj_size, + uint age, bool tenured, + size_t plab_size) const { + + EventPromoteObjectInNewPLAB event; + if (event.should_commit()) { + event.set_gcId(_shared_gc_info.gc_id().id()); + event.set_objectClass(klass); + event.set_objectSize(obj_size); + event.set_tenured(tenured); + event.set_tenuringAge(age); + event.set_plabSize(plab_size); + event.commit(); + } +} + +void YoungGCTracer::send_promotion_outside_plab_event(Klass* klass, size_t obj_size, + uint age, bool tenured) const { + + EventPromoteObjectOutsidePLAB event; + if (event.should_commit()) { + event.set_gcId(_shared_gc_info.gc_id().id()); + event.set_objectClass(klass); + event.set_objectSize(obj_size); + event.set_tenured(tenured); + event.set_tenuringAge(age); + event.commit(); + } +} + void OldGCTracer::send_old_gc_event() const { - EventGCOldGarbageCollection e(UNTIMED); + EventOldGarbageCollection e(UNTIMED); if (e.should_commit()) { e.set_gcId(_shared_gc_info.gc_id().id()); e.set_starttime(_shared_gc_info.start_timestamp()); @@ -120,8 +157,8 @@ } } -static TraceStructCopyFailed to_trace_struct(const CopyFailedInfo& cf_info) { - TraceStructCopyFailed failed_info; +static JfrStructCopyFailed to_struct(const CopyFailedInfo& cf_info) { + JfrStructCopyFailed failed_info; failed_info.set_objectCount(cf_info.failed_count()); failed_info.set_firstSize(cf_info.first_size()); failed_info.set_smallestSize(cf_info.smallest_size()); @@ -133,7 +170,7 @@ EventPromotionFailed e; if (e.should_commit()) { e.set_gcId(_shared_gc_info.gc_id().id()); - e.set_data(to_trace_struct(pf_info)); + e.set_promotionFailed(to_struct(pf_info)); e.set_thread(pf_info.thread()->thread_id()); e.commit(); } @@ -150,7 +187,7 @@ #if INCLUDE_ALL_GCS void G1NewTracer::send_g1_young_gc_event() { - EventGCG1GarbageCollection e(UNTIMED); + EventG1GarbageCollection e(UNTIMED); if (e.should_commit()) { e.set_gcId(_shared_gc_info.gc_id().id()); 
e.set_type(_g1_young_gc_info.type()); @@ -160,16 +197,27 @@ } } +void G1MMUTracer::send_g1_mmu_event(double time_slice_ms, double gc_time_ms, double max_time_ms) { + EventG1MMU e; + if (e.should_commit()) { + e.set_gcId(GCId::peek().id()); + e.set_timeSlice((s8)time_slice_ms); + e.set_gcTime((s8)gc_time_ms); + e.set_pauseTarget((s8)max_time_ms); + e.commit(); + } +} + void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) { - EventEvacuationInfo e; + EventEvacuationInformation e; if (e.should_commit()) { e.set_gcId(_shared_gc_info.gc_id().id()); e.set_cSetRegions(info->collectionset_regions()); e.set_cSetUsedBefore(info->collectionset_used_before()); e.set_cSetUsedAfter(info->collectionset_used_after()); e.set_allocationRegions(info->allocation_regions()); - e.set_allocRegionsUsedBefore(info->alloc_regions_used_before()); - e.set_allocRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied()); + e.set_allocationRegionsUsedBefore(info->alloc_regions_used_before()); + e.set_allocationRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied()); e.set_bytesCopied(info->bytes_copied()); e.set_regionsFreed(info->regions_freed()); e.commit(); @@ -180,14 +228,91 @@ EventEvacuationFailed e; if (e.should_commit()) { e.set_gcId(_shared_gc_info.gc_id().id()); - e.set_data(to_trace_struct(ef_info)); + e.set_evacuationFailed(to_struct(ef_info)); e.commit(); } } -#endif -static TraceStructVirtualSpace to_trace_struct(const VirtualSpaceSummary& summary) { - TraceStructVirtualSpace space; +// XXX +//static JfrStructG1EvacuationStatistics +//create_g1_evacstats(unsigned gcid, const G1EvacSummary& summary) { +// JfrStructG1EvacuationStatistics s; +// s.set_gcId(gcid); +// s.set_allocated(summary.allocated() * HeapWordSize); +// s.set_wasted(summary.wasted() * HeapWordSize); +// s.set_used(summary.used() * HeapWordSize); +// s.set_undoWaste(summary.undo_wasted() * HeapWordSize); +// s.set_regionEndWaste(summary.region_end_waste() * 
HeapWordSize); +// s.set_regionsRefilled(summary.regions_filled()); +// s.set_directAllocated(summary.direct_allocated() * HeapWordSize); +// s.set_failureUsed(summary.failure_used() * HeapWordSize); +// s.set_failureWaste(summary.failure_waste() * HeapWordSize); +// return s; +//} +// +//void G1NewTracer::send_young_evacuation_statistics(const G1EvacSummary& summary) const { +// EventG1EvacuationYoungStatistics surv_evt; +// if (surv_evt.should_commit()) { +// surv_evt.set_statistics(create_g1_evacstats(_shared_gc_info.gc_id().id(), summary)); +// surv_evt.commit(); +// } +//} +// +//void G1NewTracer::send_old_evacuation_statistics(const G1EvacSummary& summary) const { +// EventG1EvacuationOldStatistics old_evt; +// if (old_evt.should_commit()) { +// old_evt.set_statistics(create_g1_evacstats(_shared_gc_info.gc_id().id(), summary)); +// old_evt.commit(); +// } +//} +// +//void G1NewTracer::send_basic_ihop_statistics(size_t threshold, +// size_t target_occupancy, +// size_t current_occupancy, +// size_t last_allocation_size, +// double last_allocation_duration, +// double last_marking_length) { +// EventG1BasicIHOP evt; +// if (evt.should_commit()) { +// evt.set_gcId(_shared_gc_info.gc_id().id()); +// evt.set_threshold(threshold); +// evt.set_targetOccupancy(target_occupancy); +// evt.set_thresholdPercentage(target_occupancy > 0 ? ((double)threshold / target_occupancy) : 0.0); +// evt.set_currentOccupancy(current_occupancy); +// evt.set_recentMutatorAllocationSize(last_allocation_size); +// evt.set_recentMutatorDuration(last_allocation_duration * MILLIUNITS); +// evt.set_recentAllocationRate(last_allocation_duration != 0.0 ? 
last_allocation_size / last_allocation_duration : 0.0); +// evt.set_lastMarkingDuration(last_marking_length * MILLIUNITS); +// evt.commit(); +// } +//} +// +//void G1NewTracer::send_adaptive_ihop_statistics(size_t threshold, +// size_t internal_target_occupancy, +// size_t current_occupancy, +// size_t additional_buffer_size, +// double predicted_allocation_rate, +// double predicted_marking_length, +// bool prediction_active) { +// EventG1AdaptiveIHOP evt; +// if (evt.should_commit()) { +// evt.set_gcId(_shared_gc_info.gc_id().id()); +// evt.set_threshold(threshold); +// evt.set_thresholdPercentage(internal_target_occupancy > 0 ? ((double)threshold / internal_target_occupancy) : 0.0); +// evt.set_ihopTargetOccupancy(internal_target_occupancy); +// evt.set_currentOccupancy(current_occupancy); +// evt.set_additionalBufferSize(additional_buffer_size); +// evt.set_predictedAllocationRate(predicted_allocation_rate); +// evt.set_predictedMarkingDuration(predicted_marking_length * MILLIUNITS); +// evt.set_predictionActive(prediction_active); +// evt.commit(); +// } +//} + +#endif // INCLUDE_ALL_GCS + +static JfrStructVirtualSpace to_struct(const VirtualSpaceSummary& summary) { + JfrStructVirtualSpace space; space.set_start((TraceAddress)summary.start()); space.set_committedEnd((TraceAddress)summary.committed_end()); space.set_committedSize(summary.committed_size()); @@ -196,8 +321,8 @@ return space; } -static TraceStructObjectSpace to_trace_struct(const SpaceSummary& summary) { - TraceStructObjectSpace space; +static JfrStructObjectSpace to_struct(const SpaceSummary& summary) { + JfrStructObjectSpace space; space.set_start((TraceAddress)summary.start()); space.set_end((TraceAddress)summary.end()); space.set_used(summary.used()); @@ -218,12 +343,27 @@ if (e.should_commit()) { e.set_gcId(_gc_id.id()); e.set_when((u1)_when); - e.set_heapSpace(to_trace_struct(heap_space)); + e.set_heapSpace(to_struct(heap_space)); e.set_heapUsed(heap_summary->used()); e.commit(); } } +// 
void visit(const G1HeapSummary* g1_heap_summary) const { +// visit((GCHeapSummary*)g1_heap_summary); +// +// EventG1HeapSummary e; +// if (e.should_commit()) { +// e.set_gcId(_shared_gc_info.gc_id().id()); +// e.set_when((u1)_when); +// e.set_edenUsedSize(g1_heap_summary->edenUsed()); +// e.set_edenTotalSize(g1_heap_summary->edenCapacity()); +// e.set_survivorUsedSize(g1_heap_summary->survivorUsed()); +// e.set_numberOfRegions(g1_heap_summary->numberOfRegions()); +// e.commit(); +// } +// } + void visit(const PSHeapSummary* ps_heap_summary) const { visit((GCHeapSummary*)ps_heap_summary); @@ -239,12 +379,12 @@ e.set_gcId(_gc_id.id()); e.set_when((u1)_when); - e.set_oldSpace(to_trace_struct(ps_heap_summary->old())); - e.set_oldObjectSpace(to_trace_struct(ps_heap_summary->old_space())); - e.set_youngSpace(to_trace_struct(ps_heap_summary->young())); - e.set_edenSpace(to_trace_struct(ps_heap_summary->eden())); - e.set_fromSpace(to_trace_struct(ps_heap_summary->from())); - e.set_toSpace(to_trace_struct(ps_heap_summary->to())); + e.set_oldSpace(to_struct(ps_heap_summary->old())); + e.set_oldObjectSpace(to_struct(ps_heap_summary->old_space())); + e.set_youngSpace(to_struct(ps_heap_summary->young())); + e.set_edenSpace(to_struct(ps_heap_summary->eden())); + e.set_fromSpace(to_struct(ps_heap_summary->from())); + e.set_toSpace(to_struct(ps_heap_summary->to())); e.commit(); } } @@ -255,8 +395,8 @@ heap_summary.accept(&visitor); } -static TraceStructMetaspaceSizes to_trace_struct(const MetaspaceSizes& sizes) { - TraceStructMetaspaceSizes meta_sizes; +static JfrStructMetaspaceSizes to_struct(const MetaspaceSizes& sizes) { + JfrStructMetaspaceSizes meta_sizes; meta_sizes.set_committed(sizes.committed()); meta_sizes.set_used(sizes.used()); @@ -271,9 +411,9 @@ e.set_gcId(_shared_gc_info.gc_id().id()); e.set_when((u1) when); e.set_gcThreshold(meta_space_summary.capacity_until_GC()); - e.set_metaspace(to_trace_struct(meta_space_summary.meta_space())); - 
e.set_dataSpace(to_trace_struct(meta_space_summary.data_space())); - e.set_classSpace(to_trace_struct(meta_space_summary.class_space())); + e.set_metaspace(to_struct(meta_space_summary.meta_space())); + e.set_dataSpace(to_struct(meta_space_summary.data_space())); + e.set_classSpace(to_struct(meta_space_summary.class_space())); e.commit(); } } @@ -283,14 +423,14 @@ public: PhaseSender(GCId gc_id) : _gc_id(gc_id) {} - template - void send_phase(PausePhase* pause) { + template + void send_phase(GCPhase* phase) { T event(UNTIMED); if (event.should_commit()) { event.set_gcId(_gc_id.id()); - event.set_name(pause->name()); - event.set_starttime(pause->start()); - event.set_endtime(pause->end()); + event.set_name(phase->name()); + event.set_starttime(phase->start()); + event.set_endtime(phase->end()); event.commit(); } } --- old/src/share/vm/gc_implementation/shared/gcTraceTime.cpp 2019-02-15 19:02:31.339046187 +0300 +++ new/src/share/vm/gc_implementation/shared/gcTraceTime.cpp 2019-02-15 19:02:31.235049822 +0300 @@ -32,7 +32,7 @@ #include "runtime/thread.inline.hpp" #include "runtime/timer.hpp" #include "utilities/ostream.hpp" -#include "utilities/ticks.inline.hpp" +#include "utilities/ticks.hpp" GCTraceTime::GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer, GCId gc_id) : @@ -72,7 +72,7 @@ if (_doit) { const Tickspan duration = stop_counter - _start_counter; - double duration_in_seconds = TicksToTimeHelper::seconds(duration); + double duration_in_seconds = duration.seconds(); if (_print_cr) { gclog_or_tty->print_cr(", %3.7f secs]", duration_in_seconds); } else { --- old/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp 2019-02-15 19:02:31.631035977 +0300 +++ new/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp 2019-02-15 19:02:31.531039473 +0300 @@ -26,34 +26,53 @@ #include "precompiled.hpp" #include "gc_implementation/shared/gcId.hpp" #include "gc_implementation/shared/objectCountEventSender.hpp" +#include 
"jfr/jfrEvents.hpp" #include "memory/heapInspection.hpp" -#include "trace/tracing.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" #include "utilities/ticks.hpp" -#if INCLUDE_SERVICES -void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, const Ticks& timestamp) { -#if INCLUDE_TRACE - assert(Tracing::is_event_enabled(EventObjectCountAfterGC::eventId), - "Only call this method if the event is enabled"); - - EventObjectCountAfterGC event(UNTIMED); - event.set_gcId(gc_id.id()); - event.set_class(entry->klass()); - event.set_count(entry->count()); - event.set_totalSize(entry->words() * BytesPerWord); - event.set_endtime(timestamp); - event.commit(); -#endif // INCLUDE_TRACE -} + +#if INCLUDE_SERVICES bool ObjectCountEventSender::should_send_event() { -#if INCLUDE_TRACE - return Tracing::is_event_enabled(EventObjectCountAfterGC::eventId); +#if INCLUDE_JFR + return _should_send_requestable_event || EventObjectCountAfterGC::is_enabled(); #else return false; -#endif // INCLUDE_TRACE +#endif // INCLUDE_JFR +} + +bool ObjectCountEventSender::_should_send_requestable_event = false; + +void ObjectCountEventSender::enable_requestable_event() { + _should_send_requestable_event = true; +} + +void ObjectCountEventSender::disable_requestable_event() { + _should_send_requestable_event = false; +} + +template +void ObjectCountEventSender::send_event_if_enabled(Klass* klass, GCId gc_id, jlong count, julong size, const Ticks& timestamp) { + T event(UNTIMED); + if (event.should_commit()) { + event.set_gcId(gc_id.id()); + event.set_objectClass(klass); + event.set_count(count); + event.set_totalSize(size); + event.set_endtime(timestamp); + event.commit(); + } +} + +void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, const Ticks& timestamp) { + Klass* klass = entry->klass(); + jlong count = entry->count(); + julong total_size = entry->words() * BytesPerWord; + + send_event_if_enabled(klass, gc_id, count, total_size, 
timestamp); + send_event_if_enabled(klass, gc_id, count, total_size, timestamp); } #endif // INCLUDE_SERVICES --- old/src/share/vm/gc_implementation/shared/objectCountEventSender.hpp 2019-02-15 19:02:31.931025488 +0300 +++ new/src/share/vm/gc_implementation/shared/objectCountEventSender.hpp 2019-02-15 19:02:31.799030103 +0300 @@ -32,10 +32,17 @@ #if INCLUDE_SERVICES class KlassInfoEntry; -class Ticks; class ObjectCountEventSender : public AllStatic { + static bool _should_send_requestable_event; + + template + static void send_event_if_enabled(Klass* klass, GCId gc_id, jlong count, julong size, const Ticks& timestamp); + public: + static void enable_requestable_event(); + static void disable_requestable_event(); + static void send(const KlassInfoEntry* entry, GCId gc_id, const Ticks& timestamp); static bool should_send_event(); }; --- old/src/share/vm/gc_interface/allocTracer.cpp 2019-02-15 19:02:32.247014440 +0300 +++ new/src/share/vm/gc_interface/allocTracer.cpp 2019-02-15 19:02:32.139018216 +0300 @@ -25,23 +25,28 @@ #include "precompiled.hpp" #include "gc_implementation/shared/gcId.hpp" #include "gc_interface/allocTracer.hpp" -#include "trace/tracing.hpp" +#include "jfr/jfrEvents.hpp" #include "runtime/handles.hpp" #include "utilities/globalDefinitions.hpp" +#if INCLUDE_JFR +#include "jfr/support/jfrAllocationTracer.hpp" +#endif -void AllocTracer::send_allocation_outside_tlab_event(KlassHandle klass, size_t alloc_size) { - EventAllocObjectOutsideTLAB event; +void AllocTracer::send_allocation_outside_tlab_event(KlassHandle klass, HeapWord* obj, size_t alloc_size, Thread* thread) { + JFR_ONLY(JfrAllocationTracer tracer(obj, alloc_size, thread);) + EventObjectAllocationOutsideTLAB event; if (event.should_commit()) { - event.set_class(klass()); + event.set_objectClass(klass()); event.set_allocationSize(alloc_size); event.commit(); } } -void AllocTracer::send_allocation_in_new_tlab_event(KlassHandle klass, size_t tlab_size, size_t alloc_size) { - 
EventAllocObjectInNewTLAB event; +void AllocTracer::send_allocation_in_new_tlab_event(KlassHandle klass, HeapWord* obj, size_t tlab_size, size_t alloc_size, Thread* thread) { + JFR_ONLY(JfrAllocationTracer tracer(obj, alloc_size, thread);) + EventObjectAllocationInNewTLAB event; if (event.should_commit()) { - event.set_class(klass()); + event.set_objectClass(klass()); event.set_allocationSize(alloc_size); event.set_tlabSize(tlab_size); event.commit(); --- old/src/share/vm/gc_interface/allocTracer.hpp 2019-02-15 19:02:32.543004092 +0300 +++ new/src/share/vm/gc_interface/allocTracer.hpp 2019-02-15 19:02:32.443007588 +0300 @@ -30,8 +30,8 @@ class AllocTracer : AllStatic { public: - static void send_allocation_outside_tlab_event(KlassHandle klass, size_t alloc_size); - static void send_allocation_in_new_tlab_event(KlassHandle klass, size_t tlab_size, size_t alloc_size); + static void send_allocation_outside_tlab_event(KlassHandle klass, HeapWord* obj, size_t alloc_size, Thread* thread); + static void send_allocation_in_new_tlab_event(KlassHandle klass, HeapWord* obj, size_t tlab_size, size_t alloc_size, Thread* thread); static void send_allocation_requiring_gc_event(size_t size, const GCId& gcId); }; --- old/src/share/vm/gc_interface/collectedHeap.cpp 2019-02-15 19:02:32.806994862 +0300 +++ new/src/share/vm/gc_interface/collectedHeap.cpp 2019-02-15 19:02:32.698998638 +0300 @@ -286,7 +286,7 @@ return NULL; } - AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize); + AllocTracer::send_allocation_in_new_tlab_event(klass, obj, new_tlab_size * HeapWordSize, size * HeapWordSize, Thread::current()); if (ZeroTLAB) { // ..and clear it. 
--- old/src/share/vm/gc_interface/collectedHeap.inline.hpp 2019-02-15 19:02:33.106984375 +0300 +++ new/src/share/vm/gc_interface/collectedHeap.inline.hpp 2019-02-15 19:02:32.994988290 +0300 @@ -140,7 +140,7 @@ "Unexpected exception, will result in uninitialized storage"); THREAD->incr_allocated_bytes(size * HeapWordSize); - AllocTracer::send_allocation_outside_tlab_event(klass, size * HeapWordSize); + AllocTracer::send_allocation_outside_tlab_event(klass, result, size * HeapWordSize, Thread::current()); return result; } --- old/src/share/vm/memory/defNewGeneration.cpp 2019-02-15 19:02:33.342976124 +0300 +++ new/src/share/vm/memory/defNewGeneration.cpp 2019-02-15 19:02:33.238979760 +0300 @@ -551,10 +551,10 @@ return allocate(size, is_tlab); } -void DefNewGeneration::adjust_desired_tenuring_threshold() { +void DefNewGeneration::adjust_desired_tenuring_threshold(GCTracer &tracer) { // Set the desired survivor size to half the real survivor space _tenuring_threshold = - age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize); + age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize, tracer); } void DefNewGeneration::collect(bool full, @@ -664,7 +664,7 @@ assert(to()->is_empty(), "to space should be empty now"); - adjust_desired_tenuring_threshold(); + adjust_desired_tenuring_threshold(gc_tracer); // A successful scavenge should restart the GC time limit count which is // for full GC's. 
--- old/src/share/vm/memory/defNewGeneration.hpp 2019-02-15 19:02:33.646965497 +0300 +++ new/src/share/vm/memory/defNewGeneration.hpp 2019-02-15 19:02:33.538969273 +0300 @@ -129,7 +129,7 @@ } // Tenuring - void adjust_desired_tenuring_threshold(); + void adjust_desired_tenuring_threshold(GCTracer &tracer); // Spaces EdenSpace* _eden_space; --- old/src/share/vm/memory/genCollectedHeap.cpp 2019-02-15 19:02:33.922955849 +0300 +++ new/src/share/vm/memory/genCollectedHeap.cpp 2019-02-15 19:02:33.814959625 +0300 @@ -59,6 +59,9 @@ #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp" #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp" #endif // INCLUDE_ALL_GCS +#if INCLUDE_JFR +#include "jfr/jfr.hpp" +#endif // INCLUDE_JFR GenCollectedHeap* GenCollectedHeap::_gch; NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;) @@ -753,6 +756,7 @@ void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) { JNIHandles::weak_oops_do(&always_true, root_closure); + JFR_ONLY(Jfr::weak_oops_do(&always_true, root_closure)); for (int i = 0; i < _n_gens; i++) { _gens[i]->ref_processor()->weak_oops_do(root_closure); } --- old/src/share/vm/memory/metaspaceTracer.cpp 2019-02-15 19:02:34.206945922 +0300 +++ new/src/share/vm/memory/metaspaceTracer.cpp 2019-02-15 19:02:34.102949557 +0300 @@ -24,10 +24,9 @@ #include "precompiled.hpp" #include "classfile/classLoaderData.hpp" +#include "jfr/jfrEvents.hpp" #include "memory/metaspaceTracer.hpp" #include "oops/oop.inline.hpp" -#include "trace/tracing.hpp" -#include "trace/traceBackend.hpp" void MetaspaceTracer::report_gc_threshold(size_t old_val, size_t new_val, @@ -67,9 +66,9 @@ event.set_anonymousClassLoader(true); } else { if (cld->is_the_null_class_loader_data()) { - event.set_classLoader((Klass*) NULL); + event.set_classLoader(NULL); } else { - event.set_classLoader(cld->class_loader()->klass()); + event.set_classLoader(cld); } event.set_anonymousClassLoader(false); } --- 
old/src/share/vm/memory/referenceProcessor.cpp 2019-02-15 19:02:34.450937393 +0300 +++ new/src/share/vm/memory/referenceProcessor.cpp 2019-02-15 19:02:34.346941029 +0300 @@ -34,6 +34,9 @@ #include "oops/oop.inline.hpp" #include "runtime/java.hpp" #include "runtime/jniHandles.hpp" +#if INCLUDE_JFR +#include "jfr/jfr.hpp" +#endif // INCLUDE_JFR PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC @@ -306,6 +309,7 @@ } #endif JNIHandles::weak_oops_do(is_alive, keep_alive); + JFR_ONLY(Jfr::weak_oops_do(is_alive, keep_alive)); complete_gc->do_void(); } --- old/src/share/vm/oops/arrayKlass.cpp 2019-02-15 19:02:34.798925229 +0300 +++ new/src/share/vm/oops/arrayKlass.cpp 2019-02-15 19:02:34.678929424 +0300 @@ -94,6 +94,7 @@ int vtable_size = Universe::base_vtable_size(); set_vtable_length(vtable_size); set_is_cloneable(); // All arrays are considered to be cloneable (See JLS 20.1.5) + JFR_ONLY(INIT_ID(this);) } --- old/src/share/vm/oops/instanceKlass.hpp 2019-02-15 19:02:35.054916281 +0300 +++ new/src/share/vm/oops/instanceKlass.hpp 2019-02-15 19:02:34.954919776 +0300 @@ -38,7 +38,9 @@ #include "utilities/accessFlags.hpp" #include "utilities/bitMap.inline.hpp" #include "utilities/macros.hpp" -#include "trace/traceMacros.hpp" +#if INCLUDE_JFR +#include "jfr/support/jfrKlassExtension.hpp" +#endif // An InstanceKlass is the VM level representation of a Java class. // It contains all information needed for at class at execution runtime. 
@@ -833,7 +835,7 @@ // support for stub routines static ByteSize init_state_offset() { return in_ByteSize(offset_of(InstanceKlass, _init_state)); } - TRACE_DEFINE_OFFSET; + JFR_ONLY(DEFINE_KLASS_TRACE_ID_OFFSET;) static ByteSize init_thread_offset() { return in_ByteSize(offset_of(InstanceKlass, _init_thread)); } // subclass/subinterface checks --- old/src/share/vm/oops/klass.cpp 2019-02-15 19:02:35.370905237 +0300 +++ new/src/share/vm/oops/klass.cpp 2019-02-15 19:02:35.258909151 +0300 @@ -38,7 +38,6 @@ #include "oops/oop.inline2.hpp" #include "runtime/atomic.inline.hpp" #include "runtime/orderAccess.inline.hpp" -#include "trace/traceMacros.hpp" #include "utilities/stack.hpp" #include "utilities/macros.hpp" #if INCLUDE_ALL_GCS @@ -47,6 +46,9 @@ #include "gc_implementation/parallelScavenge/psPromotionManager.hpp" #include "gc_implementation/parallelScavenge/psScavenge.hpp" #endif // INCLUDE_ALL_GCS +#if INCLUDE_JFR +#include "jfr/support/jfrTraceIdExtension.hpp" +#endif bool Klass::is_cloneable() const { return _access_flags.is_cloneable() || @@ -197,7 +199,6 @@ set_subklass(NULL); set_next_sibling(NULL); set_next_link(NULL); - TRACE_INIT_ID(this); set_prototype_header(markOopDesc::prototype()); set_biased_lock_revocation_count(0); @@ -526,6 +527,7 @@ void Klass::remove_unshareable_info() { assert (DumpSharedSpaces, "only called for DumpSharedSpaces"); + JFR_ONLY(REMOVE_ID(this);) set_subklass(NULL); set_next_sibling(NULL); // Clear the java mirror @@ -537,7 +539,7 @@ } void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) { - TRACE_INIT_ID(this); + JFR_ONLY(RESTORE_ID(this);) // If an exception happened during CDS restore, some of these fields may already be // set. We leave the class on the CLD list, even if incomplete so that we don't // modify the CLD list outside a safepoint. 
--- old/src/share/vm/oops/klass.hpp 2019-02-15 19:02:35.630896149 +0300 +++ new/src/share/vm/oops/klass.hpp 2019-02-15 19:02:35.530899645 +0300 @@ -32,7 +32,6 @@ #include "oops/klassPS.hpp" #include "oops/metadata.hpp" #include "oops/oop.hpp" -#include "trace/traceMacros.hpp" #include "utilities/accessFlags.hpp" #include "utilities/macros.hpp" #if INCLUDE_ALL_GCS @@ -40,6 +39,9 @@ #include "gc_implementation/g1/g1OopClosures.hpp" #include "gc_implementation/parNew/parOopClosures.hpp" #endif // INCLUDE_ALL_GCS +#if INCLUDE_JFR +#include "jfr/support/jfrTraceIdExtension.hpp" +#endif // // A Klass provides: @@ -170,7 +172,7 @@ markOop _prototype_header; // Used when biased locking is both enabled and disabled for this type jint _biased_lock_revocation_count; - TRACE_DEFINE_KLASS_TRACE_ID; + JFR_ONLY(DEFINE_TRACE_ID_FIELD;) // Remembered sets support for the oops in the klasses. jbyte _modified_oops; // Card Table Equivalent (YC/CMS support) @@ -612,7 +614,7 @@ jlong last_biased_lock_bulk_revocation_time() { return _last_biased_lock_bulk_revocation_time; } void set_last_biased_lock_bulk_revocation_time(jlong cur_time) { _last_biased_lock_bulk_revocation_time = cur_time; } - TRACE_DEFINE_KLASS_METHODS; + JFR_ONLY(DEFINE_TRACE_ID_METHODS;) // garbage collection support virtual void oops_do(OopClosure* cl); --- old/src/share/vm/oops/method.hpp 2019-02-15 19:02:35.974884126 +0300 +++ new/src/share/vm/oops/method.hpp 2019-02-15 19:02:35.878887482 +0300 @@ -37,6 +37,10 @@ #include "oops/typeArrayOop.hpp" #include "utilities/accessFlags.hpp" #include "utilities/growableArray.hpp" +#include "utilities/macros.hpp" +#if INCLUDE_JFR +#include "jfr/support/jfrTraceIdExtension.hpp" +#endif // A Method* represents a Java method. // @@ -116,6 +120,8 @@ _has_injected_profile : 1, : 2; + JFR_ONLY(DEFINE_TRACE_FLAG;) + #ifndef PRODUCT int _compiled_invocation_count; // Number of nmethod invocations so far (for perf. 
debugging) #endif @@ -805,6 +811,8 @@ bool has_injected_profile() { return _has_injected_profile; } void set_has_injected_profile(bool x) { _has_injected_profile = x; } + JFR_ONLY(DEFINE_TRACE_FLAG_ACCESSOR;) + ConstMethod::MethodType method_type() const { return _constMethod->method_type(); } --- old/src/share/vm/opto/bytecodeInfo.cpp 2019-02-15 19:02:36.290873083 +0300 +++ new/src/share/vm/opto/bytecodeInfo.cpp 2019-02-15 19:02:36.174877137 +0300 @@ -29,6 +29,7 @@ #include "compiler/compileBroker.hpp" #include "compiler/compileLog.hpp" #include "interpreter/linkResolver.hpp" +#include "jfr/jfrEvents.hpp" #include "oops/objArrayKlass.hpp" #include "opto/callGenerator.hpp" #include "opto/parse.hpp" @@ -479,9 +480,28 @@ return NULL; } +static void post_inlining_event(int compile_id,const char* msg, bool success, int bci, ciMethod* caller, ciMethod* callee) { + assert(caller != NULL, "invariant"); + assert(callee != NULL, "invariant"); + EventCompilerInlining event; + if (event.should_commit()) { + JfrStructCalleeMethod callee_struct; + callee_struct.set_type(callee->holder()->name()->as_utf8()); + callee_struct.set_name(callee->name()->as_utf8()); + callee_struct.set_descriptor(callee->signature()->as_symbol()->as_utf8()); + event.set_compileId(compile_id); + event.set_message(msg); + event.set_succeeded(success); + event.set_bci(bci); + event.set_caller(caller->get_Method()); + event.set_callee(callee_struct); + event.commit(); + } +} + //------------------------------print_inlining--------------------------------- void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, - bool success) const { + ciMethod* caller_method, bool success) const { const char* inline_msg = msg(); assert(inline_msg != NULL, "just checking"); if (C->log() != NULL) { @@ -500,6 +520,7 @@ //tty->print(" bcs: %d+%d invoked: %d", top->count_inline_bcs(), callee_method->code_size(), callee_method->interpreter_invocation_count()); } } + post_inlining_event(C->compile_id(), 
inline_msg, success, caller_bci, caller_method, callee_method); } //------------------------------ok_to_inline----------------------------------- @@ -522,14 +543,14 @@ // Do some initial checks. if (!pass_initial_checks(caller_method, caller_bci, callee_method)) { set_msg("failed initial checks"); - print_inlining(callee_method, caller_bci, false /* !success */); + print_inlining(callee_method, caller_bci, caller_method, false /* !success */); return NULL; } // Do some parse checks. set_msg(check_can_parse(callee_method)); if (msg() != NULL) { - print_inlining(callee_method, caller_bci, false /* !success */); + print_inlining(callee_method, caller_bci, caller_method, false /* !success */); return NULL; } @@ -571,7 +592,7 @@ if (msg() == NULL) { set_msg("inline (hot)"); } - print_inlining(callee_method, caller_bci, true /* success */); + print_inlining(callee_method, caller_bci, caller_method, true /* success */); build_inline_tree_for_callee(callee_method, jvms, caller_bci); if (InlineWarmCalls && !wci.is_hot()) return new (C) WarmCallInfo(wci); // copy to heap @@ -582,7 +603,7 @@ if (msg() == NULL) { set_msg("too cold to inline"); } - print_inlining(callee_method, caller_bci, false /* !success */ ); + print_inlining(callee_method, caller_bci, caller_method, false /* !success */ ); return NULL; } --- old/src/share/vm/opto/compile.cpp 2019-02-15 19:02:36.578863018 +0300 +++ new/src/share/vm/opto/compile.cpp 2019-02-15 19:02:36.462867072 +0300 @@ -32,6 +32,7 @@ #include "compiler/compileLog.hpp" #include "compiler/disassembler.hpp" #include "compiler/oopMap.hpp" +#include "jfr/jfrEvents.hpp" #include "opto/addnode.hpp" #include "opto/block.hpp" #include "opto/c2compiler.hpp" @@ -65,7 +66,6 @@ #include "runtime/signature.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/timer.hpp" -#include "trace/tracing.hpp" #include "utilities/copy.hpp" #if defined AD_MD_HPP # include AD_MD_HPP @@ -81,7 +81,6 @@ # include "adfiles/ad_ppc_64.hpp" #endif - // 
-------------------- Compile::mach_constant_base_node ----------------------- // Constant table base node singleton. MachConstantBaseNode* Compile::mach_constant_base_node() { @@ -3588,13 +3587,6 @@ _failure_reason = reason; } - EventCompilerFailure event; - if (event.should_commit()) { - event.set_compileID(Compile::compile_id()); - event.set_failure(reason); - event.commit(); - } - if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) { C->print_method(PHASE_FAILURE); } --- old/src/share/vm/opto/compile.hpp 2019-02-15 19:02:36.938850437 +0300 +++ new/src/share/vm/opto/compile.hpp 2019-02-15 19:02:36.830854211 +0300 @@ -31,6 +31,7 @@ #include "code/exceptionHandlerTable.hpp" #include "compiler/compilerOracle.hpp" #include "compiler/compileBroker.hpp" +#include "jfr/jfrEvents.hpp" #include "libadt/dict.hpp" #include "libadt/port.hpp" #include "libadt/vectset.hpp" @@ -41,7 +42,6 @@ #include "opto/regmask.hpp" #include "runtime/deoptimization.hpp" #include "runtime/vmThread.hpp" -#include "trace/tracing.hpp" #include "utilities/ticks.hpp" class Block; @@ -637,7 +637,7 @@ if (event.should_commit()) { event.set_starttime(C->_latest_stage_start_counter); event.set_phase((u1) cpt); - event.set_compileID(C->_compile_id); + event.set_compileId(C->_compile_id); event.set_phaseLevel(level); event.commit(); } @@ -654,7 +654,7 @@ if (event.should_commit()) { event.set_starttime(C->_latest_stage_start_counter); event.set_phase((u1) PHASE_END); - event.set_compileID(C->_compile_id); + event.set_compileId(C->_compile_id); event.set_phaseLevel(level); event.commit(); } --- old/src/share/vm/opto/library_call.cpp 2019-02-15 19:02:37.302837717 +0300 +++ new/src/share/vm/opto/library_call.cpp 2019-02-15 19:02:37.206841071 +0300 @@ -27,6 +27,7 @@ #include "classfile/vmSymbols.hpp" #include "compiler/compileBroker.hpp" #include "compiler/compileLog.hpp" +#include "jfr/support/jfrIntrinsics.hpp" #include "oops/objArrayKlass.hpp" #include "opto/addnode.hpp" #include 
"opto/callGenerator.hpp" @@ -40,7 +41,7 @@ #include "opto/subnode.hpp" #include "prims/nativeLookup.hpp" #include "runtime/sharedRuntime.hpp" -#include "trace/traceMacros.hpp" +#include "utilities/macros.hpp" class LibraryIntrinsic : public InlineCallGenerator { // Extend the set of intrinsics known to the runtime: @@ -236,9 +237,9 @@ bool inline_unsafe_allocate(); bool inline_unsafe_copyMemory(); bool inline_native_currentThread(); -#ifdef TRACE_HAVE_INTRINSICS +#ifdef JFR_HAVE_INTRINSICS bool inline_native_classID(); - bool inline_native_threadID(); + bool inline_native_getEventWriter(); #endif bool inline_native_time_funcs(address method, const char* funcName); bool inline_native_isInterrupted(); @@ -879,10 +880,10 @@ case vmIntrinsics::_currentThread: return inline_native_currentThread(); case vmIntrinsics::_isInterrupted: return inline_native_isInterrupted(); -#ifdef TRACE_HAVE_INTRINSICS - case vmIntrinsics::_classID: return inline_native_classID(); - case vmIntrinsics::_threadID: return inline_native_threadID(); - case vmIntrinsics::_counterTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), "counterTime"); +#ifdef JFR_HAVE_INTRINSICS + case vmIntrinsics::_counterTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JFR_TIME_FUNCTION), "counterTime"); + case vmIntrinsics::_getClassId: return inline_native_classID(); + case vmIntrinsics::_getEventWriter: return inline_native_getEventWriter(); #endif case vmIntrinsics::_currentTimeMillis: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis"); case vmIntrinsics::_nanoTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime"); @@ -3265,51 +3266,76 @@ return true; } -#ifdef TRACE_HAVE_INTRINSICS +#ifdef JFR_HAVE_INTRINSICS /* * oop -> myklass * myklass->trace_id |= USED * return myklass->trace_id & ~0x3 */ bool LibraryCallKit::inline_native_classID() { - null_check_receiver(); // 
null-check, then ignore - Node* cls = null_check(argument(1), T_OBJECT); + Node* cls = null_check(argument(0), T_OBJECT); Node* kls = load_klass_from_mirror(cls, false, NULL, 0); kls = null_check(kls, T_OBJECT); - ByteSize offset = TRACE_ID_OFFSET; + + ByteSize offset = KLASS_TRACE_ID_OFFSET; Node* insp = basic_plus_adr(kls, in_bytes(offset)); Node* tvalue = make_load(NULL, insp, TypeLong::LONG, T_LONG, MemNode::unordered); - Node* bits = longcon(~0x03l); // ignore bit 0 & 1 - Node* andl = _gvn.transform(new (C) AndLNode(tvalue, bits)); + Node* clsused = longcon(0x01l); // set the class bit Node* orl = _gvn.transform(new (C) OrLNode(tvalue, clsused)); - const TypePtr *adr_type = _gvn.type(insp)->isa_ptr(); store_to_memory(control(), insp, orl, T_LONG, adr_type, MemNode::unordered); - set_result(andl); + +#ifdef TRACE_ID_META_BITS + Node* mbits = longcon(~TRACE_ID_META_BITS); + tvalue = _gvn.transform(new (C) AndLNode(tvalue, mbits)); +#endif +#ifdef TRACE_ID_SHIFT + Node* cbits = intcon(TRACE_ID_SHIFT); + tvalue = _gvn.transform(new (C) URShiftLNode(tvalue, cbits)); +#endif + + set_result(tvalue); return true; } -bool LibraryCallKit::inline_native_threadID() { - Node* tls_ptr = NULL; - Node* cur_thr = generate_current_thread(tls_ptr); - Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset())); - Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered); - p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::thread_id_offset())); - - Node* threadid = NULL; - size_t thread_id_size = OSThread::thread_id_size(); - if (thread_id_size == (size_t) BytesPerLong) { - threadid = ConvL2I(make_load(control(), p, TypeLong::LONG, T_LONG, MemNode::unordered)); - } else if (thread_id_size == (size_t) BytesPerInt) { - threadid = make_load(control(), p, TypeInt::INT, T_INT, MemNode::unordered); - } else { - ShouldNotReachHere(); - } - set_result(threadid); +bool LibraryCallKit::inline_native_getEventWriter() { 
+ Node* tls_ptr = _gvn.transform(new (C) ThreadLocalNode()); + + Node* jobj_ptr = basic_plus_adr(top(), tls_ptr, + in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR) + ); + + Node* jobj = make_load(control(), jobj_ptr, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered); + + Node* jobj_cmp_null = _gvn.transform( new (C) CmpPNode(jobj, null()) ); + Node* test_jobj_eq_null = _gvn.transform( new (C) BoolNode(jobj_cmp_null, BoolTest::eq) ); + + IfNode* iff_jobj_null = + create_and_map_if(control(), test_jobj_eq_null, PROB_MIN, COUNT_UNKNOWN); + + enum { _normal_path = 1, + _null_path = 2, + PATH_LIMIT }; + + RegionNode* result_rgn = new (C) RegionNode(PATH_LIMIT); + PhiNode* result_val = new (C) PhiNode(result_rgn, TypePtr::BOTTOM); + + Node* jobj_is_null = _gvn.transform(new (C) IfTrueNode(iff_jobj_null)); + result_rgn->init_req(_null_path, jobj_is_null); + result_val->init_req(_null_path, null()); + + Node* jobj_is_not_null = _gvn.transform(new (C) IfFalseNode(iff_jobj_null)); + result_rgn->init_req(_normal_path, jobj_is_not_null); + + Node* res = make_load(jobj_is_not_null, jobj, TypeInstPtr::NOTNULL, T_OBJECT, MemNode::unordered); + result_val->init_req(_normal_path, res); + + set_result(result_rgn, result_val); + return true; } -#endif +#endif // JFR_HAVE_INTRINSICS //------------------------inline_native_time_funcs-------------- // inline code for System.currentTimeMillis() and System.nanoTime() --- old/src/share/vm/opto/parse.hpp 2019-02-15 19:02:37.822819546 +0300 +++ new/src/share/vm/opto/parse.hpp 2019-02-15 19:02:37.726822900 +0300 @@ -87,7 +87,7 @@ JVMState* jvms, WarmCallInfo* wci_result); void print_inlining(ciMethod* callee_method, int caller_bci, - bool success) const; + ciMethod* caller_method, bool success) const; InlineTree* caller_tree() const { return _caller_tree; } InlineTree* callee_at(int bci, ciMethod* m) const; --- old/src/share/vm/opto/superword.hpp 2019-02-15 19:02:38.130808783 +0300 +++ new/src/share/vm/opto/superword.hpp 2019-02-15 
19:02:38.026812417 +0300 @@ -201,6 +201,32 @@ static const SWNodeInfo initial; }; + +// JVMCI: OrderedPair is moved up to deal with compilation issues on Windows +//------------------------------OrderedPair--------------------------- +// Ordered pair of Node*. +class OrderedPair VALUE_OBJ_CLASS_SPEC { + protected: + Node* _p1; + Node* _p2; + public: + OrderedPair() : _p1(NULL), _p2(NULL) {} + OrderedPair(Node* p1, Node* p2) { + if (p1->_idx < p2->_idx) { + _p1 = p1; _p2 = p2; + } else { + _p1 = p2; _p2 = p1; + } + } + + bool operator==(const OrderedPair &rhs) { + return _p1 == rhs._p1 && _p2 == rhs._p2; + } + void print() { tty->print(" (%d, %d)", _p1->_idx, _p2->_idx); } + + static const OrderedPair initial; +}; + // -----------------------------SuperWord--------------------------------- // Transforms scalar operations into packed (superword) operations. class SuperWord : public ResourceObj { @@ -505,29 +531,4 @@ void print(); }; - -//------------------------------OrderedPair--------------------------- -// Ordered pair of Node*. 
-class OrderedPair VALUE_OBJ_CLASS_SPEC { - protected: - Node* _p1; - Node* _p2; - public: - OrderedPair() : _p1(NULL), _p2(NULL) {} - OrderedPair(Node* p1, Node* p2) { - if (p1->_idx < p2->_idx) { - _p1 = p1; _p2 = p2; - } else { - _p1 = p2; _p2 = p1; - } - } - - bool operator==(const OrderedPair &rhs) { - return _p1 == rhs._p1 && _p2 == rhs._p2; - } - void print() { tty->print(" (%d, %d)", _p1->_idx, _p2->_idx); } - - static const OrderedPair initial; -}; - #endif // SHARE_VM_OPTO_SUPERWORD_HPP --- old/src/share/vm/prims/jni.cpp 2019-02-15 19:02:38.398799419 +0300 +++ new/src/share/vm/prims/jni.cpp 2019-02-15 19:02:38.298802913 +0300 @@ -32,6 +32,8 @@ #include "classfile/systemDictionary.hpp" #include "classfile/vmSymbols.hpp" #include "interpreter/linkResolver.hpp" +#include "jfr/jfrEvents.hpp" +#include "jfr/support/jfrThreadId.hpp" #include "utilities/macros.hpp" #include "utilities/ostream.hpp" #if INCLUDE_ALL_GCS @@ -76,7 +78,6 @@ #include "runtime/vm_operations.hpp" #include "services/memTracker.hpp" #include "services/runtimeService.hpp" -#include "trace/tracing.hpp" #include "utilities/defaultStream.hpp" #include "utilities/dtrace.hpp" #include "utilities/events.hpp" @@ -5018,6 +5019,14 @@ return &jni_NativeInterface; } +static void post_thread_start_event(const JavaThread* jt) { + assert(jt != NULL, "invariant"); + EventThreadStart event; + if (event.should_commit()) { + event.set_thread(JFR_THREAD_ID(jt)); + event.commit(); + } +} // Invocation API @@ -5239,11 +5248,7 @@ JvmtiExport::post_thread_start(thread); } - EventThreadStart event; - if (event.should_commit()) { - event.set_javalangthread(java_lang_Thread::thread_id(thread->threadObj())); - event.commit(); - } + post_thread_start_event(thread); #ifndef PRODUCT #ifndef CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED @@ -5454,11 +5459,7 @@ JvmtiExport::post_thread_start(thread); } - EventThreadStart event; - if (event.should_commit()) { - 
event.set_javalangthread(java_lang_Thread::thread_id(thread->threadObj())); - event.commit(); - } + post_thread_start_event(thread); *(JNIEnv**)penv = thread->jni_environment(); --- old/src/share/vm/prims/jvm.cpp 2019-02-15 19:02:38.802785303 +0300 +++ new/src/share/vm/prims/jvm.cpp 2019-02-15 19:02:38.690789216 +0300 @@ -37,6 +37,7 @@ #include "classfile/vmSymbols.hpp" #include "gc_interface/collectedHeap.inline.hpp" #include "interpreter/bytecode.hpp" +#include "jfr/jfrEvents.hpp" #include "memory/oopFactory.hpp" #include "memory/referenceType.hpp" #include "memory/universe.inline.hpp" @@ -67,7 +68,6 @@ #include "services/attachListener.hpp" #include "services/management.hpp" #include "services/threadService.hpp" -#include "trace/tracing.hpp" #include "utilities/copy.hpp" #include "utilities/defaultStream.hpp" #include "utilities/dtrace.hpp" @@ -3209,6 +3209,12 @@ } JVM_END +static void post_thread_sleep_event(EventThreadSleep* event, jlong millis) { + assert(event != NULL, "invariant"); + assert(event->should_commit(), "invariant"); + event->set_time(millis); + event->commit(); +} JVM_ENTRY(void, JVM_Sleep(JNIEnv* env, jclass threadClass, jlong millis)) JVMWrapper("JVM_Sleep"); @@ -3255,8 +3261,7 @@ // us while we were sleeping. We do not overwrite those. 
if (!HAS_PENDING_EXCEPTION) { if (event.should_commit()) { - event.set_time(millis); - event.commit(); + post_thread_sleep_event(&event, millis); } #ifndef USDT2 HS_DTRACE_PROBE1(hotspot, thread__sleep__end,1); @@ -3272,8 +3277,7 @@ thread->osthread()->set_state(old_state); } if (event.should_commit()) { - event.set_time(millis); - event.commit(); + post_thread_sleep_event(&event, millis); } #ifndef USDT2 HS_DTRACE_PROBE1(hotspot, thread__sleep__end,0); --- old/src/share/vm/prims/nativeLookup.cpp 2019-02-15 19:02:39.198771466 +0300 +++ new/src/share/vm/prims/nativeLookup.cpp 2019-02-15 19:02:39.090775239 +0300 @@ -41,6 +41,9 @@ #include "runtime/sharedRuntime.hpp" #include "runtime/signature.hpp" #include "utilities/macros.hpp" +#if INCLUDE_JFR +#include "jfr/jfr.hpp" +#endif #ifdef TARGET_OS_FAMILY_linux # include "os_linux.inline.hpp" #endif @@ -136,6 +139,9 @@ { CC"Java_java_lang_invoke_MethodHandleNatives_registerNatives", NULL, FN_PTR(JVM_RegisterMethodHandleMethods) }, { CC"Java_sun_misc_Perf_registerNatives", NULL, FN_PTR(JVM_RegisterPerfMethods) }, { CC"Java_sun_hotspot_WhiteBox_registerNatives", NULL, FN_PTR(JVM_RegisterWhiteBoxMethods) }, +#if INCLUDE_JFR + { CC"Java_jdk_jfr_internal_JVM_registerNatives", NULL, FN_PTR(jfr_register_natives) }, +#endif }; static address lookup_special_native(char* jni_name) { --- old/src/share/vm/prims/unsafe.cpp 2019-02-15 19:02:39.474761824 +0300 +++ new/src/share/vm/prims/unsafe.cpp 2019-02-15 19:02:39.378765178 +0300 @@ -28,6 +28,7 @@ #if INCLUDE_ALL_GCS #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" #endif // INCLUDE_ALL_GCS +#include "jfr/jfrEvents.hpp" #include "memory/allocation.inline.hpp" #include "prims/jni.h" #include "prims/jvm.h" @@ -38,7 +39,6 @@ #include "runtime/reflection.hpp" #include "runtime/synchronizer.hpp" #include "services/threadService.hpp" -#include "trace/tracing.hpp" #include "utilities/copy.hpp" #include "utilities/dtrace.hpp" @@ -1236,6 +1236,15 @@ #endif UNSAFE_END +static void 
post_thread_park_event(EventThreadPark* event, const oop obj, jlong timeout) { + assert(event != NULL, "invariant"); + assert(event->should_commit(), "invariant"); + event->set_parkedClass((obj != NULL) ? obj->klass() : NULL); + event->set_timeout(timeout); + event->set_address((obj != NULL) ? (u8)cast_from_oop(obj) : 0); + event->commit(); +} + UNSAFE_ENTRY(void, Unsafe_Park(JNIEnv *env, jobject unsafe, jboolean isAbsolute, jlong time)) UnsafeWrapper("Unsafe_Park"); EventThreadPark event; @@ -1254,11 +1263,7 @@ (uintptr_t) thread->parker()); #endif /* USDT2 */ if (event.should_commit()) { - oop obj = thread->current_park_blocker(); - event.set_klass((obj != NULL) ? obj->klass() : NULL); - event.set_timeout(time); - event.set_address((obj != NULL) ? (TYPE_ADDRESS) cast_from_oop(obj) : 0); - event.commit(); + post_thread_park_event(&event, thread->current_park_blocker(), time); } UNSAFE_END --- old/src/share/vm/runtime/arguments.cpp 2019-02-15 19:02:39.798750504 +0300 +++ new/src/share/vm/runtime/arguments.cpp 2019-02-15 19:02:39.670754976 +0300 @@ -44,6 +44,9 @@ #include "utilities/macros.hpp" #include "utilities/stringUtils.hpp" #include "utilities/taskqueue.hpp" +#if INCLUDE_JFR +#include "jfr/jfr.hpp" +#endif #ifdef TARGET_OS_FAMILY_linux # include "os_linux.inline.hpp" #endif @@ -151,6 +154,20 @@ } } +#if INCLUDE_JFR +// return true on failure +static bool match_jfr_option(const JavaVMOption** option) { + assert((*option)->optionString != NULL, "invariant"); + char* tail = NULL; + if (match_option(*option, "-XX:StartFlightRecording", (const char**)&tail)) { + return Jfr::on_start_flight_recording_option(option, tail); + } else if (match_option(*option, "-XX:FlightRecorderOptions", (const char**)&tail)) { + return Jfr::on_flight_recorder_option(option, tail); + } + return false; +} +#endif + static void logOption(const char* opt) { if (PrintVMOptions) { jio_fprintf(defaultStream::output_stream(), "VM option '%s'\n", opt); @@ -552,8 +569,7 @@ /* Scan the 
directory for jars/zips, appending them to path. */ struct dirent *entry; - char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(directory), mtInternal); - while ((entry = os::readdir(dir, (dirent *) dbuf)) != NULL) { + while ((entry = os::readdir(dir)) != NULL) { const char* name = entry->d_name; const char* ext = name + strlen(name) - 4; bool isJarOrZip = ext > name && @@ -567,7 +583,6 @@ FREE_C_HEAP_ARRAY(char, jarpath, mtInternal); } } - FREE_C_HEAP_ARRAY(char, dbuf, mtInternal); os::closedir(dir); return path; } @@ -3397,6 +3412,10 @@ "ManagementServer is not supported in this VM.\n"); return JNI_ERR; #endif // INCLUDE_MANAGEMENT +#if INCLUDE_JFR + } else if (match_jfr_option(&option)) { + return JNI_EINVAL; +#endif } else if (match_option(option, "-XX:", &tail)) { // -XX:xxxx // Skip -XX:Flags= since that case has already been handled if (strncmp(tail, "Flags=", strlen("Flags=")) != 0) { @@ -3481,14 +3500,12 @@ if (dir == NULL) return false; struct dirent *entry; - char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(directory), mtInternal); bool hasJarFile = false; - while (!hasJarFile && (entry = os::readdir(dir, (dirent *) dbuf)) != NULL) { + while (!hasJarFile && (entry = os::readdir(dir)) != NULL) { const char* name = entry->d_name; const char* ext = name + strlen(name) - 4; hasJarFile = ext > name && (os::file_name_strcmp(ext, ".jar") == 0); } - FREE_C_HEAP_ARRAY(char, dbuf, mtInternal); os::closedir(dir); return hasJarFile ; } @@ -3570,8 +3587,7 @@ if (dir != NULL) { int num_ext_jars = 0; struct dirent *entry; - char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(extDir), mtInternal); - while ((entry = os::readdir(dir, (dirent *) dbuf)) != NULL) { + while ((entry = os::readdir(dir)) != NULL) { const char* name = entry->d_name; const char* ext = name + strlen(name) - 4; if (ext > name && (os::file_name_strcmp(ext, ".jar") == 0)) { @@ -3590,7 +3606,6 @@ } } } - FREE_C_HEAP_ARRAY(char, dbuf, mtInternal); os::closedir(dir); if (num_ext_jars > 
0) { nonEmptyDirs += 1; --- old/src/share/vm/runtime/frame.cpp 2019-02-15 19:02:40.282733594 +0300 +++ new/src/share/vm/runtime/frame.cpp 2019-02-15 19:02:40.142738485 +0300 @@ -239,6 +239,19 @@ return NULL; } +bool frame::is_entry_frame_valid(JavaThread* thread) const { + // Validate the JavaCallWrapper an entry frame must have + address jcw = (address)entry_frame_call_wrapper(); + bool jcw_safe = (jcw < thread->stack_base()) && (jcw > (address)fp()); // less than stack base + if (!jcw_safe) { + return false; + } + + // Validate sp saved in the java frame anchor + JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor(); + return (jfa->last_Java_sp() > sp()); +} + bool frame::should_be_deoptimized() const { if (_deopt_state == is_deoptimized || !is_compiled_frame() ) return false; --- old/src/share/vm/runtime/frame.hpp 2019-02-15 19:02:40.566723673 +0300 +++ new/src/share/vm/runtime/frame.hpp 2019-02-15 19:02:40.458727446 +0300 @@ -181,6 +181,8 @@ frame sender_for_interpreter_frame(RegisterMap* map) const; frame sender_for_native_frame(RegisterMap* map) const; + bool is_entry_frame_valid(JavaThread* thread) const; + // All frames: // A low-level interface for vframes: --- old/src/share/vm/runtime/globals.cpp 2019-02-15 19:02:40.874712913 +0300 +++ new/src/share/vm/runtime/globals.cpp 2019-02-15 19:02:40.758716965 +0300 @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "jfr/jfrEvents.hpp" #include "memory/allocation.inline.hpp" #include "oops/oop.inline.hpp" #include "runtime/arguments.hpp" @@ -31,7 +32,6 @@ #include "utilities/ostream.hpp" #include "utilities/macros.hpp" #include "utilities/top.hpp" -#include "trace/tracing.hpp" #if INCLUDE_ALL_GCS #include "gc_implementation/g1/g1_globals.hpp" #endif // INCLUDE_ALL_GCS @@ -610,8 +610,8 @@ { E e; e.set_name(name); - e.set_old_value(old_value); - e.set_new_value(new_value); + e.set_oldValue(old_value); + e.set_newValue(new_value); e.set_origin(origin); e.commit(); } --- 
old/src/share/vm/runtime/globals.hpp 2019-02-15 19:02:41.194701734 +0300 +++ new/src/share/vm/runtime/globals.hpp 2019-02-15 19:02:41.078705786 +0300 @@ -204,6 +204,10 @@ #endif // no compilers +#if !INCLUDE_JFR +#define LogJFR false +#endif + // string type aliases used only in this file typedef const char* ccstr; typedef const char* ccstrlist; // represents string arguments which accumulate @@ -3982,15 +3986,27 @@ "Allocation less than this value will be allocated " \ "using malloc. Larger allocations will use mmap.") \ \ - product(bool, EnableTracing, false, \ - "Enable event-based tracing") \ - \ - product(bool, UseLockedTracing, false, \ - "Use locked-tracing when doing event-based tracing") \ - \ product_pd(bool, PreserveFramePointer, \ "Use the FP register for holding the frame pointer " \ - "and not as a general purpose register.") + "and not as a general purpose register.") \ + \ + JFR_ONLY(product(bool, FlightRecorder, false, \ + "Enable Flight Recorder")) \ + \ + JFR_ONLY(product(ccstr, FlightRecorderOptions, NULL, \ + "Flight Recorder options")) \ + \ + JFR_ONLY(product(ccstr, StartFlightRecording, NULL, \ + "Start flight recording with options")) \ + \ + JFR_ONLY(product(bool, UnlockCommercialFeatures, false, \ + "This flag is ignored. Left for compatibility")) \ + \ + experimental(bool, UseFastUnorderedTimeStamps, false, \ + "Use platform unstable time where supported for timestamps only") \ + \ + JFR_ONLY(product(bool, LogJFR, false, \ + "Enable JFR logging (consider +Verbose)")) \ /* * Macros for factoring of globals --- old/src/share/vm/runtime/handles.cpp 2019-02-15 19:02:41.602687482 +0300 +++ new/src/share/vm/runtime/handles.cpp 2019-02-15 19:02:41.494691255 +0300 @@ -72,7 +72,8 @@ while (bottom < top) { // This test can be moved up but for now check every oop. 
- assert((*bottom)->is_oop(), "handle should point to oop"); + // JFR is known to set mark word to 0 for duration of leak analysis VM operation + assert((*bottom)->is_oop(INCLUDE_JFR), "handle should point to oop"); f->do_oop(bottom++); } --- old/src/share/vm/runtime/java.cpp 2019-02-15 19:02:41.842679098 +0300 +++ new/src/share/vm/runtime/java.cpp 2019-02-15 19:02:41.738682731 +0300 @@ -30,6 +30,8 @@ #include "compiler/compileBroker.hpp" #include "compiler/compilerOracle.hpp" #include "interpreter/bytecodeHistogram.hpp" +#include "jfr/jfrEvents.hpp" +#include "jfr/support/jfrThreadId.hpp" #include "memory/genCollectedHeap.hpp" #include "memory/oopFactory.hpp" #include "memory/universe.hpp" @@ -58,7 +60,6 @@ #include "runtime/timer.hpp" #include "runtime/vm_operations.hpp" #include "services/memTracker.hpp" -#include "trace/tracing.hpp" #include "utilities/dtrace.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/histogram.hpp" @@ -95,6 +96,9 @@ #include "opto/indexSet.hpp" #include "opto/runtime.hpp" #endif +#if INCLUDE_JFR +#include "jfr/jfr.hpp" +#endif #ifndef USDT2 HS_DTRACE_PROBE_DECL(hotspot, vm__shutdown); @@ -523,10 +527,12 @@ EventThreadEnd event; if (event.should_commit()) { - event.set_javalangthread(java_lang_Thread::thread_id(thread->threadObj())); - event.commit(); + event.set_thread(JFR_THREAD_ID(thread)); + event.commit(); } + JFR_ONLY(Jfr::on_vm_shutdown();) + // Always call even when there are not JVMTI environments yet, since environments // may be attached late and JVMTI must track phases of VM execution JvmtiExport::post_vm_death(); --- old/src/share/vm/runtime/mutexLocker.cpp 2019-02-15 19:02:42.218665965 +0300 +++ new/src/share/vm/runtime/mutexLocker.cpp 2019-02-15 19:02:42.082670715 +0300 @@ -126,12 +126,16 @@ Monitor* Service_lock = NULL; Monitor* PeriodicTask_lock = NULL; -#ifdef INCLUDE_TRACE +#ifdef INCLUDE_JFR Mutex* JfrStacktrace_lock = NULL; Monitor* JfrMsg_lock = NULL; Mutex* JfrBuffer_lock = NULL; Mutex* 
JfrStream_lock = NULL; Mutex* JfrThreadGroups_lock = NULL; + +#ifndef SUPPORTS_NATIVE_CX8 +Mutex* JfrCounters_lock = NULL; +#endif #endif #ifndef SUPPORTS_NATIVE_CX8 @@ -280,12 +284,16 @@ def(CompileThread_lock , Monitor, nonleaf+5, false ); def(PeriodicTask_lock , Monitor, nonleaf+5, true); -#ifdef INCLUDE_TRACE +#ifdef INCLUDE_JFR def(JfrMsg_lock , Monitor, leaf, true); def(JfrBuffer_lock , Mutex, leaf, true); def(JfrThreadGroups_lock , Mutex, leaf, true); def(JfrStream_lock , Mutex, nonleaf, true); def(JfrStacktrace_lock , Mutex, special, true); + +#ifndef SUPPORTS_NATIVE_CX8 + def(JfrCounters_lock , Mutex, special, false); +#endif #endif #ifndef SUPPORTS_NATIVE_CX8 --- old/src/share/vm/runtime/mutexLocker.hpp 2019-02-15 19:02:42.618651993 +0300 +++ new/src/share/vm/runtime/mutexLocker.hpp 2019-02-15 19:02:42.458657582 +0300 @@ -142,12 +142,17 @@ extern Monitor* Service_lock; // a lock used for service thread operation extern Monitor* PeriodicTask_lock; // protects the periodic task structure -#ifdef INCLUDE_TRACE +#ifdef INCLUDE_JFR extern Mutex* JfrStacktrace_lock; // used to guard access to the JFR stacktrace table extern Monitor* JfrMsg_lock; // protects JFR messaging extern Mutex* JfrBuffer_lock; // protects JFR buffer operations extern Mutex* JfrStream_lock; // protects JFR stream access extern Mutex* JfrThreadGroups_lock; // protects JFR access to Thread Groups + +#ifndef SUPPORTS_NATIVE_CX8 +extern Mutex* JfrCounters_lock; // provides atomic updates of JFR counters +#endif + #endif #ifndef SUPPORTS_NATIVE_CX8 --- old/src/share/vm/runtime/objectMonitor.cpp 2019-02-15 19:02:42.986639140 +0300 +++ new/src/share/vm/runtime/objectMonitor.cpp 2019-02-15 19:02:42.834644449 +0300 @@ -24,6 +24,8 @@ #include "precompiled.hpp" #include "classfile/vmSymbols.hpp" +#include "jfr/jfrEvents.hpp" +#include "jfr/support/jfrThreadId.hpp" #include "memory/resourceArea.hpp" #include "oops/markOop.hpp" #include "oops/oop.inline.hpp" @@ -37,8 +39,6 @@ #include 
"runtime/stubRoutines.hpp" #include "runtime/thread.inline.hpp" #include "services/threadService.hpp" -#include "trace/tracing.hpp" -#include "trace/traceMacros.hpp" #include "utilities/dtrace.hpp" #include "utilities/macros.hpp" #include "utilities/preserveException.hpp" @@ -54,6 +54,9 @@ #ifdef TARGET_OS_FAMILY_bsd # include "os_bsd.inline.hpp" #endif +#if INCLUDE_JFR +#include "jfr/support/jfrFlush.hpp" +#endif #if defined(__GNUC__) && !defined(IA64) && !defined(PPC64) // Need to inhibit inlining for older versions of GCC to avoid build-time failures @@ -376,7 +379,12 @@ // Ensure the object-monitor relationship remains stable while there's contention. Atomic::inc_ptr(&_count); + JFR_ONLY(JfrConditionalFlushWithStacktrace flush(jt);) EventJavaMonitorEnter event; + if (event.should_commit()) { + event.set_monitorClass(((oop)this->object())->klass()); + event.set_address((uintptr_t)(this->object_addr())); + } { // Change java thread status to indicate blocked on monitor enter. JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this); @@ -465,9 +473,7 @@ } if (event.should_commit()) { - event.set_klass(((oop)this->object())->klass()); - event.set_previousOwner((TYPE_JAVALANGTHREAD)_previous_owner_tid); - event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr())); + event.set_previousOwner((uintptr_t)_previous_owner_tid); event.commit(); } @@ -990,11 +996,11 @@ _Responsible = NULL ; } -#if INCLUDE_TRACE +#if INCLUDE_JFR // get the owner's thread id for the MonitorEnter event // if it is enabled and the thread isn't suspended - if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) { - _previous_owner_tid = SharedRuntime::get_java_tid(Self); + if (not_suspended && EventJavaMonitorEnter::is_enabled()) { + _previous_owner_tid = JFR_THREAD_ID(Self); } #endif @@ -1443,15 +1449,17 @@ } // helper method for posting a monitor wait event -void ObjectMonitor::post_monitor_wait_event(EventJavaMonitorWait* event, - jlong notifier_tid, - jlong timeout, - 
bool timedout) { - event->set_klass(((oop)this->object())->klass()); - event->set_timeout((TYPE_ULONG)timeout); - event->set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr())); - event->set_notifier((TYPE_OSTHREAD)notifier_tid); - event->set_timedOut((TYPE_BOOLEAN)timedout); +static void post_monitor_wait_event(EventJavaMonitorWait* event, + ObjectMonitor* monitor, + jlong notifier_tid, + jlong timeout, + bool timedout) { + assert(monitor != NULL, "invariant"); + event->set_monitorClass(((oop)monitor->object())->klass()); + event->set_timeout(timeout); + event->set_address((uintptr_t)monitor->object_addr()); + event->set_notifier((u8)notifier_tid); + event->set_timedOut(timedout); event->commit(); } @@ -1489,7 +1497,7 @@ // this ObjectMonitor. } if (event.should_commit()) { - post_monitor_wait_event(&event, 0, millis, false); + post_monitor_wait_event(&event, this, 0, millis, false); } TEVENT (Wait - Throw IEX) ; THROW(vmSymbols::java_lang_InterruptedException()); @@ -1633,7 +1641,7 @@ } if (event.should_commit()) { - post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT); + post_monitor_wait_event(&event, this, node._notifier_tid, millis, ret == OS_TIMEOUT); } OrderAccess::fence() ; @@ -1716,7 +1724,7 @@ } iterator->_notified = 1 ; Thread * Self = THREAD; - iterator->_notifier_tid = Self->osthread()->thread_id(); + iterator->_notifier_tid = JFR_THREAD_ID(Self); ObjectWaiter * List = _EntryList ; if (List != NULL) { @@ -1842,7 +1850,7 @@ guarantee (iterator->_notified == 0, "invariant") ; iterator->_notified = 1 ; Thread * Self = THREAD; - iterator->_notifier_tid = Self->osthread()->thread_id(); + iterator->_notifier_tid = JFR_THREAD_ID(Self); if (Policy != 4) { iterator->TState = ObjectWaiter::TS_ENTER ; } --- old/src/share/vm/runtime/objectMonitor.hpp 2019-02-15 19:02:43.414624192 +0300 +++ new/src/share/vm/runtime/objectMonitor.hpp 2019-02-15 19:02:43.310627824 +0300 @@ -55,9 +55,6 @@ void wait_reenter_end(ObjectMonitor *mon); }; -// 
forward declaration to avoid include tracing.hpp -class EventJavaMonitorWait; - // WARNING: // This is a very sensitive and fragile class. DO NOT make any // change unless you are fully aware of the underlying semantics. @@ -224,10 +221,6 @@ void ctAsserts () ; void ExitEpilog (Thread * Self, ObjectWaiter * Wakee) ; bool ExitSuspendEquivalent (JavaThread * Self) ; - void post_monitor_wait_event(EventJavaMonitorWait * event, - jlong notifier_tid, - jlong timeout, - bool timedout); private: friend class ObjectSynchronizer; --- old/src/share/vm/runtime/os.hpp 2019-02-15 19:02:43.670615251 +0300 +++ new/src/share/vm/runtime/os.hpp 2019-02-15 19:02:43.554619302 +0300 @@ -556,13 +556,13 @@ //File i/o operations static size_t read(int fd, void *buf, unsigned int nBytes); + static size_t read_at(int fd, void *buf, unsigned int nBytes, jlong offset); static size_t restartable_read(int fd, void *buf, unsigned int nBytes); static size_t write(int fd, const void *buf, unsigned int nBytes); // Reading directories. 
static DIR* opendir(const char* dirname); - static int readdir_buf_size(const char *path); - static struct dirent* readdir(DIR* dirp, dirent* dbuf); + static struct dirent* readdir(DIR* dirp); static int closedir(DIR* dirp); // Dynamic library extension @@ -605,6 +605,16 @@ // Unload library static void dll_unload(void *lib); + // Callback for loaded module information + // Input parameters: + // char* module_file_name, + // address module_base_addr, + // address module_top_addr, + // void* param + typedef int (*LoadedModulesCallbackFunc)(const char *, address, address, void *); + + static int get_loaded_modules_info(LoadedModulesCallbackFunc callback, void *param); + // Return the handle of this process static void* get_default_process_handle(); --- old/src/share/vm/runtime/safepoint.cpp 2019-02-15 19:02:44.070601281 +0300 +++ new/src/share/vm/runtime/safepoint.cpp 2019-02-15 19:02:43.974604633 +0300 @@ -32,6 +32,7 @@ #include "code/scopeDesc.hpp" #include "gc_interface/collectedHeap.hpp" #include "interpreter/interpreter.hpp" +#include "jfr/jfrEvents.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.inline.hpp" #include "oops/oop.inline.hpp" @@ -83,6 +84,73 @@ PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC +template +static void set_current_safepoint_id(E* event, int adjustment = 0) { + assert(event != NULL, "invariant"); + event->set_safepointId(SafepointSynchronize::safepoint_counter() + adjustment); +} + +static void post_safepoint_begin_event(EventSafepointBegin* event, + int thread_count, + int critical_thread_count) { + assert(event != NULL, "invariant"); + assert(event->should_commit(), "invariant"); + set_current_safepoint_id(event); + event->set_totalThreadCount(thread_count); + event->set_jniCriticalThreadCount(critical_thread_count); + event->commit(); +} + +static void post_safepoint_cleanup_event(EventSafepointCleanup* event) { + assert(event != NULL, "invariant"); + assert(event->should_commit(), "invariant"); + set_current_safepoint_id(event); 
+ event->commit(); +} + +static void post_safepoint_synchronize_event(EventSafepointStateSynchronization* event, + int initial_number_of_threads, + int threads_waiting_to_block, + unsigned int iterations) { + assert(event != NULL, "invariant"); + if (event->should_commit()) { + // Group this event together with the ones committed after the counter is increased + set_current_safepoint_id(event, 1); + event->set_initialThreadCount(initial_number_of_threads); + event->set_runningThreadCount(threads_waiting_to_block); + event->set_iterations(iterations); + event->commit(); + } +} + +static void post_safepoint_wait_blocked_event(EventSafepointWaitBlocked* event, + int initial_threads_waiting_to_block) { + assert(event != NULL, "invariant"); + assert(event->should_commit(), "invariant"); + set_current_safepoint_id(event); + event->set_runningThreadCount(initial_threads_waiting_to_block); + event->commit(); +} + +static void post_safepoint_cleanup_task_event(EventSafepointCleanupTask* event, + const char* name) { + assert(event != NULL, "invariant"); + if (event->should_commit()) { + set_current_safepoint_id(event); + event->set_name(name); + event->commit(); + } +} + +static void post_safepoint_end_event(EventSafepointEnd* event) { + assert(event != NULL, "invariant"); + if (event->should_commit()) { + // Group this event together with the ones committed before the counter increased + set_current_safepoint_id(event, -1); + event->commit(); + } +} + // -------------------------------------------------------------------------------------------------- // Implementation of Safepoint begin/end @@ -97,7 +165,7 @@ // Roll all threads forward to a safepoint and suspend them all void SafepointSynchronize::begin() { - + EventSafepointBegin begin_event; Thread* myThread = Thread::current(); assert(myThread->is_VM_thread(), "Only VM thread may execute a safepoint"); @@ -189,6 +257,9 @@ // between states, the safepointing code will wait for the thread to // block itself when it 
attempts transitions to a new state. // + EventSafepointStateSynchronization sync_event; + int initial_running = 0; + _state = _synchronizing; OrderAccess::fence(); @@ -243,8 +314,11 @@ } } - if (PrintSafepointStatistics && iterations == 0) { - begin_statistics(nof_threads, still_running); + if (iterations == 0) { + initial_running = still_running; + if (PrintSafepointStatistics) { + begin_statistics(nof_threads, still_running); + } } if (still_running > 0) { @@ -336,43 +410,56 @@ update_statistics_on_spin_end(); } + if (sync_event.should_commit()) { + post_safepoint_synchronize_event(&sync_event, initial_running, _waiting_to_block, iterations); + } + // wait until all threads are stopped - while (_waiting_to_block > 0) { - if (TraceSafepoint) tty->print_cr("Waiting for %d thread(s) to block", _waiting_to_block); - if (!SafepointTimeout || timeout_error_printed) { - Safepoint_lock->wait(true); // true, means with no safepoint checks - } else { - // Compute remaining time - jlong remaining_time = safepoint_limit_time - os::javaTimeNanos(); - - // If there is no remaining time, then there is an error - if (remaining_time < 0 || Safepoint_lock->wait(true, remaining_time / MICROUNITS)) { - print_safepoint_timeout(_blocking_timeout); + { + EventSafepointWaitBlocked wait_blocked_event; + int initial_waiting_to_block = _waiting_to_block; + + while (_waiting_to_block > 0) { + if (TraceSafepoint) tty->print_cr("Waiting for %d thread(s) to block", _waiting_to_block); + if (!SafepointTimeout || timeout_error_printed) { + Safepoint_lock->wait(true); // true, means with no safepoint checks + } else { + // Compute remaining time + jlong remaining_time = safepoint_limit_time - os::javaTimeNanos(); + + // If there is no remaining time, then there is an error + if (remaining_time < 0 || Safepoint_lock->wait(true, remaining_time / MICROUNITS)) { + print_safepoint_timeout(_blocking_timeout); + } } } - } - assert(_waiting_to_block == 0, "sanity check"); + assert(_waiting_to_block == 
0, "sanity check"); #ifndef PRODUCT - if (SafepointTimeout) { - jlong current_time = os::javaTimeNanos(); - if (safepoint_limit_time < current_time) { - tty->print_cr("# SafepointSynchronize: Finished after " - INT64_FORMAT_W(6) " ms", - ((current_time - safepoint_limit_time) / MICROUNITS + - SafepointTimeoutDelay)); + if (SafepointTimeout) { + jlong current_time = os::javaTimeNanos(); + if (safepoint_limit_time < current_time) { + tty->print_cr("# SafepointSynchronize: Finished after " + INT64_FORMAT_W(6) " ms", + ((current_time - safepoint_limit_time) / MICROUNITS + + SafepointTimeoutDelay)); + } } - } #endif - assert((_safepoint_counter & 0x1) == 0, "must be even"); - assert(Threads_lock->owned_by_self(), "must hold Threads_lock"); - _safepoint_counter ++; + assert((_safepoint_counter & 0x1) == 0, "must be even"); + assert(Threads_lock->owned_by_self(), "must hold Threads_lock"); + _safepoint_counter ++; - // Record state - _state = _synchronized; + // Record state + _state = _synchronized; - OrderAccess::fence(); + OrderAccess::fence(); + + if (wait_blocked_event.should_commit()) { + post_safepoint_wait_blocked_event(&wait_blocked_event, initial_waiting_to_block); + } + } #ifdef ASSERT for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) { @@ -395,12 +482,22 @@ } // Call stuff that needs to be run when a safepoint is just about to be completed - do_cleanup_tasks(); + { + EventSafepointCleanup cleanup_event; + do_cleanup_tasks(); + if (cleanup_event.should_commit()) { + post_safepoint_cleanup_event(&cleanup_event); + } + } if (PrintSafepointStatistics) { // Record how much time spend on the above cleanup tasks update_statistics_on_cleanup_end(os::javaTimeNanos()); } + + if (begin_event.should_commit()) { + post_safepoint_begin_event(&begin_event, nof_threads, _current_jni_active_count); + } } // Wake up all threads, so they are ready to resume execution after the safepoint @@ -409,6 +506,7 @@ assert(Threads_lock->owned_by_self(), "must hold 
Threads_lock"); assert((_safepoint_counter & 0x1) == 1, "must be odd"); + EventSafepointEnd event; _safepoint_counter ++; // memory fence isn't required here since an odd _safepoint_counter // value can do no harm and a fence is issued below anyway. @@ -494,6 +592,9 @@ // record this time so VMThread can keep track how much time has elasped // since last safepoint. _end_of_last_safepoint = os::javaTimeMillis(); + if (event.should_commit()) { + post_safepoint_end_event(&event); + } } bool SafepointSynchronize::is_cleanup_needed() { @@ -507,32 +608,62 @@ // Various cleaning tasks that should be done periodically at safepoints void SafepointSynchronize::do_cleanup_tasks() { { - TraceTime t1("deflating idle monitors", TraceSafepointCleanupTime); + const char* name = "deflating idle monitors"; + EventSafepointCleanupTask event; + TraceTime t1(name, TraceSafepointCleanupTime); ObjectSynchronizer::deflate_idle_monitors(); + if (event.should_commit()) { + post_safepoint_cleanup_task_event(&event, name); + } } { - TraceTime t2("updating inline caches", TraceSafepointCleanupTime); + const char* name = "updating inline caches"; + EventSafepointCleanupTask event; + TraceTime t2(name, TraceSafepointCleanupTime); InlineCacheBuffer::update_inline_caches(); + if (event.should_commit()) { + post_safepoint_cleanup_task_event(&event, name); + } } { - TraceTime t3("compilation policy safepoint handler", TraceSafepointCleanupTime); + const char* name = "compilation policy safepoint handler"; + EventSafepointCleanupTask event; + TraceTime t3(name, TraceSafepointCleanupTime); CompilationPolicy::policy()->do_safepoint_work(); + if (event.should_commit()) { + post_safepoint_cleanup_task_event(&event, name); + } } { - TraceTime t4("mark nmethods", TraceSafepointCleanupTime); + const char* name = "mark nmethods"; + EventSafepointCleanupTask event; + TraceTime t4(name, TraceSafepointCleanupTime); NMethodSweeper::mark_active_nmethods(); + if (event.should_commit()) { + 
post_safepoint_cleanup_task_event(&event, name); + } } if (SymbolTable::needs_rehashing()) { - TraceTime t5("rehashing symbol table", TraceSafepointCleanupTime); + const char* name = "rehashing symbol table"; + EventSafepointCleanupTask event; + TraceTime t5(name, TraceSafepointCleanupTime); SymbolTable::rehash_table(); + if (event.should_commit()) { + post_safepoint_cleanup_task_event(&event, name); + } } if (StringTable::needs_rehashing()) { - TraceTime t6("rehashing string table", TraceSafepointCleanupTime); + const char* name = "rehashing string table"; + EventSafepointCleanupTask event; + TraceTime t6(name, TraceSafepointCleanupTime); StringTable::rehash_table(); + if (event.should_commit()) { + post_safepoint_cleanup_task_event(&event, name); + } } // rotate log files? --- old/src/share/vm/runtime/safepoint.hpp 2019-02-15 19:02:44.402589686 +0300 +++ new/src/share/vm/runtime/safepoint.hpp 2019-02-15 19:02:44.266594436 +0300 @@ -145,6 +145,7 @@ // Query inline static bool is_at_safepoint() { return _state == _synchronized; } inline static bool is_synchronizing() { return _state == _synchronizing; } + inline static int safepoint_counter() { return _safepoint_counter; } inline static bool do_call_back() { return (_state != _not_synchronized); --- old/src/share/vm/runtime/sweeper.cpp 2019-02-15 19:02:44.742577813 +0300 +++ new/src/share/vm/runtime/sweeper.cpp 2019-02-15 19:02:44.602582702 +0300 @@ -28,6 +28,7 @@ #include "code/icBuffer.hpp" #include "code/nmethod.hpp" #include "compiler/compileBroker.hpp" +#include "jfr/jfrEvents.hpp" #include "memory/resourceArea.hpp" #include "oops/method.hpp" #include "runtime/atomic.hpp" @@ -38,9 +39,8 @@ #include "runtime/sweeper.hpp" #include "runtime/thread.inline.hpp" #include "runtime/vm_operations.hpp" -#include "trace/tracing.hpp" #include "utilities/events.hpp" -#include "utilities/ticks.inline.hpp" +#include "utilities/ticks.hpp" #include "utilities/xmlstream.hpp" PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC @@ -318,6 +318,24 
@@ } } +static void post_sweep_event(EventSweepCodeCache* event, + const Ticks& start, + const Ticks& end, + s4 traversals, + int swept, + int flushed, + int zombified) { + assert(event != NULL, "invariant"); + assert(event->should_commit(), "invariant"); + event->set_starttime(start); + event->set_endtime(end); + event->set_sweepId(traversals); + event->set_sweptCount(swept); + event->set_flushedCount(flushed); + event->set_zombifiedCount(zombified); + event->commit(); +} + void NMethodSweeper::sweep_code_cache() { ResourceMark rm; Ticks sweep_start_counter = Ticks::now(); @@ -394,15 +412,7 @@ EventSweepCodeCache event(UNTIMED); if (event.should_commit()) { - event.set_starttime(sweep_start_counter); - event.set_endtime(sweep_end_counter); - event.set_sweepIndex(_traversals); - event.set_sweepFractionIndex(NmethodSweepFraction - _sweep_fractions_left + 1); - event.set_sweptCount(swept_count); - event.set_flushedCount(_flushed_count); - event.set_markedCount(_marked_for_reclamation_count); - event.set_zombifiedCount(_zombified_count); - event.commit(); + post_sweep_event(&event, sweep_start_counter, sweep_end_counter, (s4)_traversals, swept_count, _flushed_count, _zombified_count); } #ifdef ASSERT --- old/src/share/vm/runtime/synchronizer.cpp 2019-02-15 19:02:45.014568314 +0300 +++ new/src/share/vm/runtime/synchronizer.cpp 2019-02-15 19:02:44.926571387 +0300 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "classfile/vmSymbols.hpp" +#include "jfr/jfrEvents.hpp" #include "memory/resourceArea.hpp" #include "oops/markOop.hpp" #include "oops/oop.inline.hpp" @@ -1178,6 +1179,17 @@ TEVENT (omFlush) ; } +static void post_monitor_inflate_event(EventJavaMonitorInflate* event, + const oop obj) { + assert(event != NULL, "invariant"); + assert(event->should_commit(), "invariant"); + event->set_monitorClass(obj->klass()); + event->set_address((uintptr_t)(void*)obj); + // XXX no such counters. implement? 
+// event->set_cause((u1)cause); + event->commit(); +} + // Fast path code shared by multiple functions ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) { markOop mark = obj->mark(); @@ -1200,6 +1212,8 @@ assert (Universe::verify_in_progress() || !SafepointSynchronize::is_at_safepoint(), "invariant") ; + EventJavaMonitorInflate event; + for (;;) { const markOop mark = object->mark() ; assert (!mark->has_bias_pattern(), "invariant") ; @@ -1330,6 +1344,9 @@ object->klass()->external_name()); } } + if (event.should_commit()) { + post_monitor_inflate_event(&event, object); + } return m ; } @@ -1380,6 +1397,9 @@ object->klass()->external_name()); } } + if (event.should_commit()) { + post_monitor_inflate_event(&event, object); + } return m ; } } --- old/src/share/vm/runtime/thread.cpp 2019-02-15 19:02:45.358556302 +0300 +++ new/src/share/vm/runtime/thread.cpp 2019-02-15 19:02:45.242560353 +0300 @@ -32,6 +32,8 @@ #include "interpreter/interpreter.hpp" #include "interpreter/linkResolver.hpp" #include "interpreter/oopMapCache.hpp" +#include "jfr/jfrEvents.hpp" +#include "jfr/support/jfrThreadId.hpp" #include "jvmtifiles/jvmtiEnv.hpp" #include "memory/gcLocker.inline.hpp" #include "memory/metaspaceShared.hpp" @@ -77,8 +79,6 @@ #include "services/management.hpp" #include "services/memTracker.hpp" #include "services/threadService.hpp" -#include "trace/tracing.hpp" -#include "trace/traceMacros.hpp" #include "utilities/defaultStream.hpp" #include "utilities/dtrace.hpp" #include "utilities/events.hpp" @@ -111,6 +111,9 @@ #if INCLUDE_RTM_OPT #include "runtime/rtmLocking.hpp" #endif +#if INCLUDE_JFR +#include "jfr/jfr.hpp" +#endif PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC @@ -342,7 +345,7 @@ // Reclaim the objectmonitors from the omFreeList of the moribund thread. 
ObjectSynchronizer::omFlush (this) ; - EVENT_THREAD_DESTRUCT(this); + JFR_ONLY(Jfr::on_thread_destruct(this);) // stack_base can be NULL if the thread is never started or exited before // record_stack_base_and_size called. Although, we would like to ensure @@ -1670,7 +1673,7 @@ EventThreadStart event; if (event.should_commit()) { - event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj())); + event.set_thread(JFR_THREAD_ID(this)); event.commit(); } @@ -1804,12 +1807,12 @@ // from java_lang_Thread object EventThreadEnd event; if (event.should_commit()) { - event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj())); - event.commit(); + event.set_thread(JFR_THREAD_ID(this)); + event.commit(); } // Call after last event on thread - EVENT_THREAD_EXIT(this); + JFR_ONLY(Jfr::on_thread_exit(this);) // Call Thread.exit(). We try 3 times in case we got another Thread.stop during // the execution of the method. If that is not enough, then we don't really care. Thread.stop @@ -2185,6 +2188,8 @@ if (check_asyncs) { check_and_handle_async_exceptions(); } + + JFR_ONLY(SUSPEND_THREAD_CONDITIONAL(this);) } void JavaThread::send_thread_stop(oop java_throwable) { @@ -2423,6 +2428,8 @@ fatal("missed deoptimization!"); } } + + JFR_ONLY(SUSPEND_THREAD_CONDITIONAL(thread);) } // Slow path when the native==>VM/Java barriers detect a safepoint is in @@ -3311,6 +3318,14 @@ if (wt != NULL) tc->do_thread(wt); +#if INCLUDE_JFR + Thread* sampler_thread = Jfr::sampler_thread(); + if (sampler_thread != NULL) { + tc->do_thread(sampler_thread); + } + +#endif + // If CompilerThreads ever become non-JavaThreads, add them here } @@ -3437,6 +3452,8 @@ return status; } + JFR_ONLY(Jfr::on_vm_init();) + // Should be done after the heap is fully created main_thread->cache_global_variables(); @@ -3564,11 +3581,6 @@ quicken_jni_functions(); - // Must be run after init_ft which initializes ft_enabled - if (TRACE_INITIALIZE() != JNI_OK) { - vm_exit_during_initialization("Failed to 
initialize tracing backend"); - } - // Set flag that basic initialization has completed. Used by exceptions and various // debug stuff, that does not work until all basic classes have been initialized. set_init_completed(); @@ -3637,9 +3649,7 @@ // Notify JVMTI agents that VM initialization is complete - nop if no agents. JvmtiExport::post_vm_initialized(); - if (TRACE_START() != JNI_OK) { - vm_exit_during_initialization("Failed to start tracing backend."); - } + JFR_ONLY(Jfr::on_vm_start();) if (CleanChunkPoolAsync) { Chunk::start_chunk_pool_cleaner_task(); --- old/src/share/vm/runtime/thread.hpp 2019-02-15 19:02:45.854538982 +0300 +++ new/src/share/vm/runtime/thread.hpp 2019-02-15 19:02:45.666545546 +0300 @@ -42,8 +42,6 @@ #include "runtime/threadLocalStorage.hpp" #include "runtime/thread_ext.hpp" #include "runtime/unhandledOops.hpp" -#include "trace/traceBackend.hpp" -#include "trace/traceMacros.hpp" #include "utilities/exceptions.hpp" #include "utilities/macros.hpp" #include "utilities/top.hpp" @@ -54,6 +52,9 @@ #ifdef TARGET_ARCH_zero # include "stack_zero.hpp" #endif +#if INCLUDE_JFR +#include "jfr/support/jfrThreadExtension.hpp" +#endif class ThreadSafepointState; class ThreadProfiler; @@ -260,7 +261,7 @@ // Thread-local buffer used by MetadataOnStackMark. 
MetadataOnStackBuffer* _metadata_on_stack_buffer; - TRACE_DATA _trace_data; // Thread-local data for tracing + JFR_ONLY(DEFINE_THREAD_LOCAL_FIELD_JFR;) // Thread-local data for jfr ThreadExt _ext; @@ -441,7 +442,7 @@ void incr_allocated_bytes(jlong size) { _allocated_bytes += size; } inline jlong cooked_allocated_bytes(); - TRACE_DATA* trace_data() { return &_trace_data; } + JFR_ONLY(DEFINE_THREAD_LOCAL_ACCESSOR_JFR;) const ThreadExt& ext() const { return _ext; } ThreadExt& ext() { return _ext; } @@ -626,6 +627,8 @@ static ByteSize allocated_bytes_offset() { return byte_offset_of(Thread, _allocated_bytes ); } + JFR_ONLY(DEFINE_THREAD_LOCAL_OFFSET_JFR;) + public: volatile intptr_t _Stalled ; volatile int _TypeTag ; --- old/src/share/vm/runtime/vmStructs.cpp 2019-02-15 19:02:46.190527250 +0300 +++ new/src/share/vm/runtime/vmStructs.cpp 2019-02-15 19:02:46.074531300 +0300 @@ -169,10 +169,6 @@ #include "gc_implementation/g1/vmStructs_g1.hpp" #endif // INCLUDE_ALL_GCS -#if INCLUDE_TRACE - #include "runtime/vmStructs_trace.hpp" -#endif - #ifdef COMPILER2 #include "opto/addnode.hpp" #include "opto/block.hpp" @@ -2908,11 +2904,6 @@ GENERATE_STATIC_VM_STRUCT_ENTRY) #endif // INCLUDE_ALL_GCS -#if INCLUDE_TRACE - VM_STRUCTS_TRACE(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, - GENERATE_STATIC_VM_STRUCT_ENTRY) -#endif - VM_STRUCTS_CPU(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, GENERATE_STATIC_VM_STRUCT_ENTRY, GENERATE_UNCHECKED_NONSTATIC_VM_STRUCT_ENTRY, @@ -2958,11 +2949,6 @@ GENERATE_TOPLEVEL_VM_TYPE_ENTRY) #endif // INCLUDE_ALL_GCS -#if INCLUDE_TRACE - VM_TYPES_TRACE(GENERATE_VM_TYPE_ENTRY, - GENERATE_TOPLEVEL_VM_TYPE_ENTRY) -#endif - VM_TYPES_CPU(GENERATE_VM_TYPE_ENTRY, GENERATE_TOPLEVEL_VM_TYPE_ENTRY, GENERATE_OOP_VM_TYPE_ENTRY, @@ -2998,10 +2984,6 @@ VM_INT_CONSTANTS_PARNEW(GENERATE_VM_INT_CONSTANT_ENTRY) #endif // INCLUDE_ALL_GCS -#if INCLUDE_TRACE - VM_INT_CONSTANTS_TRACE(GENERATE_VM_INT_CONSTANT_ENTRY) -#endif - VM_INT_CONSTANTS_CPU(GENERATE_VM_INT_CONSTANT_ENTRY, 
GENERATE_PREPROCESSOR_VM_INT_CONSTANT_ENTRY, GENERATE_C1_VM_INT_CONSTANT_ENTRY, @@ -3067,11 +3049,6 @@ #endif // INCLUDE_ALL_GCS -#if INCLUDE_TRACE - VM_STRUCTS_TRACE(CHECK_NONSTATIC_VM_STRUCT_ENTRY, - CHECK_STATIC_VM_STRUCT_ENTRY); -#endif - VM_STRUCTS_CPU(CHECK_NONSTATIC_VM_STRUCT_ENTRY, CHECK_STATIC_VM_STRUCT_ENTRY, CHECK_NO_OP, @@ -3113,11 +3090,6 @@ #endif // INCLUDE_ALL_GCS -#if INCLUDE_TRACE - VM_TYPES_TRACE(CHECK_VM_TYPE_ENTRY, - CHECK_SINGLE_ARG_VM_TYPE_NO_OP); -#endif - VM_TYPES_CPU(CHECK_VM_TYPE_ENTRY, CHECK_SINGLE_ARG_VM_TYPE_NO_OP, CHECK_SINGLE_ARG_VM_TYPE_NO_OP, @@ -3181,11 +3153,6 @@ ENSURE_FIELD_TYPE_PRESENT)); #endif // INCLUDE_ALL_GCS -#if INCLUDE_TRACE - debug_only(VM_STRUCTS_TRACE(ENSURE_FIELD_TYPE_PRESENT, - ENSURE_FIELD_TYPE_PRESENT)); -#endif - debug_only(VM_STRUCTS_CPU(ENSURE_FIELD_TYPE_PRESENT, ENSURE_FIELD_TYPE_PRESENT, CHECK_NO_OP, --- old/src/share/vm/runtime/vmThread.cpp 2019-02-15 19:02:46.598513004 +0300 +++ new/src/share/vm/runtime/vmThread.cpp 2019-02-15 19:02:46.494516635 +0300 @@ -25,6 +25,8 @@ #include "precompiled.hpp" #include "compiler/compileBroker.hpp" #include "gc_interface/collectedHeap.hpp" +#include "jfr/jfrEvents.hpp" +#include "jfr/support/jfrThreadId.hpp" #include "memory/resourceArea.hpp" #include "oops/method.hpp" #include "oops/oop.inline.hpp" @@ -35,7 +37,6 @@ #include "runtime/vmThread.hpp" #include "runtime/vm_operations.hpp" #include "services/runtimeService.hpp" -#include "trace/tracing.hpp" #include "utilities/dtrace.hpp" #include "utilities/events.hpp" #include "utilities/xmlstream.hpp" @@ -358,6 +359,23 @@ st->cr(); } +static void post_vm_operation_event(EventExecuteVMOperation* event, VM_Operation* op) { + assert(event != NULL, "invariant"); + assert(event->should_commit(), "invariant"); + assert(op != NULL, "invariant"); + const bool is_concurrent = op->evaluate_concurrently(); + const bool evaluate_at_safepoint = op->evaluate_at_safepoint(); + event->set_operation(op->type()); + 
event->set_safepoint(evaluate_at_safepoint); + event->set_blocking(!is_concurrent); + // Only write caller thread information for non-concurrent vm operations. + // For concurrent vm operations, the thread id is set to 0 indicating thread is unknown. + // This is because the caller thread could have exited already. + event->set_caller(is_concurrent ? 0 : JFR_THREAD_ID(op->calling_thread())); + event->set_safepointId(evaluate_at_safepoint ? SafepointSynchronize::safepoint_counter() : 0); + event->commit(); +} + void VMThread::evaluate_operation(VM_Operation* op) { ResourceMark rm; @@ -373,19 +391,9 @@ #endif /* USDT2 */ EventExecuteVMOperation event; - op->evaluate(); - if (event.should_commit()) { - bool is_concurrent = op->evaluate_concurrently(); - event.set_operation(op->type()); - event.set_safepoint(op->evaluate_at_safepoint()); - event.set_blocking(!is_concurrent); - // Only write caller thread information for non-concurrent vm operations. - // For concurrent vm operations, the thread id is set to 0 indicating thread is unknown. - // This is because the caller thread could have exited already. - event.set_caller(is_concurrent ? 0 : op->calling_thread()->osthread()->thread_id()); - event.commit(); + post_vm_operation_event(&event, op); } #ifndef USDT2 --- old/src/share/vm/runtime/vm_operations.cpp 2019-02-15 19:02:46.946500853 +0300 +++ new/src/share/vm/runtime/vm_operations.cpp 2019-02-15 19:02:46.854504066 +0300 @@ -37,7 +37,6 @@ #include "runtime/thread.inline.hpp" #include "runtime/vm_operations.hpp" #include "services/threadService.hpp" -#include "trace/tracing.hpp" PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC --- old/src/share/vm/runtime/vm_version.hpp 2019-02-15 19:02:47.210491636 +0300 +++ new/src/share/vm/runtime/vm_version.hpp 2019-02-15 19:02:47.098495546 +0300 @@ -98,6 +98,7 @@ // does HW support an 8-byte compare-exchange operation? 
static bool supports_cx8() { + assert(_initialized, "not initialized"); #ifdef SUPPORTS_NATIVE_CX8 return true; #else @@ -106,10 +107,10 @@ } // does HW support atomic get-and-set or atomic get-and-add? Used // to guide intrinsification decisions for Unsafe atomic ops - static bool supports_atomic_getset4() {return _supports_atomic_getset4;} - static bool supports_atomic_getset8() {return _supports_atomic_getset8;} - static bool supports_atomic_getadd4() {return _supports_atomic_getadd4;} - static bool supports_atomic_getadd8() {return _supports_atomic_getadd8;} + static bool supports_atomic_getset4() { assert(_initialized, "not initialized"); return _supports_atomic_getset4;} + static bool supports_atomic_getset8() { assert(_initialized, "not initialized"); return _supports_atomic_getset8;} + static bool supports_atomic_getadd4() { assert(_initialized, "not initialized"); return _supports_atomic_getadd4;} + static bool supports_atomic_getadd8() { assert(_initialized, "not initialized"); return _supports_atomic_getadd8;} static unsigned int logical_processors_per_package() { return _logical_processors_per_package; --- old/src/share/vm/services/diagnosticArgument.cpp 2019-02-15 19:02:47.450483257 +0300 +++ new/src/share/vm/services/diagnosticArgument.cpp 2019-02-15 19:02:47.350486748 +0300 @@ -280,7 +280,7 @@ size_t len, TRAPS) { if (str == NULL) { THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), - "Integer parsing error nanotime value: syntax error"); + "Parsing error memory size value: syntax error, value is null\n"); } if (*str == '-') { --- old/src/share/vm/services/memTracker.hpp 2019-02-15 19:02:47.734473341 +0300 +++ new/src/share/vm/services/memTracker.hpp 2019-02-15 19:02:47.610477671 +0300 @@ -75,7 +75,7 @@ #else -#include "runtime/atomic.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/threadCritical.hpp" #include "services/mallocTracker.hpp" #include "services/virtualMemoryTracker.hpp" --- 
old/src/share/vm/utilities/bitMap.inline.hpp 2019-02-15 19:02:47.994464264 +0300 +++ new/src/share/vm/utilities/bitMap.inline.hpp 2019-02-15 19:02:47.874468453 +0300 @@ -25,7 +25,7 @@ #ifndef SHARE_VM_UTILITIES_BITMAP_INLINE_HPP #define SHARE_VM_UTILITIES_BITMAP_INLINE_HPP -#include "runtime/atomic.hpp" +#include "runtime/atomic.inline.hpp" #include "utilities/bitMap.hpp" #ifdef ASSERT --- old/src/share/vm/utilities/globalDefinitions.hpp 2019-02-15 19:02:48.262454908 +0300 +++ new/src/share/vm/utilities/globalDefinitions.hpp 2019-02-15 19:02:48.166458259 +0300 @@ -42,6 +42,14 @@ # include "utilities/globalDefinitions_xlc.hpp" #endif +// Defaults for macros that might be defined per compiler. +#ifndef NOINLINE +#define NOINLINE +#endif +#ifndef ALWAYSINLINE +#define ALWAYSINLINE inline +#endif + #ifndef PRAGMA_DIAG_PUSH #define PRAGMA_DIAG_PUSH #endif --- old/src/share/vm/utilities/globalDefinitions_gcc.hpp 2019-02-15 19:02:48.598443178 +0300 +++ new/src/share/vm/utilities/globalDefinitions_gcc.hpp 2019-02-15 19:02:48.498446670 +0300 @@ -263,17 +263,18 @@ #define PRAGMA_IMPLEMENTATION #pragma implementation #define VALUE_OBJ_CLASS_SPEC -#ifndef ATTRIBUTE_PRINTF // Diagnostic pragmas like the ones defined below in PRAGMA_FORMAT_NONLITERAL_IGNORED // were only introduced in GCC 4.2. Because we have no other possibility to ignore // these warnings for older versions of GCC, we simply don't decorate our printf-style // functions with __attribute__(format) in that case. 
#if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 2)) || (__GNUC__ > 4) +#ifndef ATTRIBUTE_PRINTF #define ATTRIBUTE_PRINTF(fmt,vargs) __attribute__((format(printf, fmt, vargs))) -#else -#define ATTRIBUTE_PRINTF(fmt,vargs) #endif +#ifndef ATTRIBUTE_SCANF +#define ATTRIBUTE_SCANF(fmt,vargs) __attribute__((format(scanf, fmt, vargs))) #endif +#endif // gcc version check #define PRAGMA_FORMAT_NONLITERAL_IGNORED _Pragma("GCC diagnostic ignored \"-Wformat-nonliteral\"") \ _Pragma("GCC diagnostic ignored \"-Wformat-security\"") @@ -333,4 +334,8 @@ #define JLONG_FORMAT "%ld" #endif // _LP64 && __APPLE__ +// Inlining support +#define NOINLINE __attribute__ ((noinline)) +#define ALWAYSINLINE inline __attribute__ ((always_inline)) + #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_GCC_HPP --- old/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp 2019-02-15 19:02:48.914432147 +0300 +++ new/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp 2019-02-15 19:02:48.790436476 +0300 @@ -281,4 +281,8 @@ #define offset_of(klass,field) offsetof(klass,field) +// Inlining support +#define NOINLINE +#define ALWAYSINLINE inline __attribute__((always_inline)) + #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_SPARCWORKS_HPP --- old/src/share/vm/utilities/globalDefinitions_visCPP.hpp 2019-02-15 19:02:49.298418743 +0300 +++ new/src/share/vm/utilities/globalDefinitions_visCPP.hpp 2019-02-15 19:02:49.130424606 +0300 @@ -171,6 +171,11 @@ #define strdup _strdup #endif +#if _MSC_VER < 1800 +// Fixes some wrong warnings about 'this' : used in base member initializer list +#pragma warning( disable : 4355 ) +#endif + #pragma warning( disable : 4100 ) // unreferenced formal parameter #pragma warning( disable : 4127 ) // conditional expression is constant #pragma warning( disable : 4514 ) // unreferenced inline function has been removed @@ -218,4 +223,11 @@ #define offset_of(klass,field) offsetof(klass,field) +// Inlining support +// MSVC has '__declspec(noinline)' but according to the official 
documentation +// it only applies to member functions. There are reports though which pretend +// that it also works for freestanding functions. +#define NOINLINE __declspec(noinline) +#define ALWAYSINLINE __forceinline + #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_VISCPP_HPP --- old/src/share/vm/utilities/globalDefinitions_xlc.hpp 2019-02-15 19:02:49.654406316 +0300 +++ new/src/share/vm/utilities/globalDefinitions_xlc.hpp 2019-02-15 19:02:49.526410784 +0300 @@ -180,5 +180,7 @@ #define SIZE_64G ((uint64_t) 0x1000000000ULL) #define SIZE_1T ((uint64_t) 0x10000000000ULL) +#define NOINLINE __attribute__((__noinline__)) +#define ALWAYSINLINE inline __attribute__((__always_inline__)) #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_XLC_HPP --- old/src/share/vm/utilities/growableArray.hpp 2019-02-15 19:02:49.878398497 +0300 +++ new/src/share/vm/utilities/growableArray.hpp 2019-02-15 19:02:49.782401848 +0300 @@ -168,6 +168,10 @@ GrowableArray(int initial_size, bool C_heap = false, MEMFLAGS F = mtInternal) : GenericGrowableArray(initial_size, 0, C_heap, F) { _data = (E*)raw_allocate(sizeof(E)); +// Needed for Visual Studio 2012 and older +#ifdef _MSC_VER +#pragma warning(suppress: 4345) +#endif for (int i = 0; i < _max; i++) ::new ((void*)&_data[i]) E(); } @@ -372,6 +376,40 @@ void sort(int f(E*,E*), int stride) { qsort(_data, length() / stride, sizeof(E) * stride, (_sort_Fn)f); } + + // Binary search and insertion utility. Search array for element + // matching key according to the static compare function. Insert + // that element is not already in the list. Assumes the list is + // already sorted according to compare function. 
+ template E insert_sorted(const E& key) { + bool found; + int location = find_sorted(key, found); + if (!found) { + insert_before(location, key); + } + return at(location); + } + + template int find_sorted(const K& key, bool& found) { + found = false; + int min = 0; + int max = length() - 1; + + while (max >= min) { + int mid = (int)(((uint)max + min) / 2); + E value = at(mid); + int diff = compare(key, value); + if (diff > 0) { + min = mid + 1; + } else if (diff < 0) { + max = mid - 1; + } else { + found = true; + return mid; + } + } + return min; + } }; // Global GrowableArray methods (one instance in the library per each 'E' type). @@ -385,6 +423,10 @@ E* newData = (E*)raw_allocate(sizeof(E)); int i = 0; for ( ; i < _len; i++) ::new ((void*)&newData[i]) E(_data[i]); +// Needed for Visual Studio 2012 and older +#ifdef _MSC_VER +#pragma warning(suppress: 4345) +#endif for ( ; i < _max; i++) ::new ((void*)&newData[i]) E(); for (i = 0; i < old_max; i++) _data[i].~E(); if (on_C_heap() && _data != NULL) { --- old/src/share/vm/utilities/macros.hpp 2019-02-15 19:02:50.142389282 +0300 +++ new/src/share/vm/utilities/macros.hpp 2019-02-15 19:02:50.046392632 +0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -160,9 +160,15 @@ #define NOT_NMT_RETURN_(code) { return code; } #endif // INCLUDE_NMT -#ifndef INCLUDE_TRACE -#define INCLUDE_TRACE 1 -#endif // INCLUDE_TRACE +#ifndef INCLUDE_JFR +#define INCLUDE_JFR 1 +#endif + +#if INCLUDE_JFR +#define JFR_ONLY(code) code +#else +#define JFR_ONLY(code) +#endif // COMPILER1 variant #ifdef COMPILER1 --- old/src/share/vm/utilities/ticks.cpp 2019-02-15 19:02:50.454378392 +0300 +++ new/src/share/vm/utilities/ticks.cpp 2019-02-15 19:02:50.286384255 +0300 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,45 +24,113 @@ #include "precompiled.hpp" #include "runtime/os.hpp" -#include "utilities/ticks.inline.hpp" +#include "utilities/ticks.hpp" -#ifdef ASSERT - const jlong Ticks::invalid_time_stamp = -2; // 0xFFFF FFFF`FFFF FFFE +#ifdef X86 +#include "rdtsc_x86.hpp" #endif -void Ticks::stamp() { - _stamp_ticks = os::elapsed_counter(); +template +inline double conversion(typename TimeSource::Type& value) { + return (double)value * ((double)unit / (double)TimeSource::frequency()); } -const Ticks Ticks::now() { - Ticks t; - t.stamp(); - return t; +uint64_t ElapsedCounterSource::frequency() { + static const uint64_t freq = (uint64_t)os::elapsed_frequency(); + return freq; } -Tickspan::Tickspan(const Ticks& end, const Ticks& start) { - assert(end.value() != Ticks::invalid_time_stamp, "end is unstamped!"); - assert(start.value() != Ticks::invalid_time_stamp, "start is unstamped!"); +ElapsedCounterSource::Type ElapsedCounterSource::now() { + return os::elapsed_counter(); +} + +double ElapsedCounterSource::seconds(Type value) { + return conversion(value); +} - assert(end >= start, "negative time!"); +uint64_t 
ElapsedCounterSource::milliseconds(Type value) { + return (uint64_t)conversion(value); +} - _span_ticks = end.value() - start.value(); +uint64_t ElapsedCounterSource::microseconds(Type value) { + return (uint64_t)conversion(value); } -template -static ReturnType time_conversion(const Tickspan& span, TicksToTimeHelper::Unit unit) { - assert(TicksToTimeHelper::SECONDS == unit || - TicksToTimeHelper::MILLISECONDS == unit, "invalid unit!"); +uint64_t ElapsedCounterSource::nanoseconds(Type value) { + return (uint64_t)conversion(value); +} + +uint64_t FastUnorderedElapsedCounterSource::frequency() { +#ifdef X86 + static bool valid_rdtsc = Rdtsc::initialize(); + if (valid_rdtsc) { + static const uint64_t freq = (uint64_t)Rdtsc::frequency(); + return freq; + } +#endif + static const uint64_t freq = (uint64_t)os::elapsed_frequency(); + return freq; +} + +FastUnorderedElapsedCounterSource::Type FastUnorderedElapsedCounterSource::now() { +#ifdef X86 + static bool valid_rdtsc = Rdtsc::initialize(); + if (valid_rdtsc) { + return Rdtsc::elapsed_counter(); + } +#endif + return os::elapsed_counter(); +} + +double FastUnorderedElapsedCounterSource::seconds(Type value) { + return conversion(value); +} - ReturnType frequency_per_unit = (ReturnType)os::elapsed_frequency() / (ReturnType)unit; +uint64_t FastUnorderedElapsedCounterSource::milliseconds(Type value) { + return (uint64_t)conversion(value); +} + +uint64_t FastUnorderedElapsedCounterSource::microseconds(Type value) { + return (uint64_t)conversion(value); +} + +uint64_t FastUnorderedElapsedCounterSource::nanoseconds(Type value) { + return (uint64_t)conversion(value); +} + +uint64_t CompositeElapsedCounterSource::frequency() { + return ElapsedCounterSource::frequency(); +} + +CompositeElapsedCounterSource::Type CompositeElapsedCounterSource::now() { + CompositeTime ct; + ct.val1 = ElapsedCounterSource::now(); +#ifdef X86 + static bool initialized = false; + static bool valid_rdtsc = false; + if (!initialized) { + valid_rdtsc = 
Rdtsc::initialize(); + initialized = true; + } + if (valid_rdtsc) { + ct.val2 = Rdtsc::elapsed_counter(); + } +#endif + return ct; +} + +double CompositeElapsedCounterSource::seconds(Type value) { + return conversion(value.val1); +} - return (ReturnType) ((ReturnType)span.value() / frequency_per_unit); +uint64_t CompositeElapsedCounterSource::milliseconds(Type value) { + return (uint64_t)conversion(value.val1); } -double TicksToTimeHelper::seconds(const Tickspan& span) { - return time_conversion(span, SECONDS); +uint64_t CompositeElapsedCounterSource::microseconds(Type value) { + return (uint64_t)conversion(value.val1); } -jlong TicksToTimeHelper::milliseconds(const Tickspan& span) { - return time_conversion(span, MILLISECONDS); +uint64_t CompositeElapsedCounterSource::nanoseconds(Type value) { + return (uint64_t)conversion(value.val1); } --- old/src/share/vm/utilities/ticks.hpp 2019-02-15 19:02:50.730368758 +0300 +++ new/src/share/vm/utilities/ticks.hpp 2019-02-15 19:02:50.602373226 +0300 @@ -1,111 +1,249 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ +* Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. +* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +* +* This code is free software; you can redistribute it and/or modify it +* under the terms of the GNU General Public License version 2 only, as +* published by the Free Software Foundation. +* +* This code is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +* version 2 for more details (a copy is included in the LICENSE file that +* accompanied this code). +* +* You should have received a copy of the GNU General Public License version +* 2 along with this work; if not, write to the Free Software Foundation, +* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +* +* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +* or visit www.oracle.com if you need additional information or have any +* questions. 
+* +*/ #ifndef SHARE_VM_UTILITIES_TICKS_HPP #define SHARE_VM_UTILITIES_TICKS_HPP +#include "jni.h" #include "memory/allocation.hpp" -#include "utilities/globalDefinitions.hpp" +#include "utilities/macros.hpp" -class Ticks; +// Time sources +class ElapsedCounterSource { + public: + typedef jlong Type; + static uint64_t frequency(); + static Type now(); + static double seconds(Type value); + static uint64_t milliseconds(Type value); + static uint64_t microseconds(Type value); + static uint64_t nanoseconds(Type value); +}; -class Tickspan VALUE_OBJ_CLASS_SPEC { - friend class Ticks; - friend Tickspan operator-(const Ticks& end, const Ticks& start); +// Not guaranteed to be synchronized across hardware threads and +// therefore software threads, and can be updated asynchronously +// by software. now() can jump backwards as well as jump forward +// when threads query different cores/sockets. +// Very much not recommended for general use. Caveat emptor. +class FastUnorderedElapsedCounterSource { + public: + typedef jlong Type; + static uint64_t frequency(); + static Type now(); + static double seconds(Type value); + static uint64_t milliseconds(Type value); + static uint64_t microseconds(Type value); + static uint64_t nanoseconds(Type value); +}; - private: - jlong _span_ticks; +template +class PairRep { + public: + T1 val1; + T2 val2; + + PairRep() : val1((T1)0), val2((T2)0) {} + void operator+=(const PairRep& rhs) { + val1 += rhs.val1; + val2 += rhs.val2; + } + void operator-=(const PairRep& rhs) { + val1 -= rhs.val1; + val2 -= rhs.val2; + } + bool operator==(const PairRep& rhs) const { + return val1 == rhs.val1; + } + bool operator!=(const PairRep& rhs) const { + return !operator==(rhs); + } + bool operator<(const PairRep& rhs) const { + return val1 < rhs.val1; + } + bool operator>(const PairRep& rhs) const { + return val1 > rhs.val1; + } +}; - Tickspan(const Ticks& end, const Ticks& start); +template +PairRep operator-(const PairRep& lhs, const PairRep& rhs) { + 
PairRep temp(lhs); + temp -= rhs; + return temp; +} +typedef PairRep CompositeTime; + +class CompositeElapsedCounterSource { public: - Tickspan() : _span_ticks(0) {} + typedef CompositeTime Type; + static uint64_t frequency(); + static Type now(); + static double seconds(Type value); + static uint64_t milliseconds(Type value); + static uint64_t microseconds(Type value); + static uint64_t nanoseconds(Type value); +}; - Tickspan& operator+=(const Tickspan& rhs) { - _span_ticks += rhs._span_ticks; - return *this; +template +class Representation { + public: + typedef typename TimeSource::Type Type; + protected: + Type _rep; + Representation(const Representation& end, const Representation& start) : _rep(end._rep - start._rep) {} + Representation() : _rep() {} + public: + void operator+=(const Representation& rhs) { + _rep += rhs._rep; + } + void operator-=(const Representation& rhs) { + _rep -= rhs._rep; + } + bool operator==(const Representation& rhs) const { + return _rep == rhs._rep; + } + bool operator!=(const Representation& rhs) const { + return !operator==(rhs); + } + bool operator<(const Representation& rhs) const { + return _rep < rhs._rep; + } + bool operator>(const Representation& rhs) const { + return _rep > rhs._rep; + } + bool operator<=(const Representation& rhs) const { + return !operator>(rhs); + } + bool operator>=(const Representation& rhs) const { + return !operator<(rhs); + } + double seconds() const { + return TimeSource::seconds(_rep); + } + uint64_t milliseconds() const { + return TimeSource::milliseconds(_rep); } + uint64_t microseconds() const { + return TimeSource::microseconds(_rep); + } + uint64_t nanoseconds() const { + return TimeSource::nanoseconds(_rep); + } +}; - jlong value() const { - return _span_ticks; +template +class CounterRepresentation : public Representation { + protected: + CounterRepresentation(const CounterRepresentation& end, const CounterRepresentation& start) : Representation(end, start) {} + explicit 
CounterRepresentation(jlong value) : Representation() { + this->_rep = value; } + public: + CounterRepresentation() : Representation() {} + typename TimeSource::Type value() const { return this->_rep; } + operator typename TimeSource::Type() { return value(); } +}; +template +class CompositeCounterRepresentation : public Representation { + protected: + CompositeCounterRepresentation(const CompositeCounterRepresentation& end, const CompositeCounterRepresentation& start) : + Representation(end, start) {} + explicit CompositeCounterRepresentation(jlong value) : Representation() { + this->_rep.val1 = value; + this->_rep.val2 = value; + } + public: + CompositeCounterRepresentation() : Representation() {} + ElapsedCounterSource::Type value() const { return this->_rep.val1; } + FastUnorderedElapsedCounterSource::Type ft_value() const { return this->_rep.val2; } }; -class Ticks VALUE_OBJ_CLASS_SPEC { - private: - jlong _stamp_ticks; +template